From 889e7c668e2e2abc5a9c32e1c62ec99a13cb5460 Mon Sep 17 00:00:00 2001 From: teor Date: Mon, 22 May 2023 22:05:40 +1000 Subject: [PATCH 001/265] fix(ci): Reduce number of sent transactions in tests (#6736) * Reduce sent transactions in tests * rustfmt --- zebrad/tests/common/lightwalletd/send_transaction_test.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/zebrad/tests/common/lightwalletd/send_transaction_test.rs b/zebrad/tests/common/lightwalletd/send_transaction_test.rs index 1db9d1211d4..c58c7b8386e 100644 --- a/zebrad/tests/common/lightwalletd/send_transaction_test.rs +++ b/zebrad/tests/common/lightwalletd/send_transaction_test.rs @@ -45,7 +45,7 @@ use crate::common::{ /// TODO: replace with a const when `min()` stabilises as a const function: /// https://github.com/rust-lang/rust/issues/92391 fn max_sent_transactions() -> usize { - min(CHANNEL_AND_QUEUE_CAPACITY, MAX_INBOUND_CONCURRENCY) - 1 + min(CHANNEL_AND_QUEUE_CAPACITY, MAX_INBOUND_CONCURRENCY) / 2 } /// Number of blocks past the finalized to load transactions from. 
From f6641eaaeec33d145c1781a2b47173b09422834b Mon Sep 17 00:00:00 2001 From: teor Date: Tue, 23 May 2023 05:21:53 +1000 Subject: [PATCH 002/265] cleanup(gossip): Use a separate named constant for the gossip interval (#6704) * Use a named consttant for the gossip interval * Update tests --------- Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com> --- zebrad/src/components/inbound/tests/fake_peer_set.rs | 10 +++++----- zebrad/src/components/mempool/gossip.rs | 7 +++++-- zebrad/src/components/sync.rs | 10 ++++++++++ zebrad/src/components/sync/gossip.rs | 9 +++++---- 4 files changed, 25 insertions(+), 11 deletions(-) diff --git a/zebrad/src/components/inbound/tests/fake_peer_set.rs b/zebrad/src/components/inbound/tests/fake_peer_set.rs index 55816fd4eb9..be7154198bc 100644 --- a/zebrad/src/components/inbound/tests/fake_peer_set.rs +++ b/zebrad/src/components/inbound/tests/fake_peer_set.rs @@ -35,7 +35,7 @@ use crate::{ gossip_mempool_transaction_id, unmined_transactions_in_blocks, Config as MempoolConfig, Mempool, MempoolError, SameEffectsChainRejectionError, UnboxMempoolError, }, - sync::{self, BlockGossipError, SyncStatus, TIPS_RESPONSE_TIMEOUT}, + sync::{self, BlockGossipError, SyncStatus, PEER_GOSSIP_DELAY}, }, BoxError, }; @@ -421,7 +421,7 @@ async fn mempool_transaction_expiration() -> Result<(), crate::BoxError> { hs.insert(tx1_id); // Transaction and Block IDs are gossipped, in any order, after waiting for the gossip delay - tokio::time::sleep(TIPS_RESPONSE_TIMEOUT).await; + tokio::time::sleep(PEER_GOSSIP_DELAY).await; let possible_requests = &mut [ Request::AdvertiseTransactionIds(hs), Request::AdvertiseBlock(block_two.hash()), @@ -490,7 +490,7 @@ async fn mempool_transaction_expiration() -> Result<(), crate::BoxError> { .unwrap(); // Test the block is gossiped, after waiting for the multi-gossip delay - tokio::time::sleep(TIPS_RESPONSE_TIMEOUT).await; + tokio::time::sleep(PEER_GOSSIP_DELAY).await; peer_set 
.expect_request(Request::AdvertiseBlock(block_three.hash())) .await @@ -567,7 +567,7 @@ async fn mempool_transaction_expiration() -> Result<(), crate::BoxError> { ); // Test transaction 2 is gossiped, after waiting for the multi-gossip delay - tokio::time::sleep(TIPS_RESPONSE_TIMEOUT).await; + tokio::time::sleep(PEER_GOSSIP_DELAY).await; let mut hs = HashSet::new(); hs.insert(tx2_id); @@ -598,7 +598,7 @@ async fn mempool_transaction_expiration() -> Result<(), crate::BoxError> { .unwrap(); // Test the block is gossiped, after waiting for the multi-gossip delay - tokio::time::sleep(TIPS_RESPONSE_TIMEOUT).await; + tokio::time::sleep(PEER_GOSSIP_DELAY).await; peer_set .expect_request(Request::AdvertiseBlock(block.hash())) .await diff --git a/zebrad/src/components/mempool/gossip.rs b/zebrad/src/components/mempool/gossip.rs index eefa2d53ae4..6d3b2b638bf 100644 --- a/zebrad/src/components/mempool/gossip.rs +++ b/zebrad/src/components/mempool/gossip.rs @@ -16,7 +16,10 @@ use zebra_network::MAX_TX_INV_IN_SENT_MESSAGE; use zebra_network as zn; -use crate::{components::sync::TIPS_RESPONSE_TIMEOUT, BoxError}; +use crate::{ + components::sync::{PEER_GOSSIP_DELAY, TIPS_RESPONSE_TIMEOUT}, + BoxError, +}; /// The maximum number of channel messages we will combine into a single peer broadcast. pub const MAX_CHANGES_BEFORE_SEND: usize = 10; @@ -96,6 +99,6 @@ where // // in practice, transactions arrive every 1-20 seconds, // so waiting 6 seconds can delay transaction propagation, in order to reduce peer load - tokio::time::sleep(TIPS_RESPONSE_TIMEOUT).await; + tokio::time::sleep(PEER_GOSSIP_DELAY).await; } } diff --git a/zebrad/src/components/sync.rs b/zebrad/src/components/sync.rs index 8679f66d6b4..2e984b3af25 100644 --- a/zebrad/src/components/sync.rs +++ b/zebrad/src/components/sync.rs @@ -107,6 +107,16 @@ pub const MAX_TIPS_RESPONSE_HASH_COUNT: usize = 500; /// failure loop. 
pub const TIPS_RESPONSE_TIMEOUT: Duration = Duration::from_secs(6); +/// Controls how long we wait between gossiping successive blocks or transactions. +/// +/// ## Correctness +/// +/// If this timeout is set too high, blocks and transactions won't propagate through +/// the network efficiently. +/// +/// If this timeout is set too low, the peer set and remote peers can get overloaded. +pub const PEER_GOSSIP_DELAY: Duration = Duration::from_secs(7); + /// Controls how long we wait for a block download request to complete. /// /// This timeout makes sure that the syncer doesn't hang when: diff --git a/zebrad/src/components/sync/gossip.rs b/zebrad/src/components/sync/gossip.rs index a6fcb3b49d5..9cb02c6529f 100644 --- a/zebrad/src/components/sync/gossip.rs +++ b/zebrad/src/components/sync/gossip.rs @@ -9,9 +9,10 @@ use tower::{timeout::Timeout, Service, ServiceExt}; use zebra_network as zn; use zebra_state::ChainTipChange; -use crate::BoxError; - -use super::{SyncStatus, TIPS_RESPONSE_TIMEOUT}; +use crate::{ + components::sync::{SyncStatus, PEER_GOSSIP_DELAY, TIPS_RESPONSE_TIMEOUT}, + BoxError, +}; use BlockGossipError::*; @@ -90,6 +91,6 @@ where // // in practice, we expect blocks to arrive approximately every 75 seconds, // so waiting 6 seconds won't make much difference - tokio::time::sleep(TIPS_RESPONSE_TIMEOUT).await; + tokio::time::sleep(PEER_GOSSIP_DELAY).await; } } From 3f13072c4634d5a41aa762736a6a6f785dbecf01 Mon Sep 17 00:00:00 2001 From: teor Date: Tue, 23 May 2023 05:22:13 +1000 Subject: [PATCH 003/265] cleanup(rust): Simplify code using closure capture in Rust 2021 edition (#6737) * Simplify code using closure capture in Rust 2021 edition * clippy: manual_next_back and unit_arg * cargo fmt --all --- zebra-consensus/src/transaction/tests.rs | 6 ++---- zebra-network/src/peer_set/set.rs | 7 ++----- zebra-rpc/src/server/tests/vectors.rs | 3 +++ zebra-state/src/service/non_finalized_state.rs | 2 +- 4 files changed, 8 insertions(+), 10 deletions(-) diff --git 
a/zebra-consensus/src/transaction/tests.rs b/zebra-consensus/src/transaction/tests.rs index 7e6f8fe331e..ea5bdc2c494 100644 --- a/zebra-consensus/src/transaction/tests.rs +++ b/zebra-consensus/src/transaction/tests.rs @@ -866,8 +866,7 @@ async fn v5_transaction_is_rejected_before_nu5_activation() { let verifier = Verifier::new(network, state_service); let transaction = fake_v5_transactions_for_network(network, blocks) - .rev() - .next() + .next_back() .expect("At least one fake V5 transaction in the test vectors"); let result = verifier @@ -918,8 +917,7 @@ fn v5_transaction_is_accepted_after_nu5_activation_for_network(network: Network) let verifier = Verifier::new(network, state_service); let mut transaction = fake_v5_transactions_for_network(network, blocks) - .rev() - .next() + .next_back() .expect("At least one fake V5 transaction in the test vectors"); if transaction .expiry_height() diff --git a/zebra-network/src/peer_set/set.rs b/zebra-network/src/peer_set/set.rs index 88d53b5461a..c668c3a0d83 100644 --- a/zebra-network/src/peer_set/set.rs +++ b/zebra-network/src/peer_set/set.rs @@ -504,15 +504,12 @@ where /// Checks if the minimum peer version has changed, and disconnects from outdated peers. fn disconnect_from_outdated_peers(&mut self) { if let Some(minimum_version) = self.minimum_peer_version.changed() { - // TODO: Remove when the code base migrates to Rust 2021 edition (#2709). - let preselected_p2c_peer = &mut self.preselected_p2c_peer; - self.ready_services.retain(|address, peer| { if peer.remote_version() >= minimum_version { true } else { - if *preselected_p2c_peer == Some(*address) { - *preselected_p2c_peer = None; + if self.preselected_p2c_peer == Some(*address) { + self.preselected_p2c_peer = None; } false diff --git a/zebra-rpc/src/server/tests/vectors.rs b/zebra-rpc/src/server/tests/vectors.rs index ad7bb8b050a..89fa8333ceb 100644 --- a/zebra-rpc/src/server/tests/vectors.rs +++ b/zebra-rpc/src/server/tests/vectors.rs @@ -1,5 +1,8 @@ //! 
Fixed test vectors for the RPC server. +// These tests call functions which can take unit arguments if some features aren't enabled. +#![allow(clippy::unit_arg)] + use std::{ net::{Ipv4Addr, SocketAddrV4}, time::Duration, diff --git a/zebra-state/src/service/non_finalized_state.rs b/zebra-state/src/service/non_finalized_state.rs index 9beed6b1f0e..3b036ae57b9 100644 --- a/zebra-state/src/service/non_finalized_state.rs +++ b/zebra-state/src/service/non_finalized_state.rs @@ -549,7 +549,7 @@ impl NonFinalizedState { /// Return the non-finalized portion of the current best chain. pub fn best_chain(&self) -> Option<&Arc> { - self.chain_set.iter().rev().next() + self.chain_iter().next() } /// Return the number of chains. From fc556eeb5a32b18dd4989ab93d563902f0fde9ab Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 22 May 2023 21:40:04 +0000 Subject: [PATCH 004/265] build(deps): bump sentry from 0.31.0 to 0.31.1 (#6734) Bumps [sentry](https://github.com/getsentry/sentry-rust) from 0.31.0 to 0.31.1. - [Release notes](https://github.com/getsentry/sentry-rust/releases) - [Changelog](https://github.com/getsentry/sentry-rust/blob/master/CHANGELOG.md) - [Commits](https://github.com/getsentry/sentry-rust/compare/0.31.0...0.31.1) --- updated-dependencies: - dependency-name: sentry dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 24 ++++++++++++------------ zebrad/Cargo.toml | 2 +- 2 files changed, 13 insertions(+), 13 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index acf7fedb717..b4e6941aa61 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3982,9 +3982,9 @@ checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" [[package]] name = "sentry" -version = "0.31.0" +version = "0.31.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c3d7f8bf7373e75222452fcdd9347d857452a92d0eec738f941bc4656c5b5df" +checksum = "37dd6c0cdca6b1d1ca44cde7fff289f2592a97965afec870faa7b81b9fc87745" dependencies = [ "httpdate", "reqwest", @@ -4000,9 +4000,9 @@ dependencies = [ [[package]] name = "sentry-backtrace" -version = "0.31.0" +version = "0.31.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03b7cdefbdca51f1146f0f24a3cb4ecb6428951f030ff5c720cfb5c60bd174c0" +checksum = "c029fe8317cdd75cb2b52c600bab4e2ef64c552198e669ba874340447f330962" dependencies = [ "backtrace", "once_cell", @@ -4012,9 +4012,9 @@ dependencies = [ [[package]] name = "sentry-contexts" -version = "0.31.0" +version = "0.31.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6af4cb29066e0e8df0cc3111211eb93543ccb09e1ccbe71de6d88b4bb459a2b1" +checksum = "bc575098d73c8b942b589ab453b06e4c43527556dd8f95532220d1b54d7c6b4b" dependencies = [ "hostname", "libc", @@ -4026,9 +4026,9 @@ dependencies = [ [[package]] name = "sentry-core" -version = "0.31.0" +version = "0.31.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e781b55761e47a60d1ff326ae8059de22b0e6b0cee68eab1c5912e4fb199a76" +checksum = "20216140001bbf05895f013abd0dae4df58faee24e016d54cbf107f070bac56b" dependencies = [ "once_cell", "rand 0.8.5", @@ -4039,9 +4039,9 @@ dependencies = [ [[package]] name = "sentry-tracing" -version = 
"0.31.0" +version = "0.31.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be4692bfc5bf69a8a41ccb0ce96612686eddb2406e32f7113f536efa15949af8" +checksum = "0ef4111647923c797687094bc792b8da938c4b0d64fab331d5b7a7de41964de8" dependencies = [ "sentry-core", "tracing-core", @@ -4050,9 +4050,9 @@ dependencies = [ [[package]] name = "sentry-types" -version = "0.31.0" +version = "0.31.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d642a04657cc77d8de52ae7c6d93a15cb02284eb219344a89c1e2b26bbaf578c" +checksum = "d7f6959d8cb3a77be27e588eef6ce9a2a469651a556d9de662e4d07e5ace4232" dependencies = [ "debugid", "getrandom 0.2.9", diff --git a/zebrad/Cargo.toml b/zebrad/Cargo.toml index 064848fff4c..2d28a3d345f 100644 --- a/zebrad/Cargo.toml +++ b/zebrad/Cargo.toml @@ -152,7 +152,7 @@ num-integer = "0.1.45" rand = { version = "0.8.5", package = "rand" } # prod feature sentry -sentry = { version = "0.31.0", default-features = false, features = ["backtrace", "contexts", "reqwest", "rustls", "tracing"], optional = true } +sentry = { version = "0.31.1", default-features = false, features = ["backtrace", "contexts", "reqwest", "rustls", "tracing"], optional = true } # prod feature flamegraph tracing-flame = { version = "0.2.0", optional = true } From 6784a9a413608d4378f338a954eb92696e700e32 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 22 May 2023 21:40:22 +0000 Subject: [PATCH 005/265] build(deps): bump Swatinem/rust-cache from 2.3.0 to 2.4.0 (#6741) Bumps [Swatinem/rust-cache](https://github.com/Swatinem/rust-cache) from 2.3.0 to 2.4.0. 
- [Release notes](https://github.com/Swatinem/rust-cache/releases) - [Changelog](https://github.com/Swatinem/rust-cache/blob/master/CHANGELOG.md) - [Commits](https://github.com/Swatinem/rust-cache/compare/v2.3.0...v2.4.0) --- updated-dependencies: - dependency-name: Swatinem/rust-cache dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/continous-integration-os.yml | 4 ++-- .github/workflows/docs.yml | 2 +- .github/workflows/lint.yml | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/workflows/continous-integration-os.yml b/.github/workflows/continous-integration-os.yml index 9633100541d..0fe71185dbc 100644 --- a/.github/workflows/continous-integration-os.yml +++ b/.github/workflows/continous-integration-os.yml @@ -101,7 +101,7 @@ jobs: curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --default-toolchain=${{ matrix.rust }} --profile=minimal - - uses: Swatinem/rust-cache@v2.3.0 + - uses: Swatinem/rust-cache@v2.4.0 # TODO: change Rust cache target directory on Windows, # or remove this workaround once the build is more efficient (#3005). 
#with: @@ -236,7 +236,7 @@ jobs: run: | curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --default-toolchain=stable --profile=minimal - - uses: Swatinem/rust-cache@v2.3.0 + - uses: Swatinem/rust-cache@v2.4.0 with: shared-key: "clippy-cargo-lock" diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml index c2e84be3cbb..b7e668a258e 100644 --- a/.github/workflows/docs.yml +++ b/.github/workflows/docs.yml @@ -56,7 +56,7 @@ jobs: run: | curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --default-toolchain=beta --profile=default - - uses: Swatinem/rust-cache@v2.3.0 + - uses: Swatinem/rust-cache@v2.4.0 - name: Setup mdBook uses: peaceiris/actions-mdbook@v1.2.0 diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 8ad8e2a2f20..8ebe0fca4a4 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -86,7 +86,7 @@ jobs: run: | curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --default-toolchain=stable --profile=default - - uses: Swatinem/rust-cache@v2.3.0 + - uses: Swatinem/rust-cache@v2.4.0 with: shared-key: "clippy-cargo-lock" @@ -131,7 +131,7 @@ jobs: # We don't cache `fmt` outputs because the job is quick, # and we want to use the limited GitHub actions cache space for slower jobs. - #- uses: Swatinem/rust-cache@v2.3.0 + #- uses: Swatinem/rust-cache@v2.4.0 - run: | cargo fmt --all -- --check From f2be848de0066d14e0186b003768be4ff68d6ed4 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 23 May 2023 00:43:48 +0000 Subject: [PATCH 006/265] build(deps): bump vergen from 8.1.3 to 8.2.0 (#6742) Bumps [vergen](https://github.com/rustyhorde/vergen) from 8.1.3 to 8.2.0. 
- [Release notes](https://github.com/rustyhorde/vergen/releases) - [Commits](https://github.com/rustyhorde/vergen/compare/8.1.3...8.2.0) --- updated-dependencies: - dependency-name: vergen dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- zebrad/Cargo.toml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b4e6941aa61..a6d4030924e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5180,9 +5180,9 @@ checksum = "f1bddf1187be692e79c5ffeab891132dfb0f236ed36a43c7ed39f1165ee20191" [[package]] name = "vergen" -version = "8.1.3" +version = "8.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e03272e388fb78fc79481a493424f78d77be1d55f21bcd314b5a6716e195afe" +checksum = "3e884903ddba094df9bcdeb43b41170658bb4c2001ca8c47df7368244f4210ee" dependencies = [ "anyhow", "git2", diff --git a/zebrad/Cargo.toml b/zebrad/Cargo.toml index 2d28a3d345f..4d5002c59db 100644 --- a/zebrad/Cargo.toml +++ b/zebrad/Cargo.toml @@ -185,7 +185,7 @@ proptest-derive = { version = "0.3.0", optional = true } console-subscriber = { version = "0.1.8", optional = true } [build-dependencies] -vergen = { version = "8.1.3", default-features = false, features = ["cargo", "git", "git2", "rustc"] } +vergen = { version = "8.2.0", default-features = false, features = ["cargo", "git", "git2", "rustc"] } # test feature lightwalletd-grpc-tests tonic-build = { version = "0.9.2", optional = true } From ec2e9ca276643f71ce8405e06019b0e016bfec5a Mon Sep 17 00:00:00 2001 From: Pili Guerra Date: Tue, 23 May 2023 04:33:14 +0100 Subject: [PATCH 007/265] Delete outdated `TODOs` refering to closed issues (#6732) * ZIPs were updated to remove ambiguity, this was tracked in #1267. 
* #2105 was fixed by #3039 and #2379 was closed by #3069 * #2230 was a duplicate of #2231 which was closed by #2511 * #3235 was obsoleted by #2156 which was fixed by #3505 * #1850 was fixed by #2944, #1851 was fixed by #2961 and #2902 was fixed by #2969 * We migrated to Rust 2021 edition in Jan 2022 with #3332 * #1631 was closed as not needed * #338 was fixed by #3040 and #1162 was fixed by #3067 * #2079 was fixed by #2445 * #4794 was fixed by #6122 * #1678 stopped being an issue * #3151 was fixed by #3934 * #3204 was closed as not needed * #1213 was fixed by #4586 * #1774 was closed as not needed * #4633 was closed as not needed * Clarify behaviour of difficulty spacing Co-authored-by: teor * Update comment to reflect implemented behaviour Co-authored-by: teor * Update comment to reflect implemented behaviour when retrying block downloads Co-authored-by: teor * Update `TODO` to remove closed issue and clarify when we might want to fix Co-authored-by: teor * Update `TODO` to remove closed issue and clarify what we might want to change in future Co-authored-by: teor * Clarify benefits of how we do block verification Co-authored-by: teor * Fix rustfmt errors --------- Co-authored-by: teor --- zebra-chain/src/orchard/sinsemilla.rs | 4 ---- zebra-chain/src/work/difficulty/tests/vectors.rs | 4 +++- zebra-consensus/src/transaction.rs | 4 ---- zebra-network/src/config.rs | 1 - zebra-network/src/peer_set/limit.rs | 3 ++- zebra-network/src/peer_set/set.rs | 2 -- .../src/peer_set/unready_service/tests/vectors.rs | 2 -- zebra-network/src/policies.rs | 7 +++++-- zebra-network/src/protocol/external/codec.rs | 8 ++++---- zebra-network/src/protocol/external/message.rs | 2 +- zebra-rpc/src/methods/get_block_template_rpcs.rs | 3 +-- zebra-state/src/config.rs | 5 +---- zebra-state/src/service/check/utxo.rs | 1 - zebra-state/src/service/finalized_state/tests/prop.rs | 1 - .../finalized_state/zebra_db/block/tests/snapshot.rs | 1 - 15 files changed, 17 insertions(+), 31 deletions(-) 
diff --git a/zebra-chain/src/orchard/sinsemilla.rs b/zebra-chain/src/orchard/sinsemilla.rs index 060fbfb39e8..d7d05813a5c 100644 --- a/zebra-chain/src/orchard/sinsemilla.rs +++ b/zebra-chain/src/orchard/sinsemilla.rs @@ -159,10 +159,6 @@ pub fn sinsemilla_hash(D: &[u8], M: &BitVec) -> Option { extract_p_bottom(sinsemilla_hash_to_point(D, M)) } -// TODO: test the above correctness and compatibility with the zcash-hackworks test vectors -// https://github.com/ZcashFoundation/zebra/issues/2079 -// https://github.com/zcash-hackworks/zcash-test-vectors/pulls - #[cfg(test)] mod tests { diff --git a/zebra-chain/src/work/difficulty/tests/vectors.rs b/zebra-chain/src/work/difficulty/tests/vectors.rs index 3a64cdec6f1..d198fd32c4b 100644 --- a/zebra-chain/src/work/difficulty/tests/vectors.rs +++ b/zebra-chain/src/work/difficulty/tests/vectors.rs @@ -457,7 +457,9 @@ fn check_testnet_minimum_difficulty_block(height: block::Height) -> Result<(), R .signed_duration_since(previous_block.header.time); // zcashd requires a gap that's strictly greater than 6 times the target - // threshold, but ZIP-205 and ZIP-208 are ambiguous. See bug #1276. + // threshold, as documented in ZIP-205 and ZIP-208: + // https://zips.z.cash/zip-0205#change-to-difficulty-adjustment-on-testnet + // https://zips.z.cash/zip-0208#minimum-difficulty-blocks-on-testnet match NetworkUpgrade::minimum_difficulty_spacing_for_height(Network::Testnet, height) { None => Err(eyre!("the minimum difficulty rule is not active"))?, Some(spacing) if (time_gap <= spacing) => Err(eyre!( diff --git a/zebra-consensus/src/transaction.rs b/zebra-consensus/src/transaction.rs index 28ecc7e0394..674438c23c0 100644 --- a/zebra-consensus/src/transaction.rs +++ b/zebra-consensus/src/transaction.rs @@ -740,10 +740,6 @@ where orchard_shielded_data, &shielded_sighash, )?)) - - // TODO: - // - verify orchard shielded pool (ZIP-224) (#2105) - // - shielded input and output limits? 
(#2379) } /// Verifies if a V5 `transaction` is supported by `network_upgrade`. diff --git a/zebra-network/src/config.rs b/zebra-network/src/config.rs index 14f46ec4ff2..c1d49ab7905 100644 --- a/zebra-network/src/config.rs +++ b/zebra-network/src/config.rs @@ -347,7 +347,6 @@ impl<'de> Deserialize<'de> for Config { let config = DConfig::deserialize(deserializer)?; - // TODO: perform listener DNS lookups asynchronously with a timeout (#1631) let listen_addr = match config.listen_addr.parse::() { Ok(socket) => Ok(socket), Err(_) => match config.listen_addr.parse::() { diff --git a/zebra-network/src/peer_set/limit.rs b/zebra-network/src/peer_set/limit.rs index 6c1bfc76f2b..5b5f4a61273 100644 --- a/zebra-network/src/peer_set/limit.rs +++ b/zebra-network/src/peer_set/limit.rs @@ -178,8 +178,9 @@ impl Drop for ConnectionTracker { // We ignore disconnected errors, because the receiver can be dropped // before some connections are dropped. + // # Security // - // TODO: This channel will be bounded by the connection limit (#1850, #1851, #2902). + // This channel is actually bounded by the inbound and outbound connection limit. let _ = self.close_notification_tx.send(ConnectionClosed); } } diff --git a/zebra-network/src/peer_set/set.rs b/zebra-network/src/peer_set/set.rs index c668c3a0d83..abdd2a87495 100644 --- a/zebra-network/src/peer_set/set.rs +++ b/zebra-network/src/peer_set/set.rs @@ -418,8 +418,6 @@ where for guard in self.guards.iter() { guard.abort(); } - - // TODO: implement graceful shutdown for InventoryRegistry (#1678) } /// Check busy peer services for request completion or errors. diff --git a/zebra-network/src/peer_set/unready_service/tests/vectors.rs b/zebra-network/src/peer_set/unready_service/tests/vectors.rs index 6869900f93d..4f78980ea6a 100644 --- a/zebra-network/src/peer_set/unready_service/tests/vectors.rs +++ b/zebra-network/src/peer_set/unready_service/tests/vectors.rs @@ -1,6 +1,4 @@ //! Fixed test vectors for unready services. -//! -//! 
TODO: test that inner service errors are handled correctly (#3204) use std::marker::PhantomData; diff --git a/zebra-network/src/policies.rs b/zebra-network/src/policies.rs index 58ed093c64a..5e00207ba75 100644 --- a/zebra-network/src/policies.rs +++ b/zebra-network/src/policies.rs @@ -35,8 +35,11 @@ impl Policy // Let other tasks run, so we're more likely to choose a different peer, // and so that any notfound inv entries win the race to the PeerSet. // - // TODO: move syncer retries into the PeerSet, - // so we always choose different peers (#3235) + // # Security + // + // We want to choose different peers for retries, so we have a better chance of getting each block. + // This is implemented by the connection state machine sending synthetic `notfound`s to the + // `InventoryRegistry`, as well as forwarding actual `notfound`s from peers. Box::pin(tokio::task::yield_now().map(move |()| retry_outcome)), ) } else { diff --git a/zebra-network/src/protocol/external/codec.rs b/zebra-network/src/protocol/external/codec.rs index aec54772c85..7aee299dafa 100644 --- a/zebra-network/src/protocol/external/codec.rs +++ b/zebra-network/src/protocol/external/codec.rs @@ -184,10 +184,10 @@ impl Codec { /// Obtain the size of the body of a given message. This will match the /// number of bytes written to the writer provided to `write_body` for the /// same message. - /// - /// TODO: Replace with a size estimate, to avoid multiple serializations - /// for large data structures like lists, blocks, and transactions. - /// See #1774. + // # Performance TODO + // + // If this code shows up in profiles, replace with a size estimate or cached size, + // to avoid multiple serializations for large data structures like lists, blocks, and transactions. 
fn body_length(&self, msg: &Message) -> usize { let mut writer = FakeWriter(0); diff --git a/zebra-network/src/protocol/external/message.rs b/zebra-network/src/protocol/external/message.rs index 009566dc24d..f8ee8cbc9b8 100644 --- a/zebra-network/src/protocol/external/message.rs +++ b/zebra-network/src/protocol/external/message.rs @@ -401,7 +401,7 @@ impl TryFrom for VersionMessage { } } -// TODO: add tests for Error conversion and Reject message serialization (#4633) +// TODO: add tests for Error conversion and Reject message serialization // (Zebra does not currently send reject messages, and it ignores received reject messages.) impl From for Message where diff --git a/zebra-rpc/src/methods/get_block_template_rpcs.rs b/zebra-rpc/src/methods/get_block_template_rpcs.rs index 5a94be6ceb0..f7ea4e3f98f 100644 --- a/zebra-rpc/src/methods/get_block_template_rpcs.rs +++ b/zebra-rpc/src/methods/get_block_template_rpcs.rs @@ -110,8 +110,7 @@ pub trait GetBlockTemplateRpc { /// - the parent block is a valid block that Zebra already has, or will receive soon. /// /// Zebra verifies blocks in parallel, and keeps recent chains in parallel, - /// so moving between chains is very cheap. (But forking a new chain may take some time, - /// until bug #4794 is fixed.) + /// so moving between chains and forking chains is very cheap. /// /// This rpc method is available only if zebra is built with `--features getblocktemplate-rpcs`. #[rpc(name = "getblocktemplate")] diff --git a/zebra-state/src/config.rs b/zebra-state/src/config.rs index b350f82ac24..ba62f834c9a 100644 --- a/zebra-state/src/config.rs +++ b/zebra-state/src/config.rs @@ -29,10 +29,7 @@ pub struct Config { /// When Zebra's state format changes, it creates a new state subdirectory for that version, /// and re-syncs from genesis. /// - /// Old state versions are [not automatically deleted](https://github.com/ZcashFoundation/zebra/issues/1213). - /// It is ok to manually delete old state versions. 
- /// - /// It is also ok to delete the entire cached state directory. + /// It is ok to delete the entire cached state directory. /// If you do, Zebra will re-sync from genesis next time it is launched. /// /// The default directory is platform dependent, based on diff --git a/zebra-state/src/service/check/utxo.rs b/zebra-state/src/service/check/utxo.rs index 22001923702..0cd8a0f0dc2 100644 --- a/zebra-state/src/service/check/utxo.rs +++ b/zebra-state/src/service/check/utxo.rs @@ -229,7 +229,6 @@ pub fn remaining_transaction_value( utxos: &HashMap, ) -> Result<(), ValidateContextError> { for (tx_index_in_block, transaction) in prepared.block.transactions.iter().enumerate() { - // TODO: check coinbase transaction remaining value (#338, #1162) if transaction.is_coinbase() { continue; } diff --git a/zebra-state/src/service/finalized_state/tests/prop.rs b/zebra-state/src/service/finalized_state/tests/prop.rs index 5893d4ff2ea..bdc6438f551 100644 --- a/zebra-state/src/service/finalized_state/tests/prop.rs +++ b/zebra-state/src/service/finalized_state/tests/prop.rs @@ -35,7 +35,6 @@ fn blocks_with_v5_transactions() -> Result<()> { ); prop_assert_eq!(Some(height), state.finalized_tip_height()); prop_assert_eq!(hash.unwrap(), block.hash); - // TODO: check that the nullifiers were correctly inserted (#2230) height = Height(height.0 + 1); } }); diff --git a/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshot.rs b/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshot.rs index f40b40156b8..8ce1e67ece5 100644 --- a/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshot.rs +++ b/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshot.rs @@ -253,7 +253,6 @@ fn snapshot_block_and_transaction_data(state: &FinalizedState) { // test the rest of the chain data (value balance). 
let history_tree_at_tip = state.history_tree(); - // TODO: split out block snapshots into their own function (#3151) for query_height in 0..=max_height.0 { let query_height = Height(query_height); From 0b8e73206f1849a29dd16b8244c76ecd5ca17e57 Mon Sep 17 00:00:00 2001 From: Marek Date: Tue, 23 May 2023 15:18:57 +0200 Subject: [PATCH 008/265] change(docs): Update User Docs (#6733) * Move releases info to `Building Zebra` * Move Zebra use-cases to `Using Zebra` * Point the links in Zebra use-cases to docs * Move the contents of `Release Candidates` * Refactor the `System Requirements` link * Update the syncing times * Update notes on performance * Move data usage to `System Requirements` * Remove "building Zebra" from lightwalletd docs I think we can assume people will follow the previous parts of the docs for how to build Zebra. * Move lightwalletd details from `README.md` to docs `README.md` already mentions lightwalletd from the `Using Zebra` section, and refers the reader to the docs where the details were moved and refactored. 
* Mention `lightwalletd` and mining in Running Zebra * Move Troubleshooting to its own file * Move "Improving Performance" to its own file * Move instructions for ARM to "Installing Zebra" * Reword the Testnet sync duration description Co-authored-by: Pili Guerra * Move "Improving Performance" to "Troubleshooting" * Remove the Testnet unreliability caveat --------- Co-authored-by: Pili Guerra --- README.md | 57 +++++++-------------- book/src/SUMMARY.md | 1 + book/src/user/install.md | 6 +++ book/src/user/lightwalletd.md | 45 ++++++++++------- book/src/user/requirements.md | 54 +++++--------------- book/src/user/run.md | 83 +++--------------------------- book/src/user/troubleshooting.md | 87 ++++++++++++++++++++++++++++++++ 7 files changed, 157 insertions(+), 176 deletions(-) create mode 100644 book/src/user/troubleshooting.md diff --git a/README.md b/README.md index 362f19c8aff..9090ccb5c5c 100644 --- a/README.md +++ b/README.md @@ -15,7 +15,6 @@ - [Docker](#docker) - [Building Zebra](#building-zebra) - [Optional Features](#optional-features) - - [Configuring JSON-RPC for lightwalletd](#configuring-json-rpc-for-lightwalletd) - [Network Ports](#network-ports) - [Known Issues](#known-issues) - [Future Work](#future-work) @@ -46,30 +45,26 @@ You would want to run Zebra if you want to contribute to the Zcash network: the more nodes are run, the more reliable the network will be in terms of speed and resistance to denial of service attacks, for example. -Zebra aims to be [faster, more secure, and more easily extensible](https://doc.zebra.zfnd.org/zebrad/index.html#zebra-advantages) -than other Zcash implementations. - -## Release Candidates - -Every few weeks, we release a [new Zebra version](https://github.com/ZcashFoundation/zebra/releases). - -Zebra's network stack is interoperable with `zcashd`, -and Zebra implements all the features required to reach Zcash network consensus. -Currently, Zebra validates all of the Zcash consensus rules for the NU5 network upgrade. 
- Zebra validates blocks and transactions, but needs extra software to generate them: -- to generate transactions, [configure `zebrad`'s JSON-RPC port](https://github.com/ZcashFoundation/zebra#configuring-json-rpc-for-lightwalletd), - and use a light wallet with `lightwalletd` and Zebra. -- to generate blocks, [compile `zebrad` with the `getblocktemplate-rpcs` feature](https://doc.zebra.zfnd.org/zebrad/#json-rpc), configure the JSON-RPC port, - and use a mining pool or miner with Zebra's mining JSON-RPCs. - Mining support is currently incomplete, experimental, and off by default. +- To generate transactions, [run Zebra with + `lightwalletd`](https://zebra.zfnd.org/user/lightwalletd.html). +- To generate blocks, [enable mining + support](https://zebra.zfnd.org/user/mining.html), and use a mining pool or + miner with Zebra's mining JSON-RPCs. Mining support is currently incomplete, + experimental, and off by default. + +Zebra's network stack is interoperable with `zcashd`, and Zebra implements all +the features required to reach Zcash network consensus, including the validation +of all the consensus rules for the NU5 network upgrade. +[Here](https://doc.zebra.zfnd.org/zebrad/index.html#zebra-advantages) are some +benefits of Zebra. ## Getting Started You can run Zebra using our Docker image or you can build it manually. Please -see the [requirements section of the Zebra Book](https://zebra.zfnd.org/user/requirements.html) for system -requirements. +see the [System Requirements](https://zebra.zfnd.org/user/requirements.html) +section in the Zebra book for system requirements. ### Docker @@ -91,6 +86,9 @@ Zebra is tested with the latest `stable` Rust version. Earlier versions are not supported or tested. Note that Zebra's code currently uses features introduced in Rust 1.68, or any later stable release. +Every few weeks, we release a [new Zebra +version](https://github.com/ZcashFoundation/zebra/releases). 
+ Below are quick summaries for installing the dependencies on your machine.

General instructions for installing dependencies

@@ -152,27 +150,6 @@ documentation](https://doc.zebra.zfnd.org/zebrad/index.html#zebra-feature-flags) Some debugging and monitoring features are disabled in release builds to increase performance. -### Configuring JSON-RPC for lightwalletd - -To use `zebrad` as a `lightwalletd` backend, give it this `~/.config/zebrad.toml`: - -```toml -[rpc] -# listen for RPC queries on localhost -listen_addr = '127.0.0.1:8232' - -# automatically use multiple CPU threads -parallel_cpu_threads = 0 -``` - -**WARNING:** This config allows multiple Zebra instances to share the same RPC port. -See the [RPC config documentation](https://doc.zebra.zfnd.org/zebra_rpc/config/struct.Config.html) for details. - -`lightwalletd` also requires a `zcash.conf` file. - -It is recommended to use [adityapk00/lightwalletd](https://github.com/adityapk00/lightwalletd) because that is used in testing. -Other `lightwalletd` forks have limited support, see the [detailed `lightwalletd` instructions](https://github.com/ZcashFoundation/zebra/blob/main/book/src/user/lightwalletd.md#sync-lightwalletd). - ### Network Ports Zebra uses the following inbound and outbound TCP ports: diff --git a/book/src/SUMMARY.md b/book/src/SUMMARY.md index a98c39c5e97..6840019a278 100644 --- a/book/src/SUMMARY.md +++ b/book/src/SUMMARY.md @@ -16,6 +16,7 @@ - [Mining](user/mining.md) - [Testnet Mining with s-nomp](user/mining-testnet-s-nomp.md) - [Kibana blockchain explorer](user/elasticsearch.md) + - [Troubleshooting](user/troubleshooting.md) - [Developer Documentation](dev.md) - [Contribution Guide](CONTRIBUTING.md) - [Design Overview](dev/overview.md) diff --git a/book/src/user/install.md b/book/src/user/install.md index fad05d693c2..6f0394839d6 100644 --- a/book/src/user/install.md +++ b/book/src/user/install.md @@ -2,6 +2,12 @@ Follow the [Docker or compilation instructions in the README](https://github.com/ZcashFoundation/zebra#getting-started). 
+#### ARM + +If you're using an ARM machine, [install the Rust compiler for +ARM](https://rust-lang.github.io/rustup/installation/other.html). If you build +using the x86_64 tools, Zebra might run really slowly. + #### Build Troubleshooting If you're having trouble with: diff --git a/book/src/user/lightwalletd.md b/book/src/user/lightwalletd.md index aade5583f0c..cc9c99100af 100644 --- a/book/src/user/lightwalletd.md +++ b/book/src/user/lightwalletd.md @@ -1,12 +1,15 @@ # Running lightwalletd with zebra -Zebra's RPC methods can support a lightwalletd service backed by zebrad. +Zebra's RPC methods can support a lightwalletd service backed by zebrad. We +recommend using +[adityapk00/lightwalletd](https://github.com/adityapk00/lightwalletd) because we +use it in testing. Other `lightwalletd` forks have limited support, see the +[Sync lightwalletd](#sync-lightwalletd) section for more info. Contents: -- [Download and build Zebra](#download-and-build-zebra) - [Configure zebra for lightwalletd](#configure-zebra-for-lightwalletd) - - [RPC section](#rpc-section) + - [JSON-RPC](#json-rpc) - [Sync Zebra](#sync-zebra) - [Download and build lightwalletd](#download-and-build-lightwalletd) - [Sync lightwalletd](#sync-lightwalletd) @@ -15,16 +18,8 @@ Contents: - [Download and build the cli-wallet](#download-and-build-the-cli-wallet) - [Run the wallet](#run-the-wallet) -## Download and build Zebra -[#download-and-build-zebra]: #download-and-build-zebra - -```console -cargo install --locked --git https://github.com/ZcashFoundation/zebra zebrad -``` - -Zebra binary will be at ` ~/.cargo/bin/zebrad`. - ## Configure zebra for lightwalletd + [#configure-zebra-for-lightwalletd]: #configure-zebra-for-lightwalletd We need a zebra configuration file. First, we create a file with the default settings: @@ -37,17 +32,33 @@ The above command places the generated `zebrad.toml` config file in the default Tweak the following option in order to prepare for lightwalletd setup. 
-### RPC section
-[#rpc-section]: #rpc-section
+### JSON-RPC

-This change is required for zebra to behave as an RPC endpoint. The standard port for RPC endpoint is `8232`.
+[#rpc-section]: #json-rpc

-```
+We need to configure Zebra to behave as an RPC endpoint. The standard RPC port
+for Zebra is:
+
+- `8232` for Mainnet, and
+- `18232` for Testnet.
+
+For example, to use Zebra as a `lightwalletd` backend on Mainnet, give it this
+`~/.config/zebrad.toml`:
+
+```toml
 [rpc]
-listen_addr = "127.0.0.1:8232"
+# listen for RPC queries on localhost
+listen_addr = '127.0.0.1:8232'
+
+# automatically use multiple CPU threads
+parallel_cpu_threads = 0
 ```

+**WARNING:** This config allows multiple Zebra instances to share the same RPC port.
+See the [RPC config documentation](https://doc.zebra.zfnd.org/zebra_rpc/config/struct.Config.html) for details.
+
 ## Sync Zebra
+
 [#sync-zebra]: #sync-zebra

 With the configuration in place you can start synchronizing Zebra with the Zcash blockchain. This may take a while depending on your hardware.
diff --git a/book/src/user/requirements.md b/book/src/user/requirements.md
index 70ee6b00c5b..d12b16ce338 100644
--- a/book/src/user/requirements.md
+++ b/book/src/user/requirements.md
@@ -30,13 +30,20 @@ Zebra uses the following inbound and outbound TCP ports:

 - 8233 on Mainnet
 - 18233 on Testnet

-Outbound connections are required to sync, inbound connections are optional.
-Zebra also needs access to the Zcash DNS seeders, via the OS DNS resolver
-(usually port 53).
+If you configure Zebra with a specific
+[`listen_addr`](https://doc.zebra.zfnd.org/zebra_network/struct.Config.html#structfield.listen_addr),
+it will advertise this address to other nodes for inbound connections. Outbound
+connections are required to sync, inbound connections are optional. Zebra also
+needs access to the Zcash DNS seeders, via the OS DNS resolver (usually port
+53).

-The typical Mainnet network usage is:
+Zebra makes outbound connections to peers on any port. 
But `zcashd` prefers +peers on the default ports, so that it can't be used for DDoS attacks on other +networks. -- Initial sync: 300 GB download, as already noted, we expect the initial +### Typical Mainnet Network Usage + +- Initial sync: 300 GB download. As already noted, we expect the initial download to grow. - Ongoing updates: 10 MB - 10 GB upload and download per day, depending on user-created transaction size and peer requests. @@ -51,40 +58,3 @@ ticket.](https://github.com/ZcashFoundation/zebra/issues/new/choose) ## Sentry Production Monitoring Compile Zebra with `--features sentry` to monitor it using Sentry in production. - -# Troubleshooting - -We continuously test that our builds and tests pass on the _latest_ [GitHub -Runners](https://docs.github.com/en/actions/using-github-hosted-runners/about-github-hosted-runners#supported-runners-and-hardware-resources) -for: - -- macOS, -- Ubuntu, -- Docker: - - Debian Bullseye. - -## Memory Issues - -- If Zebra's build runs out of RAM, try setting `export CARGO_BUILD_JOBS=2`. -- If Zebra's tests timeout or run out of RAM, try running `cargo test -- --test-threads=2`. Note that `cargo` uses all processor cores on your machine - by default. - -## Network Issues - -- Some of Zebra's tests download Zcash blocks, so they might be unreliable - depending on your network connection. You can set `ZEBRA_SKIP_NETWORK_TESTS=1` - to skip the network tests. -- Zebra may be unreliable on Testnet, and under less-than-perfect network - conditions. See our [future - work](https://github.com/ZcashFoundation/zebra#future-work) for details. - -## Issues with Tests on macOS - -Some of Zebra's tests deliberately cause errors that make Zebra panic. macOS -records these panics as crash reports. 
If you are seeing "Crash Reporter" -dialogs during Zebra tests, you can disable them using this Terminal.app -command: - -```sh -defaults write com.apple.CrashReporter DialogType none -``` diff --git a/book/src/user/run.md b/book/src/user/run.md index 2ee478996b6..8d383db60f6 100644 --- a/book/src/user/run.md +++ b/book/src/user/run.md @@ -9,7 +9,12 @@ The configuration format is the TOML encoding of the internal config structure, and documentation for all of the config options can be found [here](https://doc.zebra.zfnd.org/zebrad/config/struct.ZebradConfig.html). -* `zebrad start` starts a full node. +- `zebrad start` starts a full node. + +You can run Zebra as a: + +- [`lightwalletd` backend](https://zebra.zfnd.org/user/lightwalletd.html), and +- experimental [mining backend](https://zebra.zfnd.org/user/mining.html). ## Supported versions @@ -21,79 +26,3 @@ Always run a supported version of Zebra, and upgrade it regularly, so it doesn't - `1`: Application exited unsuccessfully - `2`: Application crashed - `zebrad` may also return platform-dependent codes. - -## Network Ports and Data Usage - -`zebrad`'s default ports and network usage are -[documented in the README.](https://github.com/ZcashFoundation/zebra#network-ports-and-data-usage) - -If Zebra is configured with a specific [`listen_addr`](https://doc.zebra.zfnd.org/zebra_network/struct.Config.html#structfield.listen_addr), -it will advertise this address to other nodes for inbound connections. - -Zebra makes outbound connections to peers on any port. -But `zcashd` prefers peers on the default ports, -so that it can't be used for DDoS attacks on other networks. - -The major constraint we've found on `zebrad` performance is the network weather, -especially the ability to make good connections to other Zcash network peers. - -Zebra needs some peers which have a round-trip latency of 2 seconds or less. -If this is a problem for you, please let us know! 
- -## Improving Performance - -Zebra usually syncs in around a day, depending on your network connection, and the overall Zcash network load. - -If you're having trouble syncing, try the following config changes: - -### Release Build - -Make sure you're using a release build on your native architecture. - -If you're using an ARM machine, -[install the Rust compiler for ARM](https://rust-lang.github.io/rustup/installation/other.html). -If you build using the x86_64 tools, Zebra might run really slowly. - -Run a release build using the -[`cargo install` command from the README.](https://github.com/ZcashFoundation/zebra#build-and-run-instructions) - -### Syncer Lookahead Limit - -If your connection is slow, try -[downloading fewer blocks at a time](https://doc.zebra.zfnd.org/zebrad/config/struct.SyncSection.html#structfield.lookahead_limit): - -```toml -[sync] -lookahead_limit = 1000 -max_concurrent_block_requests = 25 -``` - -### Peer Set Size - -If your connection is slow, try [connecting to fewer peers](https://doc.zebra.zfnd.org/zebra_network/struct.Config.html#structfield.peerset_initial_target_size): - -```toml -[network] -peerset_initial_target_size = 25 -``` - -### Turn off debug logging - -Zebra logs at info level by default. 
- -If Zebra is slow, make sure it is logging at info level: - -```toml -[tracing] -filter = 'info' -``` - -Or restrict debug logging to a specific Zebra component: - -```toml -[tracing] -filter = 'info,zebra_network=debug' -``` - -If you keep on seeing multiple info logs per second, please -[open a bug.](https://github.com/ZcashFoundation/zebra/issues/new/choose) diff --git a/book/src/user/troubleshooting.md b/book/src/user/troubleshooting.md new file mode 100644 index 00000000000..b1a89d43576 --- /dev/null +++ b/book/src/user/troubleshooting.md @@ -0,0 +1,87 @@ +# Troubleshooting + +We continuously test that our builds and tests pass on the _latest_ [GitHub +Runners](https://docs.github.com/en/actions/using-github-hosted-runners/about-github-hosted-runners#supported-runners-and-hardware-resources) +for: + +- macOS, +- Ubuntu, +- Docker: + - Debian Bullseye. + +## Memory Issues + +- If Zebra's build runs out of RAM, try setting `export CARGO_BUILD_JOBS=2`. +- If Zebra's tests timeout or run out of RAM, try running `cargo test -- --test-threads=2`. Note that `cargo` uses all processor cores on your machine + by default. + +## Network Issues + +Some of Zebra's tests download Zcash blocks, so they might be unreliable +depending on your network connection. You can set `ZEBRA_SKIP_NETWORK_TESTS=1` +to skip the network tests. + +## Issues with Tests on macOS + +Some of Zebra's tests deliberately cause errors that make Zebra panic. macOS +records these panics as crash reports. If you are seeing "Crash Reporter" +dialogs during Zebra tests, you can disable them using this Terminal.app +command: + +```sh +defaults write com.apple.CrashReporter DialogType none +``` + +## Improving Performance + +Zebra usually syncs in around three days on Mainnet and half a day on +Testnet. The sync speed depends on your network connection and the overall Zcash +network load. 
The major constraint we've found on `zebrad` performance is the +network weather, especially the ability to make good connections to other Zcash +network peers. If you're having trouble syncing, try the following config +changes. + +### Release Build + +Make sure you're using a release build on your native architecture. + +### Syncer Lookahead Limit + +If your connection is slow, try +[downloading fewer blocks at a time](https://doc.zebra.zfnd.org/zebrad/config/struct.SyncSection.html#structfield.lookahead_limit): + +```toml +[sync] +lookahead_limit = 1000 +max_concurrent_block_requests = 25 +``` + +### Peer Set Size + +If your connection is slow, try [connecting to fewer peers](https://doc.zebra.zfnd.org/zebra_network/struct.Config.html#structfield.peerset_initial_target_size): + +```toml +[network] +peerset_initial_target_size = 25 +``` + +### Turn off debug logging + +Zebra logs at info level by default. + +If Zebra is slow, make sure it is logging at info level: + +```toml +[tracing] +filter = 'info' +``` + +Or restrict debug logging to a specific Zebra component: + +```toml +[tracing] +filter = 'info,zebra_network=debug' +``` + +If you keep on seeing multiple info logs per second, please +[open a bug.](https://github.com/ZcashFoundation/zebra/issues/new/choose) From 0918663e3ea3ff509e07925fb8d7ad97e7bf807b Mon Sep 17 00:00:00 2001 From: teor Date: Wed, 24 May 2023 06:50:29 +1000 Subject: [PATCH 009/265] fix(net): Rate-limit MetaAddrChange::Responded from peers (#6738) * Rate-limit MetaAddrChange::Responded from peers * Document rate-limits on the address book updater channel --- zebra-network/src/peer/handshake.rs | 46 +++++++++++++++------ zebra-network/src/peer_set/candidate_set.rs | 6 +++ zebra-network/src/peer_set/initialize.rs | 6 +++ 3 files changed, 46 insertions(+), 12 deletions(-) diff --git a/zebra-network/src/peer/handshake.rs b/zebra-network/src/peer/handshake.rs index 40c07493d64..f6660ac0597 100644 --- a/zebra-network/src/peer/handshake.rs +++ 
b/zebra-network/src/peer/handshake.rs @@ -914,6 +914,10 @@ where // addresses. Otherwise, malicious peers could interfere with the // address book state of other peers by providing their addresses in // `Version` messages. + // + // New alternate peer address and peer responded updates are rate-limited because: + // - opening connections is rate-limited + // - we only send these messages once per handshake let alternate_addrs = connected_addr.get_alternate_addrs(remote_canonical_addr); for alt_addr in alternate_addrs { let alt_addr = MetaAddr::new_alternate(alt_addr, &remote_services); @@ -1010,18 +1014,10 @@ where "addr" => connected_addr.get_transient_addr_label(), ); - if let Some(book_addr) = connected_addr.get_address_book_addr() { - if matches!(msg, Message::Ping(_) | Message::Pong(_)) { - // the collector doesn't depend on network activity, - // so this await should not hang - let _ = inbound_ts_collector - .send(MetaAddr::new_responded( - book_addr, - &remote_services, - )) - .await; - } - } + // # Security + // + // Peer messages are not rate-limited, so we can't send anything + // to a shared channel or do anything expensive here. 
} Err(err) => { metrics::counter!( @@ -1031,6 +1027,12 @@ where "addr" => connected_addr.get_transient_addr_label(), ); + // # Security + // + // Peer errors are rate-limited because: + // - opening connections is rate-limited + // - the number of connections is limited + // - after the first error, the peer is disconnected if let Some(book_addr) = connected_addr.get_address_book_addr() { let _ = inbound_ts_collector .send(MetaAddr::new_errored(book_addr, remote_services)) @@ -1295,6 +1297,20 @@ async fn send_periodic_heartbeats_run_loop( &remote_services, ) .await?; + + // # Security + // + // Peer heartbeats are rate-limited because: + // - opening connections is rate-limited + // - the number of connections is limited + // - Zebra initiates each heartbeat using a timer + if let Some(book_addr) = connected_addr.get_address_book_addr() { + // the collector doesn't depend on network activity, + // so this await should not hang + let _ = heartbeat_ts_collector + .send(MetaAddr::new_responded(book_addr, &remote_services)) + .await; + } } unreachable!("unexpected IntervalStream termination") @@ -1399,6 +1415,12 @@ where Err(err) => { tracing::debug!(?err, "heartbeat error, shutting down"); + // # Security + // + // Peer errors and shutdowns are rate-limited because: + // - opening connections is rate-limited + // - the number of connections is limited + // - after the first error or shutdown, the peer is disconnected if let Some(book_addr) = connected_addr.get_address_book_addr() { let _ = address_book_updater .send(MetaAddr::new_errored(book_addr, *remote_services)) diff --git a/zebra-network/src/peer_set/candidate_set.rs b/zebra-network/src/peer_set/candidate_set.rs index 042a92b27aa..76006672c9a 100644 --- a/zebra-network/src/peer_set/candidate_set.rs +++ b/zebra-network/src/peer_set/candidate_set.rs @@ -316,6 +316,12 @@ where /// Add new `addrs` to the address book. 
async fn send_addrs(&self, addrs: impl IntoIterator) { + // # Security + // + // New gossiped peers are rate-limited because: + // - Zebra initiates requests for new gossiped peers + // - the fanout is limited + // - the number of addresses per peer is limited let addrs: Vec = addrs .into_iter() .map(MetaAddr::new_gossiped_change) diff --git a/zebra-network/src/peer_set/initialize.rs b/zebra-network/src/peer_set/initialize.rs index f9b002f941c..31c5a662586 100644 --- a/zebra-network/src/peer_set/initialize.rs +++ b/zebra-network/src/peer_set/initialize.rs @@ -433,6 +433,12 @@ async fn limit_initial_peers( // Send every initial peer to the address book, in preferred order. // (This treats initial peers the same way we treat gossiped peers.) + // + // # Security + // + // Initial peers are limited because: + // - the number of initial peers is limited + // - this code only runs once at startup for peer in preferred_peers.values().flatten() { let peer_addr = MetaAddr::new_initial_peer(*peer); // `send` only waits when the channel is full. From 3706ff6e67e3214a7acc8beb98dacd878771f0ab Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 23 May 2023 23:23:16 +0000 Subject: [PATCH 010/265] build(deps): bump regex from 1.8.1 to 1.8.2 (#6748) Bumps [regex](https://github.com/rust-lang/regex) from 1.8.1 to 1.8.2. - [Release notes](https://github.com/rust-lang/regex/releases) - [Changelog](https://github.com/rust-lang/regex/blob/master/CHANGELOG.md) - [Commits](https://github.com/rust-lang/regex/compare/1.8.1...1.8.2) --- updated-dependencies: - dependency-name: regex dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 10 +++++----- zebra-network/Cargo.toml | 2 +- zebra-state/Cargo.toml | 2 +- zebra-test/Cargo.toml | 2 +- zebra-utils/Cargo.toml | 2 +- zebrad/Cargo.toml | 2 +- 6 files changed, 10 insertions(+), 10 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index a6d4030924e..87b93e96e16 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3612,13 +3612,13 @@ dependencies = [ [[package]] name = "regex" -version = "1.8.1" +version = "1.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af83e617f331cc6ae2da5443c602dfa5af81e517212d9d611a5b3ba1777b5370" +checksum = "d1a59b5d8e97dee33696bf13c5ba8ab85341c002922fba050069326b9c498974" dependencies = [ "aho-corasick 1.0.1", "memchr", - "regex-syntax 0.7.1", + "regex-syntax 0.7.2", ] [[package]] @@ -3638,9 +3638,9 @@ checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" [[package]] name = "regex-syntax" -version = "0.7.1" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a5996294f19bd3aae0453a862ad728f60e6600695733dd5df01da90c54363a3c" +checksum = "436b050e76ed2903236f032a59761c1eb99e1b0aead2c257922771dab1fc8c78" [[package]] name = "reqwest" diff --git a/zebra-network/Cargo.toml b/zebra-network/Cargo.toml index d5b3c9516e6..75ab5a80504 100644 --- a/zebra-network/Cargo.toml +++ b/zebra-network/Cargo.toml @@ -35,7 +35,7 @@ ordered-map = "0.4.2" pin-project = "1.1.0" rand = { version = "0.8.5", package = "rand" } rayon = "1.7.0" -regex = "1.8.1" +regex = "1.8.2" serde = { version = "1.0.163", features = ["serde_derive"] } thiserror = "1.0.40" diff --git a/zebra-state/Cargo.toml b/zebra-state/Cargo.toml index cf5be35b714..3fb94021b75 100644 --- a/zebra-state/Cargo.toml +++ b/zebra-state/Cargo.toml @@ -43,7 +43,7 @@ itertools = "0.10.5" lazy_static = "1.4.0" metrics = "0.21.0" mset = "0.1.1" -regex = "1.8.1" +regex = 
"1.8.2" rlimit = "0.9.1" rocksdb = { version = "0.21.0", default_features = false, features = ["lz4"] } serde = { version = "1.0.163", features = ["serde_derive"] } diff --git a/zebra-test/Cargo.toml b/zebra-test/Cargo.toml index f5dd2e7a1b4..bf74e64865d 100644 --- a/zebra-test/Cargo.toml +++ b/zebra-test/Cargo.toml @@ -15,7 +15,7 @@ insta = "1.29.0" proptest = "1.1.0" once_cell = "1.17.1" rand = { version = "0.8.5", package = "rand" } -regex = "1.8.1" +regex = "1.8.2" tokio = { version = "1.28.0", features = ["full", "tracing", "test-util"] } tower = { version = "0.4.13", features = ["util"] } diff --git a/zebra-utils/Cargo.toml b/zebra-utils/Cargo.toml index 46ddc49d752..dc076ab5c07 100644 --- a/zebra-utils/Cargo.toml +++ b/zebra-utils/Cargo.toml @@ -67,7 +67,7 @@ zebra-chain = { path = "../zebra-chain" } itertools = { version = "0.10.5", optional = true } # These crates are needed for the search-issue-refs binary -regex = { version = "1.8.1", optional = true } +regex = { version = "1.8.2", optional = true } reqwest = { version = "0.11.18", optional = true } # These crates are needed for the zebra-checkpoints and search-issue-refs binaries diff --git a/zebrad/Cargo.toml b/zebrad/Cargo.toml index 4d5002c59db..f4d3fab357b 100644 --- a/zebrad/Cargo.toml +++ b/zebrad/Cargo.toml @@ -195,7 +195,7 @@ abscissa_core = { version = "0.5", features = ["testing"] } hex = "0.4.3" jsonrpc-core = "18.0.0" once_cell = "1.17.1" -regex = "1.8.1" +regex = "1.8.2" semver = "1.0.17" # zebra-rpc needs the preserve_order feature, it also makes test results more stable From 54214478045c219fd5367f2076f6471b2c8d3cd0 Mon Sep 17 00:00:00 2001 From: Marek Date: Wed, 24 May 2023 18:56:22 +0200 Subject: [PATCH 011/265] Listen on `0.0.0.0` instead of `127.0.0.1` (#6755) Binding `127.0.0.1` means that Zebra will accept inbound connections coming only from the loopback network interface. This is desirable as long as Zebra runs on a native machine. 
When Zebra runs inside a Docker container, incoming connections coming from the host machine don't come from the container's loopback interface. In order to be able to connect to Zebra from the host machine, we can listen on `0.0.0.0` so Zebra accepts inbound connections coming from any interface. Users then can limit inbound connection to the loopback of their host by ```bash docker run -p 127.0.0.1:8232:8232 zfnd/zebra:1.0.0-rc.8 ``` --- docker/Dockerfile | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/docker/Dockerfile b/docker/Dockerfile index 180a50e52b8..68d0a95be1b 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -180,18 +180,18 @@ RUN set -ex; \ { \ echo "[network]"; \ echo "network = '${NETWORK}'"; \ - echo "listen_addr = '127.0.0.1'"; \ + echo "listen_addr = '0.0.0.0'"; \ echo "[consensus]"; \ echo "checkpoint_sync = ${CHECKPOINT_SYNC}"; \ echo "[state]"; \ echo "cache_dir = '/zebrad-cache'"; \ echo "[rpc]"; \ - [ -n "$RPC_PORT" ] && echo "listen_addr = '127.0.0.1:${RPC_PORT}'"; \ + [ -n "$RPC_PORT" ] && echo "listen_addr = '0.0.0.0:${RPC_PORT}'"; \ echo "parallel_cpu_threads = 0"; \ echo "[metrics]"; \ - echo "#endpoint_addr = '127.0.0.1:9999'"; \ + echo "#endpoint_addr = '0.0.0.0:9999'"; \ echo "[tracing]"; \ - echo "#endpoint_addr = '127.0.0.1:3000'"; \ + echo "#endpoint_addr = '0.0.0.0:3000'"; \ } > "${ZEBRA_CONF_PATH}" From 56c9116649a1f735d59e87ebb2e1c6c7e1fcb71e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 24 May 2023 21:37:05 +0000 Subject: [PATCH 012/265] build(deps): bump criterion from 0.4.0 to 0.5.0 (#6757) Bumps [criterion](https://github.com/bheisler/criterion.rs) from 0.4.0 to 0.5.0. 
- [Changelog](https://github.com/bheisler/criterion.rs/blob/master/CHANGELOG.md) - [Commits](https://github.com/bheisler/criterion.rs/compare/0.4.0...0.5.0) --- updated-dependencies: - dependency-name: criterion dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 53 +++++++++++++++++++++--------------------- zebra-chain/Cargo.toml | 2 +- 2 files changed, 27 insertions(+), 28 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 87b93e96e16..20295ee0715 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -154,6 +154,12 @@ dependencies = [ "winapi", ] +[[package]] +name = "anstyle" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41ed9a86bf92ae6580e0a31281f65a1b1d867c0cc68d5346e2ae128dddfa6a7d" + [[package]] name = "anyhow" version = "1.0.71" @@ -704,31 +710,36 @@ dependencies = [ "atty", "bitflags 1.3.2", "strsim 0.8.0", - "textwrap 0.11.0", + "textwrap", "unicode-width", "vec_map", ] [[package]] name = "clap" -version = "3.2.25" +version = "4.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "93aae7a4192245f70fe75dd9157fc7b4a5bf53e88d30bd4396f7d8f9284d5acc" +dependencies = [ + "clap_builder", +] + +[[package]] +name = "clap_builder" +version = "4.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ea181bf566f71cb9a5d17a59e1871af638180a18fb0035c92ae62b705207123" +checksum = "4f423e341edefb78c9caba2d9c7f7687d0e72e89df3ce3394554754393ac3990" dependencies = [ + "anstyle", "bitflags 1.3.2", "clap_lex", - "indexmap", - "textwrap 0.16.0", ] [[package]] name = "clap_lex" -version = "0.2.4" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2850f2f5a82cbf437dd5af4d49848fbdfc27c157c3d010345776f952765261c5" -dependencies = [ - "os_str_bytes", -] +checksum = 
"2da6da31387c7e4ef160ffab6d5e7f00c42626fe39aea70a7b0f1773f7dd6c1b" [[package]] name = "codespan-reporting" @@ -870,19 +881,19 @@ dependencies = [ [[package]] name = "criterion" -version = "0.4.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7c76e09c1aae2bc52b3d2f29e13c6572553b30c4aa1b8a49fd70de6412654cb" +checksum = "9f9c16c823fba76d9643cc387e9677d9771abe0827561381815215c47f808da9" dependencies = [ "anes", - "atty", "cast", "ciborium", - "clap 3.2.25", + "clap 4.3.0", "criterion-plot", + "is-terminal", "itertools", - "lazy_static", "num-traits", + "once_cell", "oorandom", "plotters", "rayon", @@ -2851,12 +2862,6 @@ dependencies = [ "winapi", ] -[[package]] -name = "os_str_bytes" -version = "6.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ceedf44fb00f2d1984b0bc98102627ce622e083e49a5bacdb3e514fa4238e267" - [[package]] name = "overload" version = "0.1.1" @@ -4472,12 +4477,6 @@ dependencies = [ "unicode-width", ] -[[package]] -name = "textwrap" -version = "0.16.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "222a222a5bfe1bba4a77b45ec488a741b3cb8872e5e499451fd7d0129c9c7c3d" - [[package]] name = "thiserror" version = "1.0.40" diff --git a/zebra-chain/Cargo.toml b/zebra-chain/Cargo.toml index 5d448dda815..760cfc7e908 100644 --- a/zebra-chain/Cargo.toml +++ b/zebra-chain/Cargo.toml @@ -113,7 +113,7 @@ zebra-test = { path = "../zebra-test/", optional = true } [dev-dependencies] # Benchmarks -criterion = { version = "0.4.0", features = ["html_reports"] } +criterion = { version = "0.5.0", features = ["html_reports"] } # Error Handling & Formatting color-eyre = "0.6.2" From 8af4e572c985cd2e37844b77e61ee825f7a4f650 Mon Sep 17 00:00:00 2001 From: teor Date: Thu, 25 May 2023 09:53:53 +1000 Subject: [PATCH 013/265] fix(network): Ignore out of order Address Book changes, unless they are concurrent (#6717) * Ignore out of order Address Book changes, and 
restructure the function * Handle concurrent changes using the connection state machine order * Handle out of order changes correctly * Pass times through the call stack so they are consistent in tests * Add time arguments to tests * Fix tests that were broken by the address order checks * fastmod wall_ local_ zebra* * cargo fmt --all * Fix a bug in the concurrent change check * Test all the new apply and skip checks for address changes * Document more edge cases and increase the concurrency time slightly * Simplify enum ordering matches * Fix comment typos Co-authored-by: Arya --------- Co-authored-by: Arya --- zebra-network/src/address_book.rs | 13 +- zebra-network/src/constants.rs | 18 + zebra-network/src/meta_addr.rs | 354 +++++++++++++----- zebra-network/src/meta_addr/arbitrary.rs | 61 +-- zebra-network/src/meta_addr/tests/prop.rs | 64 ++-- zebra-network/src/meta_addr/tests/vectors.rs | 248 +++++++++++- .../src/peer_set/initialize/tests/vectors.rs | 7 +- .../tests/snapshot/get_block_template_rpcs.rs | 8 +- zebra-rpc/src/methods/tests/vectors.rs | 6 +- 9 files changed, 613 insertions(+), 166 deletions(-) diff --git a/zebra-network/src/address_book.rs b/zebra-network/src/address_book.rs index 0220928071a..fc0bda70dcd 100644 --- a/zebra-network/src/address_book.rs +++ b/zebra-network/src/address_book.rs @@ -14,7 +14,7 @@ use ordered_map::OrderedMap; use tokio::sync::watch; use tracing::Span; -use zebra_chain::parameters::Network; +use zebra_chain::{parameters::Network, serialization::DateTime32}; use crate::{ constants, @@ -228,10 +228,11 @@ impl AddressBook { /// Get the local listener address. /// /// This address contains minimal state, but it is not sanitized. 
- pub fn local_listener_meta_addr(&self) -> MetaAddr { + pub fn local_listener_meta_addr(&self, now: chrono::DateTime) -> MetaAddr { + let now: DateTime32 = now.try_into().expect("will succeed until 2038"); + MetaAddr::new_local_listener_change(self.local_listener) - .into_new_meta_addr() - .expect("unexpected invalid new local listener addr") + .local_listener_into_new_meta_addr(now) } /// Get the local listener [`SocketAddr`]. @@ -249,7 +250,7 @@ impl AddressBook { // Unconditionally add our local listener address to the advertised peers, // to replace any self-connection failures. The address book and change // constructors make sure that the SocketAddr is canonical. - let local_listener = self.local_listener_meta_addr(); + let local_listener = self.local_listener_meta_addr(now); peers.insert(local_listener.addr, local_listener); // Then sanitize and shuffle @@ -313,7 +314,7 @@ impl AddressBook { let instant_now = Instant::now(); let chrono_now = Utc::now(); - let updated = change.apply_to_meta_addr(previous); + let updated = change.apply_to_meta_addr(previous, instant_now, chrono_now); trace!( ?change, diff --git a/zebra-network/src/constants.rs b/zebra-network/src/constants.rs index 67e73874fd9..c6dfc0a3de0 100644 --- a/zebra-network/src/constants.rs +++ b/zebra-network/src/constants.rs @@ -90,6 +90,24 @@ pub const REQUEST_TIMEOUT: Duration = Duration::from_secs(20); /// nodes, and on testnet. pub const HANDSHAKE_TIMEOUT: Duration = Duration::from_secs(3); +/// The maximum time difference for two address book changes to be considered concurrent. +/// +/// This prevents simultaneous or nearby important changes or connection progress +/// being overridden by less important changes. +/// +/// This timeout should be less than: +/// - the [peer reconnection delay](MIN_PEER_RECONNECTION_DELAY), and +/// - the [peer keepalive/heartbeat interval](HEARTBEAT_INTERVAL). 
+/// +/// But more than: +/// - the amount of time between connection events and address book updates, +/// even under heavy load (in tests, we have observed delays up to 500ms), +/// - the delay between an outbound connection failing, +/// and the [CandidateSet](crate::peer_set::CandidateSet) registering the failure, and +/// - the delay between the application closing a connection, +/// and any remaining positive changes from the peer. +pub const CONCURRENT_ADDRESS_CHANGE_PERIOD: Duration = Duration::from_secs(5); + /// We expect to receive a message from a live peer at least once in this time duration. /// /// This is the sum of: diff --git a/zebra-network/src/meta_addr.rs b/zebra-network/src/meta_addr.rs index 3d9657fcb64..6fbd4e7ebdf 100644 --- a/zebra-network/src/meta_addr.rs +++ b/zebra-network/src/meta_addr.rs @@ -1,7 +1,7 @@ //! An address-with-metadata type used in Bitcoin networking. use std::{ - cmp::{Ord, Ordering}, + cmp::{max, Ord, Ordering}, time::Instant, }; @@ -79,6 +79,38 @@ impl PeerAddrState { AttemptPending | Responded | Failed => false, } } + + /// Returns the typical connection state machine order of `self` and `other`. + /// Partially ordered states are sorted in connection attempt order. + /// + /// See [`MetaAddrChange::apply_to_meta_addr()`] for more details. + fn connection_state_order(&self, other: &Self) -> Ordering { + use Ordering::*; + match (self, other) { + _ if self == other => Equal, + // Peers start in one of the "never attempted" states, + // then typically progress towards a "responded" or "failed" state. + // + // # Security + // + // Prefer gossiped addresses to alternate addresses, + // so that peers can't replace the addresses of other peers. + // (This is currently checked explicitly by the address update code, + // but we respect the same order here as a precaution.) 
+ (NeverAttemptedAlternate, _) => Less, + (_, NeverAttemptedAlternate) => Greater, + (NeverAttemptedGossiped, _) => Less, + (_, NeverAttemptedGossiped) => Greater, + (AttemptPending, _) => Less, + (_, AttemptPending) => Greater, + (Responded, _) => Less, + (_, Responded) => Greater, + // These patterns are redundant, but Rust doesn't assume that `==` is reflexive, + // so the first is still required (but unreachable). + (Failed, _) => Less, + //(_, Failed) => Greater, + } + } } // non-test code should explicitly specify the peer address state @@ -100,11 +132,7 @@ impl Ord for PeerAddrState { fn cmp(&self, other: &Self) -> Ordering { use Ordering::*; match (self, other) { - (Responded, Responded) - | (Failed, Failed) - | (NeverAttemptedGossiped, NeverAttemptedGossiped) - | (NeverAttemptedAlternate, NeverAttemptedAlternate) - | (AttemptPending, AttemptPending) => Equal, + _ if self == other => Equal, // We reconnect to `Responded` peers that have stopped sending messages, // then `NeverAttempted` peers, then `Failed` peers (Responded, _) => Less, @@ -115,7 +143,10 @@ impl Ord for PeerAddrState { (_, NeverAttemptedAlternate) => Greater, (Failed, _) => Less, (_, Failed) => Greater, - // AttemptPending is covered by the other cases + // These patterns are redundant, but Rust doesn't assume that `==` is reflexive, + // so the first is still required (but unreachable). + (AttemptPending, _) => Less, + //(_, AttemptPending) => Greater, } } } @@ -195,6 +226,9 @@ pub struct MetaAddr { #[derive(Copy, Clone, Debug, Eq, PartialEq)] #[cfg_attr(any(test, feature = "proptest-impl"), derive(Arbitrary))] pub enum MetaAddrChange { + // TODO: + // - split the common `addr` field into an outer struct + // /// Creates a `MetaAddr` for an initial peer. NewInitial { #[cfg_attr( @@ -694,7 +728,7 @@ impl MetaAddrChange { } /// Return the untrusted last seen time for this change, if available. 
- pub fn untrusted_last_seen(&self) -> Option { + pub fn untrusted_last_seen(&self, now: DateTime32) -> Option { match self { NewInitial { .. } => None, NewGossiped { @@ -703,15 +737,34 @@ impl MetaAddrChange { } => Some(*untrusted_last_seen), NewAlternate { .. } => None, // We know that our local listener is available - NewLocal { .. } => Some(DateTime32::now()), + NewLocal { .. } => Some(now), UpdateAttempt { .. } => None, UpdateResponded { .. } => None, UpdateFailed { .. } => None, } } + // # Concurrency + // + // We assign a time to each change when it is applied to the address book by either the + // address book updater or candidate set tasks. This is the time that the change was received + // from the updater channel, rather than the time that the message was read from the peer + // connection. + // + // Since the connection tasks run concurrently in an unspecified order, and the address book + // updater runs in a separate thread, these times are almost always very similar. If Zebra's + // address book is under load, we should use lower rate-limits for new inbound or outbound + // connections, disconnections, peer gossip crawls, or peer `UpdateResponded` updates. + // + // TODO: + // - move the time API calls from `impl MetaAddrChange` `last_*()` methods: + // - if they impact performance, call them once in the address book updater task, + // then apply them to all the waiting changes + // - otherwise, move them to the `impl MetaAddrChange` `new_*()` methods, + // so they are called in the connection tasks + // /// Return the last attempt for this change, if available. - pub fn last_attempt(&self) -> Option { + pub fn last_attempt(&self, now: Instant) -> Option { match self { NewInitial { .. } => None, NewGossiped { .. } => None, @@ -720,14 +773,14 @@ impl MetaAddrChange { // Attempt changes are applied before we start the handshake to the // peer address. So the attempt time is a lower bound for the actual // handshake time. - UpdateAttempt { .. 
} => Some(Instant::now()), + UpdateAttempt { .. } => Some(now), UpdateResponded { .. } => None, UpdateFailed { .. } => None, } } /// Return the last response for this change, if available. - pub fn last_response(&self) -> Option { + pub fn last_response(&self, now: DateTime32) -> Option { match self { NewInitial { .. } => None, NewGossiped { .. } => None, @@ -739,13 +792,13 @@ impl MetaAddrChange { // - we might send outdated last seen times to our peers, and // - the peer will appear to be live for longer, delaying future // reconnection attempts. - UpdateResponded { .. } => Some(DateTime32::now()), + UpdateResponded { .. } => Some(now), UpdateFailed { .. } => None, } } /// Return the last failure for this change, if available. - pub fn last_failure(&self) -> Option { + pub fn last_failure(&self, now: Instant) -> Option { match self { NewInitial { .. } => None, NewGossiped { .. } => None, @@ -758,7 +811,7 @@ impl MetaAddrChange { // states for longer, and // - the peer will appear to be used for longer, delaying future // reconnection attempts. - UpdateFailed { .. } => Some(Instant::now()), + UpdateFailed { .. } => Some(now), } } @@ -776,93 +829,212 @@ impl MetaAddrChange { } } - /// If this change can create a new `MetaAddr`, return that address. - pub fn into_new_meta_addr(self) -> Option { - Some(MetaAddr { + /// Returns the corresponding `MetaAddr` for this change. 
+ pub fn into_new_meta_addr(self, instant_now: Instant, local_now: DateTime32) -> MetaAddr { + MetaAddr { addr: self.addr(), services: self.untrusted_services(), - untrusted_last_seen: self.untrusted_last_seen(), - last_response: self.last_response(), - last_attempt: self.last_attempt(), - last_failure: self.last_failure(), + untrusted_last_seen: self.untrusted_last_seen(local_now), + last_response: self.last_response(local_now), + last_attempt: self.last_attempt(instant_now), + last_failure: self.last_failure(instant_now), last_connection_state: self.peer_addr_state(), - }) + } + } + + /// Returns the corresponding [`MetaAddr`] for a local listener change. + /// + /// This method exists so we don't have to provide an unused [`Instant`] to get a local + /// listener `MetaAddr`. + /// + /// # Panics + /// + /// If this change is not a [`MetaAddrChange::NewLocal`]. + pub fn local_listener_into_new_meta_addr(self, local_now: DateTime32) -> MetaAddr { + assert!(matches!(self, MetaAddrChange::NewLocal { .. })); + + MetaAddr { + addr: self.addr(), + services: self.untrusted_services(), + untrusted_last_seen: self.untrusted_last_seen(local_now), + last_response: self.last_response(local_now), + last_attempt: None, + last_failure: None, + last_connection_state: self.peer_addr_state(), + } } /// Apply this change to a previous `MetaAddr` from the address book, /// producing a new or updated `MetaAddr`. /// /// If the change isn't valid for the `previous` address, returns `None`. 
- pub fn apply_to_meta_addr(&self, previous: impl Into>) -> Option { - if let Some(previous) = previous.into() { - assert_eq!(previous.addr, self.addr(), "unexpected addr mismatch"); - - let previous_has_been_attempted = !previous.last_connection_state.is_never_attempted(); - let change_to_never_attempted = self - .into_new_meta_addr() - .map(|meta_addr| meta_addr.last_connection_state.is_never_attempted()) - .unwrap_or(false); - - if change_to_never_attempted { - if previous_has_been_attempted { - // Existing entry has been attempted, change is NeverAttempted - // - ignore the change - // - // # Security - // - // Ignore NeverAttempted changes once we have made an attempt, - // so malicious peers can't keep changing our peer connection order. - None - } else { - // Existing entry and change are both NeverAttempted - // - preserve original values of all fields - // - but replace None with Some - // - // # Security - // - // Preserve the original field values for NeverAttempted peers, - // so malicious peers can't keep changing our peer connection order. - Some(MetaAddr { - addr: self.addr(), - services: previous.services.or_else(|| self.untrusted_services()), - untrusted_last_seen: previous - .untrusted_last_seen - .or_else(|| self.untrusted_last_seen()), - // The peer has not been attempted, so these fields must be None - last_response: None, - last_attempt: None, - last_failure: None, - last_connection_state: self.peer_addr_state(), - }) - } - } else { - // Existing entry and change are both Attempt, Responded, Failed - // - ignore changes to earlier times - // - update the services from the change - // - // # Security - // - // Ignore changes to earlier times. This enforces the peer - // connection timeout, even if changes are applied out of order. - Some(MetaAddr { - addr: self.addr(), - // We want up-to-date services, even if they have fewer bits, - // or they are applied out of order. 
- services: self.untrusted_services().or(previous.services), - // Only NeverAttempted changes can modify the last seen field - untrusted_last_seen: previous.untrusted_last_seen, - // Since Some(time) is always greater than None, `max` prefers: - // - the latest time if both are Some - // - Some(time) if the other is None - last_response: self.last_response().max(previous.last_response), - last_attempt: self.last_attempt().max(previous.last_attempt), - last_failure: self.last_failure().max(previous.last_failure), - last_connection_state: self.peer_addr_state(), + #[allow(clippy::unwrap_in_result)] + pub fn apply_to_meta_addr( + &self, + previous: impl Into>, + instant_now: Instant, + chrono_now: chrono::DateTime, + ) -> Option { + let local_now: DateTime32 = chrono_now.try_into().expect("will succeed until 2038"); + + let Some(previous) = previous.into() else { + // no previous: create a new entry + return Some(self.into_new_meta_addr(instant_now, local_now)); + }; + + assert_eq!(previous.addr, self.addr(), "unexpected addr mismatch"); + + let instant_previous = max(previous.last_attempt, previous.last_failure); + let local_previous = previous.last_response; + + // Is this change potentially concurrent with the previous change? + // + // Since we're using saturating arithmetic, one of each pair of less than comparisons + // will always be true, because subtraction saturates to zero. 
+ let change_is_concurrent = instant_previous + .map(|instant_previous| { + instant_previous.saturating_duration_since(instant_now) + < constants::CONCURRENT_ADDRESS_CHANGE_PERIOD + && instant_now.saturating_duration_since(instant_previous) + < constants::CONCURRENT_ADDRESS_CHANGE_PERIOD + }) + .unwrap_or_default() + || local_previous + .map(|local_previous| { + local_previous.saturating_duration_since(local_now).to_std() + < constants::CONCURRENT_ADDRESS_CHANGE_PERIOD + && local_now.saturating_duration_since(local_previous).to_std() + < constants::CONCURRENT_ADDRESS_CHANGE_PERIOD }) - } + .unwrap_or_default(); + let change_is_out_of_order = instant_previous + .map(|instant_previous| instant_previous > instant_now) + .unwrap_or_default() + || local_previous + .map(|local_previous| local_previous > local_now) + .unwrap_or_default(); + + // Is this change typically from a connection state that has more progress? + let connection_has_more_progress = self + .peer_addr_state() + .connection_state_order(&previous.last_connection_state) + == Ordering::Greater; + + let previous_has_been_attempted = !previous.last_connection_state.is_never_attempted(); + let change_to_never_attempted = self.peer_addr_state().is_never_attempted(); + + // Invalid changes + + if change_to_never_attempted && previous_has_been_attempted { + // Existing entry has been attempted, change is NeverAttempted + // - ignore the change + // + // # Security + // + // Ignore NeverAttempted changes once we have made an attempt, + // so malicious peers can't keep changing our peer connection order. + return None; + } + + if change_is_out_of_order && !change_is_concurrent { + // Change is significantly out of order: ignore it. + // + // # Security + // + // Ignore changes that arrive out of order, if they are far enough apart. + // This enforces the peer connection retry interval. 
+ return None; + } + + if change_is_concurrent && !connection_has_more_progress { + // Change is close together in time, and it would revert the connection to an earlier + // state. + // + // # Security + // + // If the changes might have been concurrent, ignore connection states with less + // progress. + // + // ## Sources of Concurrency + // + // If two changes happen close together, the async scheduler can run their change + // send and apply code in any order. This includes the code that records the time of + // the change. So even if a failure happens after a response message, the failure time + // can be recorded before the response time code is run. + // + // Some machines and OSes have limited time resolution, so we can't guarantee that + // two messages on the same connection will always have different times. There are + // also known bugs impacting monotonic times which make them go backwards or stay + // equal. For wall clock times, clock skew is an expected event, particularly with + // network time server updates. + // + // Also, the application can fail a connection independently and simultaneously + // (or slightly before) a positive update from that peer connection. We want the + // application change to take priority in the address book, because the connection + // state machine also prioritises failures over any other peer messages. + // + // ## Resolution + // + // In these cases, we want to apply the failure, then ignore any nearby changes that + // reset the address book entry to a more appealing state. This prevents peers from + // sending updates right before failing a connection, in order to make themselves more + // likely to get a reconnection. + // + // The connection state machine order is used so that state transitions which are + // typically close together are preserved. 
These transitions are: + // - NeverAttempted*->AttemptPending->(Responded|Failed) + // - Responded->Failed + // + // State transitions like (Responded|Failed)->AttemptPending only happen after the + // reconnection timeout, so they will never be considered concurrent. + return None; + } + + // Valid changes + + if change_to_never_attempted && !previous_has_been_attempted { + // Existing entry and change are both NeverAttempted + // - preserve original values of all fields + // - but replace None with Some + // + // # Security + // + // Preserve the original field values for NeverAttempted peers, + // so malicious peers can't keep changing our peer connection order. + Some(MetaAddr { + addr: self.addr(), + services: previous.services.or_else(|| self.untrusted_services()), + untrusted_last_seen: previous + .untrusted_last_seen + .or_else(|| self.untrusted_last_seen(local_now)), + // The peer has not been attempted, so these fields must be None + last_response: None, + last_attempt: None, + last_failure: None, + last_connection_state: self.peer_addr_state(), + }) } else { - // no previous: create a new entry - self.into_new_meta_addr() + // Existing entry and change are both Attempt, Responded, Failed, + // and the change is later, either in time or in connection progress + // (this is checked above and returns None early): + // - update the fields from the change + Some(MetaAddr { + addr: self.addr(), + // Always update optional fields, unless the update is None. + // + // We want up-to-date services, even if they have fewer bits + services: self.untrusted_services().or(previous.services), + // Only NeverAttempted changes can modify the last seen field + untrusted_last_seen: previous.untrusted_last_seen, + // This is a wall clock time, but we already checked that responses are in order. + // Even if the wall clock time has jumped, we want to use the latest time. 
+ last_response: self.last_response(local_now).or(previous.last_response), + // These are monotonic times, we already checked the responses are in order. + last_attempt: self.last_attempt(instant_now).or(previous.last_attempt), + last_failure: self.last_failure(instant_now).or(previous.last_failure), + // Replace the state with the updated state. + last_connection_state: self.peer_addr_state(), + }) } } } diff --git a/zebra-network/src/meta_addr/arbitrary.rs b/zebra-network/src/meta_addr/arbitrary.rs index 955607e775d..1b96440e968 100644 --- a/zebra-network/src/meta_addr/arbitrary.rs +++ b/zebra-network/src/meta_addr/arbitrary.rs @@ -1,5 +1,7 @@ //! Randomised test data generation for MetaAddr. +use std::time::Instant; + use proptest::{arbitrary::any, collection::vec, prelude::*}; use zebra_chain::{parameters::Network::*, serialization::DateTime32}; @@ -49,12 +51,20 @@ impl MetaAddr { /// /// [1]: super::PeerAddrState::NeverAttemptedAlternate pub fn alternate_strategy() -> BoxedStrategy { - (canonical_peer_addr_strategy(), any::()) - .prop_map(|(socket_addr, untrusted_services)| { - MetaAddr::new_alternate(socket_addr, &untrusted_services) - .into_new_meta_addr() - .expect("unexpected invalid alternate change") - }) + ( + canonical_peer_addr_strategy(), + any::(), + any::(), + any::(), + ) + .prop_map( + |(socket_addr, untrusted_services, instant_now, local_now)| { + // instant_now is not actually used for this variant, + // so we could just provide a default value + MetaAddr::new_alternate(socket_addr, &untrusted_services) + .into_new_meta_addr(instant_now, local_now) + }, + ) .boxed() } } @@ -98,22 +108,29 @@ impl MetaAddrChange { /// /// [1]: super::NewAlternate pub fn ready_outbound_strategy() -> BoxedStrategy { - canonical_peer_addr_strategy() - .prop_filter_map("failed MetaAddr::is_valid_for_outbound", |addr| { - // Alternate nodes use the current time, so they're always ready - // - // TODO: create a "Zebra supported services" constant - let change = 
MetaAddr::new_alternate(addr, &PeerServices::NODE_NETWORK); - if change - .into_new_meta_addr() - .expect("unexpected invalid alternate change") - .last_known_info_is_valid_for_outbound(Mainnet) - { - Some(change) - } else { - None - } - }) + ( + canonical_peer_addr_strategy(), + any::(), + any::(), + ) + .prop_filter_map( + "failed MetaAddr::is_valid_for_outbound", + |(addr, instant_now, local_now)| { + // Alternate nodes use the current time, so they're always ready + // + // TODO: create a "Zebra supported services" constant + + let change = MetaAddr::new_alternate(addr, &PeerServices::NODE_NETWORK); + if change + .into_new_meta_addr(instant_now, local_now) + .last_known_info_is_valid_for_outbound(Mainnet) + { + Some(change) + } else { + None + } + }, + ) .boxed() } } diff --git a/zebra-network/src/meta_addr/tests/prop.rs b/zebra-network/src/meta_addr/tests/prop.rs index 0b5f968aebd..19f66718ccb 100644 --- a/zebra-network/src/meta_addr/tests/prop.rs +++ b/zebra-network/src/meta_addr/tests/prop.rs @@ -4,7 +4,6 @@ use std::{collections::HashMap, env, net::SocketAddr, str::FromStr, sync::Arc, t use chrono::Utc; use proptest::{collection::vec, prelude::*}; -use tokio::time::Instant; use tower::service_fn; use tracing::Span; @@ -64,8 +63,12 @@ proptest! { ) { let _init_guard = zebra_test::init(); + let instant_now = std::time::Instant::now(); + let chrono_now = Utc::now(); + let local_now: DateTime32 = chrono_now.try_into().expect("will succeed until 2038"); + for change in changes { - if let Some(changed_addr) = change.apply_to_meta_addr(addr) { + if let Some(changed_addr) = change.apply_to_meta_addr(addr, instant_now, chrono_now) { // untrusted last seen times: // check that we replace None with Some, but leave Some unchanged if addr.untrusted_last_seen.is_some() { @@ -73,7 +76,7 @@ proptest! { } else { prop_assert_eq!( changed_addr.untrusted_last_seen, - change.untrusted_last_seen() + change.untrusted_last_seen(local_now) ); } @@ -112,18 +115,22 @@ proptest! 
{ for change in changes { while addr.is_ready_for_connection_attempt(instant_now, chrono_now, Mainnet) { - attempt_count += 1; - // Assume that this test doesn't last longer than MIN_PEER_RECONNECTION_DELAY - prop_assert!(attempt_count <= 1); - // Simulate an attempt - addr = MetaAddr::new_reconnect(addr.addr) - .apply_to_meta_addr(addr) - .expect("unexpected invalid attempt"); + addr = if let Some(addr) = MetaAddr::new_reconnect(addr.addr) + .apply_to_meta_addr(addr, instant_now, chrono_now) { + attempt_count += 1; + // Assume that this test doesn't last longer than MIN_PEER_RECONNECTION_DELAY + prop_assert!(attempt_count <= 1); + addr + } else { + // Stop updating when an attempt comes too soon after a failure. + // In production these are prevented by the dialer code. + break; + } } // If `change` is invalid for the current MetaAddr state, skip it. - if let Some(changed_addr) = change.apply_to_meta_addr(addr) { + if let Some(changed_addr) = change.apply_to_meta_addr(addr, instant_now, chrono_now) { prop_assert_eq!(changed_addr.addr, addr.addr); addr = changed_addr; } @@ -155,7 +162,7 @@ proptest! { ); let sanitized_addrs = address_book.sanitized(chrono_now); - let expected_local_listener = address_book.local_listener_meta_addr(); + let expected_local_listener = address_book.local_listener_meta_addr(chrono_now); let canonical_local_listener = canonical_peer_addr(local_listener); let book_sanitized_local_listener = sanitized_addrs .iter() @@ -186,9 +193,12 @@ proptest! { let local_listener = "0.0.0.0:0".parse().expect("unexpected invalid SocketAddr"); + let instant_now = std::time::Instant::now(); + let chrono_now = Utc::now(); + for change in changes { // Check direct application - let new_addr = change.apply_to_meta_addr(None); + let new_addr = change.apply_to_meta_addr(None, instant_now, chrono_now); prop_assert!( new_addr.is_some(), @@ -328,7 +338,7 @@ proptest! 
{ tokio::time::pause(); // The earliest time we can have a valid next attempt for this peer - let earliest_next_attempt = Instant::now() + MIN_PEER_RECONNECTION_DELAY; + let earliest_next_attempt = tokio::time::Instant::now() + MIN_PEER_RECONNECTION_DELAY; // The number of attempts for this peer in the last MIN_PEER_RECONNECTION_DELAY let mut attempt_count: usize = 0; @@ -349,7 +359,7 @@ proptest! { original addr was in address book: {}\n", candidate_addr, i, - Instant::now(), + tokio::time::Instant::now(), earliest_next_attempt, attempt_count, LIVE_PEER_INTERVALS, @@ -365,7 +375,7 @@ proptest! { address_book.clone().lock().unwrap().update(change); tokio::time::advance(peer_change_interval).await; - if Instant::now() >= earliest_next_attempt { + if tokio::time::Instant::now() >= earliest_next_attempt { attempt_count = 0; } } @@ -423,20 +433,24 @@ proptest! { let change = changes.get(change_index); while addr.is_ready_for_connection_attempt(instant_now, chrono_now, Mainnet) { - *attempt_counts.entry(addr.addr).or_default() += 1; - prop_assert!( - *attempt_counts.get(&addr.addr).unwrap() <= LIVE_PEER_INTERVALS + 1 - ); - // Simulate an attempt - *addr = MetaAddr::new_reconnect(addr.addr) - .apply_to_meta_addr(*addr) - .expect("unexpected invalid attempt"); + *addr = if let Some(addr) = MetaAddr::new_reconnect(addr.addr) + .apply_to_meta_addr(*addr, instant_now, chrono_now) { + *attempt_counts.entry(addr.addr).or_default() += 1; + prop_assert!( + *attempt_counts.get(&addr.addr).unwrap() <= LIVE_PEER_INTERVALS + 1 + ); + addr + } else { + // Stop updating when an attempt comes too soon after a failure. + // In production these are prevented by the dialer code. + break; + } } // If `change` is invalid for the current MetaAddr state, skip it. // If we've run out of changes for this addr, do nothing. 
- if let Some(changed_addr) = change.and_then(|change| change.apply_to_meta_addr(*addr)) + if let Some(changed_addr) = change.and_then(|change| change.apply_to_meta_addr(*addr, instant_now, chrono_now)) { prop_assert_eq!(changed_addr.addr, addr.addr); *addr = changed_addr; diff --git a/zebra-network/src/meta_addr/tests/vectors.rs b/zebra-network/src/meta_addr/tests/vectors.rs index 187f70778a2..5b341901b18 100644 --- a/zebra-network/src/meta_addr/tests/vectors.rs +++ b/zebra-network/src/meta_addr/tests/vectors.rs @@ -1,5 +1,7 @@ //! Fixed test cases for MetaAddr and MetaAddrChange. +use std::time::Instant; + use chrono::Utc; use zebra_chain::{ @@ -7,7 +9,11 @@ use zebra_chain::{ serialization::{DateTime32, Duration32}, }; -use crate::{constants::MAX_PEER_ACTIVE_FOR_GOSSIP, protocol::types::PeerServices, PeerSocketAddr}; +use crate::{ + constants::{CONCURRENT_ADDRESS_CHANGE_PERIOD, MAX_PEER_ACTIVE_FOR_GOSSIP}, + protocol::types::PeerServices, + PeerSocketAddr, +}; use super::{super::MetaAddr, check}; @@ -57,12 +63,13 @@ fn sanitize_extremes() { fn new_local_listener_is_gossipable() { let _init_guard = zebra_test::init(); + let instant_now = Instant::now(); let chrono_now = Utc::now(); + let local_now: DateTime32 = chrono_now.try_into().expect("will succeed until 2038"); let address = PeerSocketAddr::from(([192, 168, 180, 9], 10_000)); - let peer = MetaAddr::new_local_listener_change(address) - .into_new_meta_addr() - .expect("MetaAddrChange can't create a new MetaAddr"); + let peer = + MetaAddr::new_local_listener_change(address).into_new_meta_addr(instant_now, local_now); assert!(peer.is_active_for_gossip(chrono_now)); } @@ -75,12 +82,13 @@ fn new_local_listener_is_gossipable() { fn new_alternate_peer_address_is_not_gossipable() { let _init_guard = zebra_test::init(); + let instant_now = Instant::now(); let chrono_now = Utc::now(); + let local_now: DateTime32 = chrono_now.try_into().expect("will succeed until 2038"); let address = PeerSocketAddr::from(([192, 168, 
180, 9], 10_000)); let peer = MetaAddr::new_alternate(address, &PeerServices::NODE_NETWORK) - .into_new_meta_addr() - .expect("MetaAddrChange can't create a new MetaAddr"); + .into_new_meta_addr(instant_now, local_now); assert!(!peer.is_active_for_gossip(chrono_now)); } @@ -153,16 +161,17 @@ fn gossiped_peer_reportedly_seen_long_ago_is_not_gossipable() { fn recently_responded_peer_is_gossipable() { let _init_guard = zebra_test::init(); + let instant_now = Instant::now(); let chrono_now = Utc::now(); + let local_now: DateTime32 = chrono_now.try_into().expect("will succeed until 2038"); let address = PeerSocketAddr::from(([192, 168, 180, 9], 10_000)); let peer_seed = MetaAddr::new_alternate(address, &PeerServices::NODE_NETWORK) - .into_new_meta_addr() - .expect("MetaAddrChange can't create a new MetaAddr"); + .into_new_meta_addr(instant_now, local_now); // Create a peer that has responded let peer = MetaAddr::new_responded(address, &PeerServices::NODE_NETWORK) - .apply_to_meta_addr(peer_seed) + .apply_to_meta_addr(peer_seed, instant_now, chrono_now) .expect("Failed to create MetaAddr for responded peer"); assert!(peer.is_active_for_gossip(chrono_now)); @@ -173,16 +182,17 @@ fn recently_responded_peer_is_gossipable() { fn not_so_recently_responded_peer_is_still_gossipable() { let _init_guard = zebra_test::init(); + let instant_now = Instant::now(); let chrono_now = Utc::now(); + let local_now: DateTime32 = chrono_now.try_into().expect("will succeed until 2038"); let address = PeerSocketAddr::from(([192, 168, 180, 9], 10_000)); let peer_seed = MetaAddr::new_alternate(address, &PeerServices::NODE_NETWORK) - .into_new_meta_addr() - .expect("MetaAddrChange can't create a new MetaAddr"); + .into_new_meta_addr(instant_now, local_now); // Create a peer that has responded let mut peer = MetaAddr::new_responded(address, &PeerServices::NODE_NETWORK) - .apply_to_meta_addr(peer_seed) + .apply_to_meta_addr(peer_seed, instant_now, chrono_now) .expect("Failed to create MetaAddr for 
responded peer"); // Tweak the peer's last response time to be within the limits of the reachable duration @@ -203,16 +213,17 @@ fn not_so_recently_responded_peer_is_still_gossipable() { fn responded_long_ago_peer_is_not_gossipable() { let _init_guard = zebra_test::init(); + let instant_now = Instant::now(); let chrono_now = Utc::now(); + let local_now: DateTime32 = chrono_now.try_into().expect("will succeed until 2038"); let address = PeerSocketAddr::from(([192, 168, 180, 9], 10_000)); let peer_seed = MetaAddr::new_alternate(address, &PeerServices::NODE_NETWORK) - .into_new_meta_addr() - .expect("MetaAddrChange can't create a new MetaAddr"); + .into_new_meta_addr(instant_now, local_now); // Create a peer that has responded let mut peer = MetaAddr::new_responded(address, &PeerServices::NODE_NETWORK) - .apply_to_meta_addr(peer_seed) + .apply_to_meta_addr(peer_seed, instant_now, chrono_now) .expect("Failed to create MetaAddr for responded peer"); // Tweak the peer's last response time to be outside the limits of the reachable duration @@ -227,3 +238,210 @@ fn responded_long_ago_peer_is_not_gossipable() { assert!(!peer.is_active_for_gossip(chrono_now)); } + +/// Test that a change that is delayed for a long time is not applied to the address state. 
+#[test] +fn long_delayed_change_is_not_applied() { + let _init_guard = zebra_test::init(); + + let instant_now = Instant::now(); + let chrono_now = Utc::now(); + let local_now: DateTime32 = chrono_now.try_into().expect("will succeed until 2038"); + + let address = PeerSocketAddr::from(([192, 168, 180, 9], 10_000)); + let peer_seed = MetaAddr::new_alternate(address, &PeerServices::NODE_NETWORK) + .into_new_meta_addr(instant_now, local_now); + + // Create a peer that has responded + let peer = MetaAddr::new_responded(address, &PeerServices::NODE_NETWORK) + .apply_to_meta_addr(peer_seed, instant_now, chrono_now) + .expect("Failed to create MetaAddr for responded peer"); + + // Create an earlier change to Failed that has been delayed a long time. + // Failed typically comes after Responded, so it will pass the connection progress check. + // + // This is very unlikely in the May 2023 production code, + // but it can happen due to getting the time, then waiting for the address book mutex. + + // Create some change times that are much earlier + let instant_early = instant_now - (CONCURRENT_ADDRESS_CHANGE_PERIOD * 3); + let chrono_early = chrono_now + - chrono::Duration::from_std(CONCURRENT_ADDRESS_CHANGE_PERIOD * 3) + .expect("constant is valid"); + + let change = MetaAddr::new_errored(address, PeerServices::NODE_NETWORK); + let outcome = change.apply_to_meta_addr(peer, instant_early, chrono_early); + + assert_eq!( + outcome, None, + "\n\ + unexpected application of a much earlier change to a peer:\n\ + change: {change:?}\n\ + times: {instant_early:?} {chrono_early}\n\ + peer: {peer:?}" + ); +} + +/// Test that a change that happens a long time after the previous change +/// is applied to the address state, even if it is a revert. 
+#[test] +fn later_revert_change_is_applied() { + let _init_guard = zebra_test::init(); + + let instant_now = Instant::now(); + let chrono_now = Utc::now(); + let local_now: DateTime32 = chrono_now.try_into().expect("will succeed until 2038"); + + let address = PeerSocketAddr::from(([192, 168, 180, 9], 10_000)); + let peer_seed = MetaAddr::new_alternate(address, &PeerServices::NODE_NETWORK) + .into_new_meta_addr(instant_now, local_now); + + // Create a peer that has responded + let peer = MetaAddr::new_responded(address, &PeerServices::NODE_NETWORK) + .apply_to_meta_addr(peer_seed, instant_now, chrono_now) + .expect("Failed to create MetaAddr for responded peer"); + + // Create an earlier change to AttemptPending that happens a long time later. + // AttemptPending typically comes before Responded, so it will fail the connection progress + // check, but that failure should be ignored because it is not concurrent. + // + // This is a typical reconnect in production. + + // Create some change times that are much later + let instant_late = instant_now + (CONCURRENT_ADDRESS_CHANGE_PERIOD * 3); + let chrono_late = chrono_now + + chrono::Duration::from_std(CONCURRENT_ADDRESS_CHANGE_PERIOD * 3) + .expect("constant is valid"); + + let change = MetaAddr::new_reconnect(address); + let outcome = change.apply_to_meta_addr(peer, instant_late, chrono_late); + + assert!( + outcome.is_some(), + "\n\ + unexpected skipped much later change to a peer:\n\ + change: {change:?}\n\ + times: {instant_late:?} {chrono_late}\n\ + peer: {peer:?}" + ); +} + +/// Test that a concurrent change which reverses the connection state is not applied. 
+#[test] +fn concurrent_state_revert_change_is_not_applied() { + let _init_guard = zebra_test::init(); + + let instant_now = Instant::now(); + let chrono_now = Utc::now(); + let local_now: DateTime32 = chrono_now.try_into().expect("will succeed until 2038"); + + let address = PeerSocketAddr::from(([192, 168, 180, 9], 10_000)); + let peer_seed = MetaAddr::new_alternate(address, &PeerServices::NODE_NETWORK) + .into_new_meta_addr(instant_now, local_now); + + // Create a peer that has responded + let peer = MetaAddr::new_responded(address, &PeerServices::NODE_NETWORK) + .apply_to_meta_addr(peer_seed, instant_now, chrono_now) + .expect("Failed to create MetaAddr for responded peer"); + + // Create a concurrent change to AttemptPending. + // AttemptPending typically comes before Responded, so it will fail the progress check. + // + // This is likely to happen in production, it just requires a short delay in the earlier change. + + // Create some change times that are earlier but concurrent + let instant_early = instant_now - (CONCURRENT_ADDRESS_CHANGE_PERIOD / 2); + let chrono_early = chrono_now + - chrono::Duration::from_std(CONCURRENT_ADDRESS_CHANGE_PERIOD / 2) + .expect("constant is valid"); + + let change = MetaAddr::new_reconnect(address); + let outcome = change.apply_to_meta_addr(peer, instant_early, chrono_early); + + assert_eq!( + outcome, None, + "\n\ + unexpected application of an early concurrent change to a peer:\n\ + change: {change:?}\n\ + times: {instant_early:?} {chrono_early}\n\ + peer: {peer:?}" + ); + + // Create some change times that are later but concurrent + let instant_late = instant_now + (CONCURRENT_ADDRESS_CHANGE_PERIOD / 2); + let chrono_late = chrono_now + + chrono::Duration::from_std(CONCURRENT_ADDRESS_CHANGE_PERIOD / 2) + .expect("constant is valid"); + + let change = MetaAddr::new_reconnect(address); + let outcome = change.apply_to_meta_addr(peer, instant_late, chrono_late); + + assert_eq!( + outcome, None, + "\n\ + unexpected application 
of a late concurrent change to a peer:\n\ + change: {change:?}\n\ + times: {instant_late:?} {chrono_late}\n\ + peer: {peer:?}" + ); +} + +/// Test that a concurrent change which progresses the connection state is applied. +#[test] +fn concurrent_state_progress_change_is_applied() { + let _init_guard = zebra_test::init(); + + let instant_now = Instant::now(); + let chrono_now = Utc::now(); + let local_now: DateTime32 = chrono_now.try_into().expect("will succeed until 2038"); + + let address = PeerSocketAddr::from(([192, 168, 180, 9], 10_000)); + let peer_seed = MetaAddr::new_alternate(address, &PeerServices::NODE_NETWORK) + .into_new_meta_addr(instant_now, local_now); + + // Create a peer that has responded + let peer = MetaAddr::new_responded(address, &PeerServices::NODE_NETWORK) + .apply_to_meta_addr(peer_seed, instant_now, chrono_now) + .expect("Failed to create MetaAddr for responded peer"); + + // Create a concurrent change to Failed. + // Failed typically comes after Responded, so it will pass the progress check. + // + // This is a typical update in production. 
+ + // Create some change times that are earlier but concurrent + let instant_early = instant_now - (CONCURRENT_ADDRESS_CHANGE_PERIOD / 2); + let chrono_early = chrono_now + - chrono::Duration::from_std(CONCURRENT_ADDRESS_CHANGE_PERIOD / 2) + .expect("constant is valid"); + + let change = MetaAddr::new_errored(address, None); + let outcome = change.apply_to_meta_addr(peer, instant_early, chrono_early); + + assert!( + outcome.is_some(), + "\n\ + unexpected skipped early concurrent change to a peer:\n\ + change: {change:?}\n\ + times: {instant_early:?} {chrono_early}\n\ + peer: {peer:?}" + ); + + // Create some change times that are later but concurrent + let instant_late = instant_now + (CONCURRENT_ADDRESS_CHANGE_PERIOD / 2); + let chrono_late = chrono_now + + chrono::Duration::from_std(CONCURRENT_ADDRESS_CHANGE_PERIOD / 2) + .expect("constant is valid"); + + let change = MetaAddr::new_errored(address, None); + let outcome = change.apply_to_meta_addr(peer, instant_late, chrono_late); + + assert!( + outcome.is_some(), + "\n\ + unexpected skipped late concurrent change to a peer:\n\ + change: {change:?}\n\ + times: {instant_late:?} {chrono_late}\n\ + peer: {peer:?}" + ); +} diff --git a/zebra-network/src/peer_set/initialize/tests/vectors.rs b/zebra-network/src/peer_set/initialize/tests/vectors.rs index 4ef69cb549d..76110e8e9d1 100644 --- a/zebra-network/src/peer_set/initialize/tests/vectors.rs +++ b/zebra-network/src/peer_set/initialize/tests/vectors.rs @@ -1145,7 +1145,7 @@ async fn self_connections_should_fail() { .lock() .expect("unexpected panic in address book"); - let real_self_listener = unlocked_address_book.local_listener_meta_addr(); + let real_self_listener = unlocked_address_book.local_listener_meta_addr(Utc::now()); // Set a fake listener to get past the check for adding our own address unlocked_address_book.set_local_listener("192.168.0.0:1".parse().unwrap()); @@ -1384,7 +1384,10 @@ async fn local_listener_port_with(listen_addr: SocketAddr, network: 
Network) { "Test user agent".to_string(), ) .await; - let local_listener = address_book.lock().unwrap().local_listener_meta_addr(); + let local_listener = address_book + .lock() + .unwrap() + .local_listener_meta_addr(Utc::now()); if listen_addr.port() == 0 { assert_ne!( diff --git a/zebra-rpc/src/methods/tests/snapshot/get_block_template_rpcs.rs b/zebra-rpc/src/methods/tests/snapshot/get_block_template_rpcs.rs index 5b8155f209d..c827d5f0ccb 100644 --- a/zebra-rpc/src/methods/tests/snapshot/get_block_template_rpcs.rs +++ b/zebra-rpc/src/methods/tests/snapshot/get_block_template_rpcs.rs @@ -5,7 +5,10 @@ //! cargo insta test --review --features getblocktemplate-rpcs --delete-unreferenced-snapshots //! ``` -use std::net::{IpAddr, Ipv4Addr, SocketAddr}; +use std::{ + net::{IpAddr, Ipv4Addr, SocketAddr}, + time::Instant, +}; use hex::FromHex; use insta::Settings; @@ -133,8 +136,7 @@ pub async fn test_responses( ) .into(), ) - .into_new_meta_addr() - .unwrap()]); + .into_new_meta_addr(Instant::now(), DateTime32::now())]); // get an rpc instance with continuous blockchain state let get_block_template_rpc = GetBlockTemplateRpcImpl::new( diff --git a/zebra-rpc/src/methods/tests/vectors.rs b/zebra-rpc/src/methods/tests/vectors.rs index 02ccd9bc36b..ccc018b5e7f 100644 --- a/zebra-rpc/src/methods/tests/vectors.rs +++ b/zebra-rpc/src/methods/tests/vectors.rs @@ -951,8 +951,10 @@ async fn rpc_getpeerinfo() { ) .into(), ) - .into_new_meta_addr() - .unwrap(); + .into_new_meta_addr( + std::time::Instant::now(), + zebra_chain::serialization::DateTime32::now(), + ); let mock_address_book = MockAddressBookPeers::new(vec![mock_peer_address]); From 77d27c7c5ccfede5342984fe4614756506f28507 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 24 May 2023 23:54:13 +0000 Subject: [PATCH 014/265] build(deps): bump vergen from 8.2.0 to 8.2.1 (#6756) Bumps [vergen](https://github.com/rustyhorde/vergen) from 8.2.0 to 8.2.1. 
- [Release notes](https://github.com/rustyhorde/vergen/releases) - [Commits](https://github.com/rustyhorde/vergen/compare/8.2.0...8.2.1) --- updated-dependencies: - dependency-name: vergen dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- zebrad/Cargo.toml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 20295ee0715..00e8a09b06b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5179,9 +5179,9 @@ checksum = "f1bddf1187be692e79c5ffeab891132dfb0f236ed36a43c7ed39f1165ee20191" [[package]] name = "vergen" -version = "8.2.0" +version = "8.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e884903ddba094df9bcdeb43b41170658bb4c2001ca8c47df7368244f4210ee" +checksum = "8b3c89c2c7e50f33e4d35527e5bf9c11d6d132226dbbd1753f0fbe9f19ef88c6" dependencies = [ "anyhow", "git2", diff --git a/zebrad/Cargo.toml b/zebrad/Cargo.toml index f4d3fab357b..cef9f440e02 100644 --- a/zebrad/Cargo.toml +++ b/zebrad/Cargo.toml @@ -185,7 +185,7 @@ proptest-derive = { version = "0.3.0", optional = true } console-subscriber = { version = "0.1.8", optional = true } [build-dependencies] -vergen = { version = "8.2.0", default-features = false, features = ["cargo", "git", "git2", "rustc"] } +vergen = { version = "8.2.1", default-features = false, features = ["cargo", "git", "git2", "rustc"] } # test feature lightwalletd-grpc-tests tonic-build = { version = "0.9.2", optional = true } From f9b5eb3a1513d2c6ee6e68af85096b0ec59972e8 Mon Sep 17 00:00:00 2001 From: teor Date: Fri, 26 May 2023 04:28:25 +1000 Subject: [PATCH 015/265] fix(log): Limit RPC failure log length, add details to RPC failure logs (#6754) * Limit the RPC failure log length * Add more specific logs for some RPC tests * Make sure the block hash is always available for logging --- 
zebra-rpc/src/constants.rs | 3 +++ .../src/methods/get_block_template_rpcs.rs | 6 +++--- zebra-rpc/src/server/rpc_call_compatibility.rs | 18 +++++++++++++++--- .../get_block_template_rpcs/submit_block.rs | 5 ++++- 4 files changed, 25 insertions(+), 7 deletions(-) diff --git a/zebra-rpc/src/constants.rs b/zebra-rpc/src/constants.rs index 9d549767bd5..58b119ad264 100644 --- a/zebra-rpc/src/constants.rs +++ b/zebra-rpc/src/constants.rs @@ -16,3 +16,6 @@ pub const INVALID_PARAMETERS_ERROR_CODE: ErrorCode = ErrorCode::ServerError(-1); /// `lightwalletd` expects error code `-8` when a block is not found: /// pub const MISSING_BLOCK_ERROR_CODE: ErrorCode = ErrorCode::ServerError(-8); + +/// When logging parameter data, only log this much data. +pub const MAX_PARAMS_LOG_LENGTH: usize = 100; diff --git a/zebra-rpc/src/methods/get_block_template_rpcs.rs b/zebra-rpc/src/methods/get_block_template_rpcs.rs index f7ea4e3f98f..f9c5e3f01cb 100644 --- a/zebra-rpc/src/methods/get_block_template_rpcs.rs +++ b/zebra-rpc/src/methods/get_block_template_rpcs.rs @@ -737,7 +737,7 @@ where let block: Block = match block_bytes.zcash_deserialize_into() { Ok(block_bytes) => block_bytes, Err(error) => { - tracing::info!(?error, "submit block failed"); + tracing::info!(?error, "submit block failed: block bytes could not be deserialized into a structurally valid block"); return Ok(submit_block::ErrorResponse::Rejected.into()); } @@ -747,6 +747,7 @@ where .coinbase_height() .map(|height| height.0.to_string()) .unwrap_or_else(|| "invalid coinbase height".to_string()); + let block_hash = block.hash(); let chain_verifier_response = chain_verifier .ready() @@ -778,8 +779,7 @@ where .downcast::() .map(|boxed_chain_error| *boxed_chain_error); - // TODO: add block hash to error? 
- tracing::info!(?error, ?block_height, "submit block failed"); + tracing::info!(?error, ?block_hash, ?block_height, "submit block failed verification"); error } diff --git a/zebra-rpc/src/server/rpc_call_compatibility.rs b/zebra-rpc/src/server/rpc_call_compatibility.rs index e6f44c1eb98..c3974ac3cf8 100644 --- a/zebra-rpc/src/server/rpc_call_compatibility.rs +++ b/zebra-rpc/src/server/rpc_call_compatibility.rs @@ -12,7 +12,7 @@ use jsonrpc_core::{ BoxFuture, ErrorCode, Metadata, MethodCall, Notification, }; -use crate::constants::INVALID_PARAMETERS_ERROR_CODE; +use crate::constants::{INVALID_PARAMETERS_ERROR_CODE, MAX_PARAMS_LOG_LENGTH}; /// JSON-RPC [`Middleware`] with compatibility workarounds. /// @@ -75,10 +75,22 @@ impl FixRpcResponseMiddleware { fn call_description(call: &Call) -> String { match call { Call::MethodCall(MethodCall { method, params, .. }) => { - format!(r#"method = {method:?}, params = {params:?}"#) + let mut params = format!("{params:?}"); + if params.len() >= MAX_PARAMS_LOG_LENGTH { + params.truncate(MAX_PARAMS_LOG_LENGTH); + params.push_str("..."); + } + + format!(r#"method = {method:?}, params = {params}"#) } Call::Notification(Notification { method, params, .. }) => { - format!(r#"notification = {method:?}, params = {params:?}"#) + let mut params = format!("{params:?}"); + if params.len() >= MAX_PARAMS_LOG_LENGTH { + params.truncate(MAX_PARAMS_LOG_LENGTH); + params.push_str("..."); + } + + format!(r#"notification = {method:?}, params = {params}"#) } Call::Invalid { .. 
} => "invalid request".to_owned(), } diff --git a/zebrad/tests/common/get_block_template_rpcs/submit_block.rs b/zebrad/tests/common/get_block_template_rpcs/submit_block.rs index 571ffa14f04..de034460c8d 100644 --- a/zebrad/tests/common/get_block_template_rpcs/submit_block.rs +++ b/zebrad/tests/common/get_block_template_rpcs/submit_block.rs @@ -75,7 +75,10 @@ pub(crate) async fn run() -> Result<()> { let res_text = res.text().await?; // Test rpc endpoint response - assert!(res_text.contains(r#""result":null"#)); + assert!( + res_text.contains(r#""result":null"#), + "unexpected response from submitblock RPC, should be null, was: {res_text}" + ); } zebrad.kill(false)?; From dceed5a893cc56fe4a45c364689f3ad503c5b417 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 25 May 2023 20:44:16 +0000 Subject: [PATCH 016/265] build(deps): bump proptest from 1.1.0 to 1.2.0 (#6758) Bumps [proptest](https://github.com/proptest-rs/proptest) from 1.1.0 to 1.2.0. - [Release notes](https://github.com/proptest-rs/proptest/releases) - [Changelog](https://github.com/proptest-rs/proptest/blob/master/CHANGELOG.md) - [Commits](https://github.com/proptest-rs/proptest/compare/v1.1.0...v1.2.0) --- updated-dependencies: - dependency-name: proptest dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 13 +++---------- zebra-chain/Cargo.toml | 4 ++-- zebra-consensus/Cargo.toml | 4 ++-- zebra-network/Cargo.toml | 4 ++-- zebra-rpc/Cargo.toml | 4 ++-- zebra-state/Cargo.toml | 4 ++-- zebra-test/Cargo.toml | 2 +- zebrad/Cargo.toml | 4 ++-- 8 files changed, 16 insertions(+), 23 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 00e8a09b06b..5594fc11e5c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3276,16 +3276,15 @@ dependencies = [ [[package]] name = "proptest" -version = "1.1.0" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "29f1b898011ce9595050a68e60f90bad083ff2987a695a42357134c8381fba70" +checksum = "4e35c06b98bf36aba164cc17cb25f7e232f5c4aeea73baa14b8a9f0d92dbfa65" dependencies = [ "bit-set", "bitflags 1.3.2", "byteorder", "lazy_static", "num-traits", - "quick-error 2.0.1", "rand 0.8.5", "rand_chacha 0.3.1", "rand_xorshift", @@ -3382,12 +3381,6 @@ version = "1.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" -[[package]] -name = "quick-error" -version = "2.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a993555f31e5a609f617c12db6250dedcac1b0a85076912c436e6fc9b2c8e6a3" - [[package]] name = "quick-xml" version = "0.26.0" @@ -3860,7 +3853,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cb3dcc6e454c328bb824492db107ab7c0ae8fcffe4ad210136ef014458c1bc4f" dependencies = [ "fnv", - "quick-error 1.2.3", + "quick-error", "tempfile", "wait-timeout", ] diff --git a/zebra-chain/Cargo.toml b/zebra-chain/Cargo.toml index 760cfc7e908..ee5c3627bb6 100644 --- a/zebra-chain/Cargo.toml +++ b/zebra-chain/Cargo.toml @@ -101,7 +101,7 @@ serde_json = { version = "1.0.95", optional = true } zcash_address = { version = "0.2.1", optional = 
true } # Optional testing dependencies -proptest = { version = "1.1.0", optional = true } +proptest = { version = "1.2.0", optional = true } proptest-derive = { version = "0.3.0", optional = true } rand = { version = "0.8.5", optional = true, package = "rand" } @@ -124,7 +124,7 @@ spandoc = "0.2.2" tracing = "0.1.37" # Make the optional testing dependencies required -proptest = "1.1.0" +proptest = "1.2.0" proptest-derive = "0.3.0" rand = { version = "0.8.5", package = "rand" } diff --git a/zebra-consensus/Cargo.toml b/zebra-consensus/Cargo.toml index 885b3f82554..cabd507799a 100644 --- a/zebra-consensus/Cargo.toml +++ b/zebra-consensus/Cargo.toml @@ -65,7 +65,7 @@ zebra-chain = { path = "../zebra-chain" } howudoin = { version = "0.1.2", optional = true } # Test-only dependencies -proptest = { version = "1.1.0", optional = true } +proptest = { version = "1.2.0", optional = true } proptest-derive = { version = "0.3.0", optional = true } [dev-dependencies] @@ -76,7 +76,7 @@ tinyvec = { version = "1.6.0", features = ["rustc_1_55"] } hex = "0.4.3" num-integer = "0.1.45" -proptest = "1.1.0" +proptest = "1.2.0" proptest-derive = "0.3.0" spandoc = "0.2.2" diff --git a/zebra-network/Cargo.toml b/zebra-network/Cargo.toml index 75ab5a80504..7e1a2c7e1dc 100644 --- a/zebra-network/Cargo.toml +++ b/zebra-network/Cargo.toml @@ -59,13 +59,13 @@ howudoin = { version = "0.1.2", optional = true } # tor-rtcompat = { version = "0.0.2", optional = true } # proptest dependencies -proptest = { version = "1.1.0", optional = true } +proptest = { version = "1.2.0", optional = true } proptest-derive = { version = "0.3.0", optional = true } zebra-chain = { path = "../zebra-chain" } [dev-dependencies] -proptest = "1.1.0" +proptest = "1.2.0" proptest-derive = "0.3.0" static_assertions = "1.1.0" diff --git a/zebra-rpc/Cargo.toml b/zebra-rpc/Cargo.toml index 67d17b91c06..fae51928d75 100644 --- a/zebra-rpc/Cargo.toml +++ b/zebra-rpc/Cargo.toml @@ -61,7 +61,7 @@ rand = { version = "0.8.5", package = 
"rand", optional = true } zcash_address = { version = "0.2.1", optional = true } # Test-only feature proptest-impl -proptest = { version = "1.1.0", optional = true } +proptest = { version = "1.2.0", optional = true } zebra-chain = { path = "../zebra-chain", features = ["json-conversion"] } zebra-consensus = { path = "../zebra-consensus" } @@ -73,7 +73,7 @@ zebra-state = { path = "../zebra-state" } [dev-dependencies] insta = { version = "1.29.0", features = ["redactions", "json", "ron"] } -proptest = "1.1.0" +proptest = "1.2.0" thiserror = "1.0.40" tokio = { version = "1.28.0", features = ["full", "tracing", "test-util"] } diff --git a/zebra-state/Cargo.toml b/zebra-state/Cargo.toml index 3fb94021b75..798d6404592 100644 --- a/zebra-state/Cargo.toml +++ b/zebra-state/Cargo.toml @@ -66,7 +66,7 @@ howudoin = { version = "0.1.2", optional = true } # test feature proptest-impl zebra-test = { path = "../zebra-test/", optional = true } -proptest = { version = "1.1.0", optional = true } +proptest = { version = "1.2.0", optional = true } proptest-derive = { version = "0.3.0", optional = true } [dev-dependencies] @@ -81,7 +81,7 @@ spandoc = "0.2.2" hex = { version = "0.4.3", features = ["serde"] } insta = { version = "1.29.0", features = ["ron"] } -proptest = "1.1.0" +proptest = "1.2.0" proptest-derive = "0.3.0" halo2 = { package = "halo2_proofs", version = "0.3.0" } diff --git a/zebra-test/Cargo.toml b/zebra-test/Cargo.toml index bf74e64865d..0dc676484ad 100644 --- a/zebra-test/Cargo.toml +++ b/zebra-test/Cargo.toml @@ -12,7 +12,7 @@ hex = "0.4.3" indexmap = "1.9.3" lazy_static = "1.4.0" insta = "1.29.0" -proptest = "1.1.0" +proptest = "1.2.0" once_cell = "1.17.1" rand = { version = "0.8.5", package = "rand" } regex = "1.8.2" diff --git a/zebrad/Cargo.toml b/zebrad/Cargo.toml index cef9f440e02..ed79a696a69 100644 --- a/zebrad/Cargo.toml +++ b/zebrad/Cargo.toml @@ -178,7 +178,7 @@ howudoin = { version = "0.1.2", features = ["term-line"], optional = true } indicatif = { 
version = "0.17.3", optional = true } # test feature proptest-impl -proptest = { version = "1.1.0", optional = true } +proptest = { version = "1.2.0", optional = true } proptest-derive = { version = "0.3.0", optional = true } # test feature tokio-console @@ -212,7 +212,7 @@ tokio-stream = "0.1.14" prost = "0.11.9" tonic = "0.9.2" -proptest = "1.1.0" +proptest = "1.2.0" proptest-derive = "0.3.0" # enable span traces and track caller in tests From 64da8e61357a6461d849fee420de962a023a4dbc Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 25 May 2023 20:44:36 +0000 Subject: [PATCH 017/265] build(deps): bump sentry from 0.31.1 to 0.31.3 (#6764) Bumps [sentry](https://github.com/getsentry/sentry-rust) from 0.31.1 to 0.31.3. - [Release notes](https://github.com/getsentry/sentry-rust/releases) - [Changelog](https://github.com/getsentry/sentry-rust/blob/master/CHANGELOG.md) - [Commits](https://github.com/getsentry/sentry-rust/compare/0.31.1...0.31.3) --- updated-dependencies: - dependency-name: sentry dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 25 +++++++++++++------------ zebrad/Cargo.toml | 2 +- 2 files changed, 14 insertions(+), 13 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 5594fc11e5c..acfb7785ed8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3980,9 +3980,9 @@ checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" [[package]] name = "sentry" -version = "0.31.1" +version = "0.31.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37dd6c0cdca6b1d1ca44cde7fff289f2592a97965afec870faa7b81b9fc87745" +checksum = "de31c6e03322af2175d3c850c5b5e11efcadc01948cd1fb7b5ad0a7c7b6c7ff2" dependencies = [ "httpdate", "reqwest", @@ -3998,9 +3998,9 @@ dependencies = [ [[package]] name = "sentry-backtrace" -version = "0.31.1" +version = "0.31.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c029fe8317cdd75cb2b52c600bab4e2ef64c552198e669ba874340447f330962" +checksum = "264e3ad27da3d1ad81b499dbcceae0a50e0e6ffc4b65b93f47d5180d46827644" dependencies = [ "backtrace", "once_cell", @@ -4010,9 +4010,9 @@ dependencies = [ [[package]] name = "sentry-contexts" -version = "0.31.1" +version = "0.31.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc575098d73c8b942b589ab453b06e4c43527556dd8f95532220d1b54d7c6b4b" +checksum = "7144590f7950647e4df5bd95f234c3aa29124729c54bd2457e1224d701d1a91c" dependencies = [ "hostname", "libc", @@ -4024,9 +4024,9 @@ dependencies = [ [[package]] name = "sentry-core" -version = "0.31.1" +version = "0.31.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "20216140001bbf05895f013abd0dae4df58faee24e016d54cbf107f070bac56b" +checksum = "35614ecf115f55d93583baa02a85cb63acb6567cf91b17690d1147bac1739ca4" dependencies = [ "once_cell", "rand 0.8.5", @@ -4037,10 +4037,11 @@ dependencies = [ [[package]] name = "sentry-tracing" 
-version = "0.31.1" +version = "0.31.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ef4111647923c797687094bc792b8da938c4b0d64fab331d5b7a7de41964de8" +checksum = "eec56ebafd7cfc1175bccdf277be582ccc3308b8c353dca5831261a967a6e28c" dependencies = [ + "sentry-backtrace", "sentry-core", "tracing-core", "tracing-subscriber 0.3.17", @@ -4048,9 +4049,9 @@ dependencies = [ [[package]] name = "sentry-types" -version = "0.31.1" +version = "0.31.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7f6959d8cb3a77be27e588eef6ce9a2a469651a556d9de662e4d07e5ace4232" +checksum = "9c56f616602a3b282bf4b4e8e5b4d10bcf9412a987df91c592b95a1f6ef1ee43" dependencies = [ "debugid", "getrandom 0.2.9", diff --git a/zebrad/Cargo.toml b/zebrad/Cargo.toml index ed79a696a69..95369ce22c5 100644 --- a/zebrad/Cargo.toml +++ b/zebrad/Cargo.toml @@ -152,7 +152,7 @@ num-integer = "0.1.45" rand = { version = "0.8.5", package = "rand" } # prod feature sentry -sentry = { version = "0.31.1", default-features = false, features = ["backtrace", "contexts", "reqwest", "rustls", "tracing"], optional = true } +sentry = { version = "0.31.3", default-features = false, features = ["backtrace", "contexts", "reqwest", "rustls", "tracing"], optional = true } # prod feature flamegraph tracing-flame = { version = "0.2.0", optional = true } From 8eda11e992963ac2ca170d5bf897e3f22994f8a4 Mon Sep 17 00:00:00 2001 From: Marek Date: Fri, 26 May 2023 11:59:34 +0200 Subject: [PATCH 018/265] change(doc): Fix TOC in `README.md` and simplify the "About" section (#6766) * Remove old sections from the TOC * Simplify the introductory section about Zebra --- README.md | 38 +++++++++++--------------------------- 1 file changed, 11 insertions(+), 27 deletions(-) diff --git a/README.md b/README.md index 9090ccb5c5c..12e659e646f 100644 --- a/README.md +++ b/README.md @@ -7,10 +7,7 @@ ## Contents -- [Contents](#contents) - [About](#about) - - [Using Zebra](#using-zebra) -- 
[Release Candidates](#release-candidates) - [Getting Started](#getting-started) - [Docker](#docker) - [Building Zebra](#building-zebra) @@ -25,27 +22,16 @@ ## About [Zebra](https://zebra.zfnd.org/) is the Zcash Foundation's independent, -consensus-compatible implementation of a Zcash node, currently under -development. It can be used to join the Zcash peer-to-peer network, which helps -keeping Zcash working by validating and broadcasting transactions, and maintaining -the Zcash blockchain state in a distributed manner. +consensus-compatible implementation of a Zcash node. -[Zcash](https://doc.zebra.zfnd.org/zebrad/index.html#about-zcash) -is a cryptocurrency designed to preserve the user's privacy. -If you just want to send and receive Zcash then you don't need to use Zebra -directly. You can download a Zcash wallet application which will handle that -for you. - -Please [join us on Discord](https://discord.gg/na6QZNd) if you'd -like to find out more or get involved! - -### Using Zebra - -You would want to run Zebra if you want to contribute to the -Zcash network: the more nodes are run, the more reliable the network will be -in terms of speed and resistance to denial of service attacks, for example. +Zebra's network stack is interoperable with `zcashd`, and Zebra implements all +the features required to reach Zcash network consensus, including the validation +of all the consensus rules for the NU5 network upgrade. +[Here](https://doc.zebra.zfnd.org/zebrad/index.html#zebra-advantages) are some +benefits of Zebra. -Zebra validates blocks and transactions, but needs extra software to generate them: +Zebra validates blocks and transactions, but needs extra software to generate +them: - To generate transactions, [run Zebra with `lightwalletd`](https://zebra.zfnd.org/user/lightwalletd.html). @@ -54,11 +40,8 @@ Zebra validates blocks and transactions, but needs extra software to generate th miner with Zebra's mining JSON-RPCs. 
Mining support is currently incomplete, experimental, and off by default. -Zebra's network stack is interoperable with `zcashd`, and Zebra implements all -the features required to reach Zcash network consensus, including the validation -of all the consensus rules for the NU5 network upgrade. -[Here](https://doc.zebra.zfnd.org/zebrad/index.html#zebra-advantages) are some -benefits of Zebra. +Please [join us on Discord](https://discord.gg/na6QZNd) if you'd like to find +out more or get involved! ## Getting Started @@ -133,6 +116,7 @@ book for more details. #### Optional Features You can also build Zebra with additional [Cargo features](https://doc.rust-lang.org/cargo/reference/features.html#command-line-feature-options): + - `sentry` for [Sentry monitoring](https://zebra.zfnd.org/user/requirements.html#sentry-production-monitoring) - `journald` for [`journald` logging](https://zebra.zfnd.org/user/tracing.html#journald-logging) - `prometheus` for [Prometheus metrics](https://doc.zebra.zfnd.org/zebrad/#metrics) From c68a10f8798b7683d1b8c302525473d6c70d227b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 26 May 2023 19:46:02 +0000 Subject: [PATCH 019/265] build(deps): bump tj-actions/changed-files from 35.9.2 to 36.0.4 (#6769) Bumps [tj-actions/changed-files](https://github.com/tj-actions/changed-files) from 35.9.2 to 36.0.4. - [Release notes](https://github.com/tj-actions/changed-files/releases) - [Changelog](https://github.com/tj-actions/changed-files/blob/main/HISTORY.md) - [Commits](https://github.com/tj-actions/changed-files/compare/v35.9.2...v36.0.4) --- updated-dependencies: - dependency-name: tj-actions/changed-files dependency-type: direct:production update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/lint.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 8ebe0fca4a4..4c714dd0245 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -37,7 +37,7 @@ jobs: - name: Rust files id: changed-files-rust - uses: tj-actions/changed-files@v35.9.2 + uses: tj-actions/changed-files@v36.0.4 with: files: | **/*.rs @@ -49,7 +49,7 @@ jobs: - name: Workflow files id: changed-files-workflows - uses: tj-actions/changed-files@v35.9.2 + uses: tj-actions/changed-files@v36.0.4 with: files: | .github/workflows/*.yml From e8ae564b1cbebebd2b0038dc90ba3fe286bcebf8 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 29 May 2023 11:41:17 +0000 Subject: [PATCH 020/265] build(deps): bump regex from 1.8.2 to 1.8.3 (#6770) Bumps [regex](https://github.com/rust-lang/regex) from 1.8.2 to 1.8.3. - [Release notes](https://github.com/rust-lang/regex/releases) - [Changelog](https://github.com/rust-lang/regex/blob/master/CHANGELOG.md) - [Commits](https://github.com/rust-lang/regex/compare/1.8.2...1.8.3) --- updated-dependencies: - dependency-name: regex dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- zebra-network/Cargo.toml | 2 +- zebra-state/Cargo.toml | 2 +- zebra-test/Cargo.toml | 2 +- zebra-utils/Cargo.toml | 2 +- zebrad/Cargo.toml | 2 +- 6 files changed, 7 insertions(+), 7 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index acfb7785ed8..eece6ba20eb 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3610,9 +3610,9 @@ dependencies = [ [[package]] name = "regex" -version = "1.8.2" +version = "1.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d1a59b5d8e97dee33696bf13c5ba8ab85341c002922fba050069326b9c498974" +checksum = "81ca098a9821bd52d6b24fd8b10bd081f47d39c22778cafaa75a2857a62c6390" dependencies = [ "aho-corasick 1.0.1", "memchr", diff --git a/zebra-network/Cargo.toml b/zebra-network/Cargo.toml index 7e1a2c7e1dc..1dffb4b8826 100644 --- a/zebra-network/Cargo.toml +++ b/zebra-network/Cargo.toml @@ -35,7 +35,7 @@ ordered-map = "0.4.2" pin-project = "1.1.0" rand = { version = "0.8.5", package = "rand" } rayon = "1.7.0" -regex = "1.8.2" +regex = "1.8.3" serde = { version = "1.0.163", features = ["serde_derive"] } thiserror = "1.0.40" diff --git a/zebra-state/Cargo.toml b/zebra-state/Cargo.toml index 798d6404592..13c54295b4a 100644 --- a/zebra-state/Cargo.toml +++ b/zebra-state/Cargo.toml @@ -43,7 +43,7 @@ itertools = "0.10.5" lazy_static = "1.4.0" metrics = "0.21.0" mset = "0.1.1" -regex = "1.8.2" +regex = "1.8.3" rlimit = "0.9.1" rocksdb = { version = "0.21.0", default_features = false, features = ["lz4"] } serde = { version = "1.0.163", features = ["serde_derive"] } diff --git a/zebra-test/Cargo.toml b/zebra-test/Cargo.toml index 0dc676484ad..310de6ce7ec 100644 --- a/zebra-test/Cargo.toml +++ b/zebra-test/Cargo.toml @@ -15,7 +15,7 @@ insta = "1.29.0" proptest = "1.2.0" once_cell = "1.17.1" rand = { version = "0.8.5", package = "rand" } -regex = "1.8.2" +regex = "1.8.3" tokio = { version = 
"1.28.0", features = ["full", "tracing", "test-util"] } tower = { version = "0.4.13", features = ["util"] } diff --git a/zebra-utils/Cargo.toml b/zebra-utils/Cargo.toml index dc076ab5c07..8d03357073e 100644 --- a/zebra-utils/Cargo.toml +++ b/zebra-utils/Cargo.toml @@ -67,7 +67,7 @@ zebra-chain = { path = "../zebra-chain" } itertools = { version = "0.10.5", optional = true } # These crates are needed for the search-issue-refs binary -regex = { version = "1.8.2", optional = true } +regex = { version = "1.8.3", optional = true } reqwest = { version = "0.11.18", optional = true } # These crates are needed for the zebra-checkpoints and search-issue-refs binaries diff --git a/zebrad/Cargo.toml b/zebrad/Cargo.toml index 95369ce22c5..6702b905b6d 100644 --- a/zebrad/Cargo.toml +++ b/zebrad/Cargo.toml @@ -195,7 +195,7 @@ abscissa_core = { version = "0.5", features = ["testing"] } hex = "0.4.3" jsonrpc-core = "18.0.0" once_cell = "1.17.1" -regex = "1.8.2" +regex = "1.8.3" semver = "1.0.17" # zebra-rpc needs the preserve_order feature, it also makes test results more stable From 636dd424d521afed48e0129978c12811f2308dc0 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 29 May 2023 11:41:36 +0000 Subject: [PATCH 021/265] build(deps): bump criterion from 0.5.0 to 0.5.1 (#6771) Bumps [criterion](https://github.com/bheisler/criterion.rs) from 0.5.0 to 0.5.1. - [Changelog](https://github.com/bheisler/criterion.rs/blob/master/CHANGELOG.md) - [Commits](https://github.com/bheisler/criterion.rs/compare/0.5.0...0.5.1) --- updated-dependencies: - dependency-name: criterion dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- zebra-chain/Cargo.toml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index eece6ba20eb..70377927f2e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -881,9 +881,9 @@ dependencies = [ [[package]] name = "criterion" -version = "0.5.0" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f9c16c823fba76d9643cc387e9677d9771abe0827561381815215c47f808da9" +checksum = "f2b12d017a929603d80db1831cd3a24082f8137ce19c69e6447f54f5fc8d692f" dependencies = [ "anes", "cast", diff --git a/zebra-chain/Cargo.toml b/zebra-chain/Cargo.toml index ee5c3627bb6..b54c1cf9f64 100644 --- a/zebra-chain/Cargo.toml +++ b/zebra-chain/Cargo.toml @@ -113,7 +113,7 @@ zebra-test = { path = "../zebra-test/", optional = true } [dev-dependencies] # Benchmarks -criterion = { version = "0.5.0", features = ["html_reports"] } +criterion = { version = "0.5.1", features = ["html_reports"] } # Error Handling & Formatting color-eyre = "0.6.2" From c2516e24e238309c7d828478eb11a28c6b516cf7 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 29 May 2023 16:31:26 +0000 Subject: [PATCH 022/265] build(deps): bump tj-actions/changed-files from 36.0.4 to 36.0.8 (#6774) Bumps [tj-actions/changed-files](https://github.com/tj-actions/changed-files) from 36.0.4 to 36.0.8. - [Release notes](https://github.com/tj-actions/changed-files/releases) - [Changelog](https://github.com/tj-actions/changed-files/blob/main/HISTORY.md) - [Commits](https://github.com/tj-actions/changed-files/compare/v36.0.4...v36.0.8) --- updated-dependencies: - dependency-name: tj-actions/changed-files dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/lint.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 4c714dd0245..9ff29da112e 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -37,7 +37,7 @@ jobs: - name: Rust files id: changed-files-rust - uses: tj-actions/changed-files@v36.0.4 + uses: tj-actions/changed-files@v36.0.8 with: files: | **/*.rs @@ -49,7 +49,7 @@ jobs: - name: Workflow files id: changed-files-workflows - uses: tj-actions/changed-files@v36.0.4 + uses: tj-actions/changed-files@v36.0.8 with: files: | .github/workflows/*.yml From ffb83bb318e66ff0b537b01e20d24992c91d5237 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 29 May 2023 19:16:36 +0000 Subject: [PATCH 023/265] build(deps): bump arduino/setup-protoc from 1.2.0 to 1.3.0 (#6773) Bumps [arduino/setup-protoc](https://github.com/arduino/setup-protoc) from 1.2.0 to 1.3.0. - [Release notes](https://github.com/arduino/setup-protoc/releases) - [Commits](https://github.com/arduino/setup-protoc/compare/v1.2.0...v1.3.0) --- updated-dependencies: - dependency-name: arduino/setup-protoc dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/build-crates-individually.yml | 2 +- .github/workflows/continous-integration-os.yml | 4 ++-- .github/workflows/docs.yml | 2 +- .github/workflows/lint.yml | 6 +++--- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/.github/workflows/build-crates-individually.yml b/.github/workflows/build-crates-individually.yml index 3c68261c57d..756a37b088a 100644 --- a/.github/workflows/build-crates-individually.yml +++ b/.github/workflows/build-crates-individually.yml @@ -112,7 +112,7 @@ jobs: - uses: r7kamura/rust-problem-matchers@v1.3.0 - name: Install last version of Protoc - uses: arduino/setup-protoc@v1.2.0 + uses: arduino/setup-protoc@v1.3.0 with: # TODO: increase to latest version after https://github.com/arduino/setup-protoc/issues/33 is fixed version: '3.20.1' diff --git a/.github/workflows/continous-integration-os.yml b/.github/workflows/continous-integration-os.yml index 0fe71185dbc..4534762c429 100644 --- a/.github/workflows/continous-integration-os.yml +++ b/.github/workflows/continous-integration-os.yml @@ -89,7 +89,7 @@ jobs: - uses: r7kamura/rust-problem-matchers@v1.3.0 - name: Install last version of Protoc - uses: arduino/setup-protoc@v1.2.0 + uses: arduino/setup-protoc@v1.3.0 with: # TODO: increase to latest version after https://github.com/arduino/setup-protoc/issues/33 is fixed version: '3.20.1' @@ -225,7 +225,7 @@ jobs: - uses: r7kamura/rust-problem-matchers@v1.3.0 - name: Install last version of Protoc - uses: arduino/setup-protoc@v1.2.0 + uses: arduino/setup-protoc@v1.3.0 with: # TODO: increase to latest version after https://github.com/arduino/setup-protoc/issues/33 is fixed version: '3.20.1' diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml index b7e668a258e..e889e7711cd 100644 --- a/.github/workflows/docs.yml +++ b/.github/workflows/docs.yml @@ -45,7 +45,7 @@ jobs: persist-credentials: 
false - name: Install last version of Protoc - uses: arduino/setup-protoc@v1.2.0 + uses: arduino/setup-protoc@v1.3.0 with: # TODO: increase to latest version after https://github.com/arduino/setup-protoc/issues/33 is fixed version: '3.20.1' diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 9ff29da112e..9dfc59ba9b0 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -67,7 +67,7 @@ jobs: persist-credentials: false - name: Install last version of Protoc - uses: arduino/setup-protoc@v1.2.0 + uses: arduino/setup-protoc@v1.3.0 with: # TODO: increase to latest version after https://github.com/arduino/setup-protoc/issues/33 is fixed version: '3.20.1' @@ -118,7 +118,7 @@ jobs: - uses: r7kamura/rust-problem-matchers@v1.3.0 - name: Install last version of Protoc - uses: arduino/setup-protoc@v1.2.0 + uses: arduino/setup-protoc@v1.3.0 with: # TODO: increase to latest version after https://github.com/arduino/setup-protoc/issues/33 is fixed version: '3.20.1' @@ -157,7 +157,7 @@ jobs: - uses: r7kamura/rust-problem-matchers@v1.3.0 - name: Install last version of Protoc - uses: arduino/setup-protoc@v1.2.0 + uses: arduino/setup-protoc@v1.3.0 with: # TODO: increase to latest version after https://github.com/arduino/setup-protoc/issues/33 is fixed version: '3.20.1' From 61d3c34a780a9dd1aba760ee992ae6cdc981fce2 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 29 May 2023 21:42:30 +0000 Subject: [PATCH 024/265] build(deps): bump tokio from 1.28.1 to 1.28.2 (#6777) Bumps [tokio](https://github.com/tokio-rs/tokio) from 1.28.1 to 1.28.2. - [Release notes](https://github.com/tokio-rs/tokio/releases) - [Commits](https://github.com/tokio-rs/tokio/compare/tokio-1.28.1...tokio-1.28.2) --- updated-dependencies: - dependency-name: tokio dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- tower-batch/Cargo.toml | 4 ++-- tower-fallback/Cargo.toml | 2 +- zebra-chain/Cargo.toml | 4 ++-- zebra-consensus/Cargo.toml | 4 ++-- zebra-network/Cargo.toml | 4 ++-- zebra-rpc/Cargo.toml | 4 ++-- zebra-state/Cargo.toml | 4 ++-- zebra-test/Cargo.toml | 2 +- zebra-utils/Cargo.toml | 2 +- zebrad/Cargo.toml | 4 ++-- 11 files changed, 19 insertions(+), 19 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 70377927f2e..57659cb4ced 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4566,9 +4566,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.28.1" +version = "1.28.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0aa32867d44e6f2ce3385e89dceb990188b8bb0fb25b0cf576647a6f98ac5105" +checksum = "94d7b1cfd2aa4011f2de74c2c4c63665e27a71006b0a192dcd2710272e73dfa2" dependencies = [ "autocfg", "bytes", diff --git a/tower-batch/Cargo.toml b/tower-batch/Cargo.toml index 2c874d54ca6..bfbea9d8468 100644 --- a/tower-batch/Cargo.toml +++ b/tower-batch/Cargo.toml @@ -10,7 +10,7 @@ futures = "0.3.28" futures-core = "0.3.28" pin-project = "1.1.0" rayon = "1.7.0" -tokio = { version = "1.28.0", features = ["time", "sync", "tracing", "macros"] } +tokio = { version = "1.28.2", features = ["time", "sync", "tracing", "macros"] } tokio-util = "0.7.8" tower = { version = "0.4.13", features = ["util", "buffer"] } tracing = "0.1.37" @@ -25,7 +25,7 @@ tinyvec = { version = "1.6.0", features = ["rustc_1_55"] } ed25519-zebra = "3.1.0" rand = { version = "0.8.5", package = "rand" } -tokio = { version = "1.28.0", features = ["full", "tracing", "test-util"] } +tokio = { version = "1.28.2", features = ["full", "tracing", "test-util"] } tokio-test = "0.4.2" tower-fallback = { path = "../tower-fallback/" } tower-test = "0.4.0" diff --git a/tower-fallback/Cargo.toml 
b/tower-fallback/Cargo.toml index e13cee274e9..0376a8a2b3a 100644 --- a/tower-fallback/Cargo.toml +++ b/tower-fallback/Cargo.toml @@ -12,6 +12,6 @@ futures-core = "0.3.28" tracing = "0.1.37" [dev-dependencies] -tokio = { version = "1.28.0", features = ["full", "tracing", "test-util"] } +tokio = { version = "1.28.2", features = ["full", "tracing", "test-util"] } zebra-test = { path = "../zebra-test/" } diff --git a/zebra-chain/Cargo.toml b/zebra-chain/Cargo.toml index b54c1cf9f64..2aae13734ef 100644 --- a/zebra-chain/Cargo.toml +++ b/zebra-chain/Cargo.toml @@ -107,7 +107,7 @@ proptest-derive = { version = "0.3.0", optional = true } rand = { version = "0.8.5", optional = true, package = "rand" } rand_chacha = { version = "0.3.1", optional = true } -tokio = { version = "1.28.0", features = ["tracing"], optional = true } +tokio = { version = "1.28.2", features = ["tracing"], optional = true } zebra-test = { path = "../zebra-test/", optional = true } @@ -130,7 +130,7 @@ proptest-derive = "0.3.0" rand = { version = "0.8.5", package = "rand" } rand_chacha = "0.3.1" -tokio = { version = "1.28.0", features = ["full", "tracing", "test-util"] } +tokio = { version = "1.28.2", features = ["full", "tracing", "test-util"] } zebra-test = { path = "../zebra-test/" } diff --git a/zebra-consensus/Cargo.toml b/zebra-consensus/Cargo.toml index cabd507799a..268ddf867ef 100644 --- a/zebra-consensus/Cargo.toml +++ b/zebra-consensus/Cargo.toml @@ -44,7 +44,7 @@ futures = "0.3.28" futures-util = "0.3.28" metrics = "0.21.0" thiserror = "1.0.40" -tokio = { version = "1.28.0", features = ["time", "sync", "tracing", "rt-multi-thread"] } +tokio = { version = "1.28.2", features = ["time", "sync", "tracing", "rt-multi-thread"] } tower = { version = "0.4.13", features = ["timeout", "util", "buffer"] } tracing = "0.1.37" tracing-futures = "0.2.5" @@ -80,7 +80,7 @@ proptest = "1.2.0" proptest-derive = "0.3.0" spandoc = "0.2.2" -tokio = { version = "1.28.0", features = ["full", "tracing", "test-util"] 
} +tokio = { version = "1.28.2", features = ["full", "tracing", "test-util"] } tracing-error = "0.2.0" tracing-subscriber = "0.3.17" diff --git a/zebra-network/Cargo.toml b/zebra-network/Cargo.toml index 1dffb4b8826..c990d362b24 100644 --- a/zebra-network/Cargo.toml +++ b/zebra-network/Cargo.toml @@ -40,7 +40,7 @@ serde = { version = "1.0.163", features = ["serde_derive"] } thiserror = "1.0.40" futures = "0.3.28" -tokio = { version = "1.28.0", features = ["net", "time", "tracing", "macros", "rt-multi-thread"] } +tokio = { version = "1.28.2", features = ["net", "time", "tracing", "macros", "rt-multi-thread"] } tokio-stream = { version = "0.1.14", features = ["sync", "time"] } tokio-util = { version = "0.7.8", features = ["codec"] } tower = { version = "0.4.13", features = ["retry", "discover", "load", "load-shed", "timeout", "util", "buffer"] } @@ -69,7 +69,7 @@ proptest = "1.2.0" proptest-derive = "0.3.0" static_assertions = "1.1.0" -tokio = { version = "1.28.0", features = ["full", "tracing", "test-util"] } +tokio = { version = "1.28.2", features = ["full", "tracing", "test-util"] } toml = "0.7.4" zebra-chain = { path = "../zebra-chain", features = ["proptest-impl"] } diff --git a/zebra-rpc/Cargo.toml b/zebra-rpc/Cargo.toml index fae51928d75..fe9721dbb7a 100644 --- a/zebra-rpc/Cargo.toml +++ b/zebra-rpc/Cargo.toml @@ -47,7 +47,7 @@ num_cpus = "1.15.0" serde_json = { version = "1.0.96", features = ["preserve_order"] } indexmap = { version = "1.9.3", features = ["serde"] } -tokio = { version = "1.28.0", features = ["time", "rt-multi-thread", "macros", "tracing"] } +tokio = { version = "1.28.2", features = ["time", "rt-multi-thread", "macros", "tracing"] } tower = "0.4.13" tracing = "0.1.37" @@ -76,7 +76,7 @@ insta = { version = "1.29.0", features = ["redactions", "json", "ron"] } proptest = "1.2.0" thiserror = "1.0.40" -tokio = { version = "1.28.0", features = ["full", "tracing", "test-util"] } +tokio = { version = "1.28.2", features = ["full", "tracing", 
"test-util"] } zebra-chain = { path = "../zebra-chain", features = ["proptest-impl"] } zebra-consensus = { path = "../zebra-consensus", features = ["proptest-impl"] } diff --git a/zebra-state/Cargo.toml b/zebra-state/Cargo.toml index 13c54295b4a..5c3fd125225 100644 --- a/zebra-state/Cargo.toml +++ b/zebra-state/Cargo.toml @@ -51,7 +51,7 @@ tempfile = "3.5.0" thiserror = "1.0.40" rayon = "1.7.0" -tokio = { version = "1.28.0", features = ["sync", "tracing"] } +tokio = { version = "1.28.2", features = ["sync", "tracing"] } tower = { version = "0.4.13", features = ["buffer", "util"] } tracing = "0.1.37" @@ -87,7 +87,7 @@ proptest-derive = "0.3.0" halo2 = { package = "halo2_proofs", version = "0.3.0" } jubjub = "0.10.0" -tokio = { version = "1.28.0", features = ["full", "tracing", "test-util"] } +tokio = { version = "1.28.2", features = ["full", "tracing", "test-util"] } zebra-chain = { path = "../zebra-chain", features = ["proptest-impl"] } zebra-test = { path = "../zebra-test/" } diff --git a/zebra-test/Cargo.toml b/zebra-test/Cargo.toml index 310de6ce7ec..48320f1a35b 100644 --- a/zebra-test/Cargo.toml +++ b/zebra-test/Cargo.toml @@ -17,7 +17,7 @@ once_cell = "1.17.1" rand = { version = "0.8.5", package = "rand" } regex = "1.8.3" -tokio = { version = "1.28.0", features = ["full", "tracing", "test-util"] } +tokio = { version = "1.28.2", features = ["full", "tracing", "test-util"] } tower = { version = "0.4.13", features = ["util"] } futures = "0.3.28" diff --git a/zebra-utils/Cargo.toml b/zebra-utils/Cargo.toml index 8d03357073e..962c37b03d3 100644 --- a/zebra-utils/Cargo.toml +++ b/zebra-utils/Cargo.toml @@ -71,7 +71,7 @@ regex = { version = "1.8.3", optional = true } reqwest = { version = "0.11.18", optional = true } # These crates are needed for the zebra-checkpoints and search-issue-refs binaries -tokio = { version = "1.28.0", features = ["full"], optional = true } +tokio = { version = "1.28.2", features = ["full"], optional = true } # These crates are needed for 
the block-template-to-proposal binary zebra-rpc = { path = "../zebra-rpc", optional = true } diff --git a/zebrad/Cargo.toml b/zebrad/Cargo.toml index 6702b905b6d..7f202405753 100644 --- a/zebrad/Cargo.toml +++ b/zebrad/Cargo.toml @@ -126,7 +126,7 @@ toml = "0.7.4" futures = "0.3.28" rayon = "1.7.0" -tokio = { version = "1.28.0", features = ["time", "rt-multi-thread", "macros", "tracing", "signal"] } +tokio = { version = "1.28.2", features = ["time", "rt-multi-thread", "macros", "tracing", "signal"] } tower = { version = "0.4.13", features = ["hedge", "limit"] } pin-project = "1.1.0" @@ -205,7 +205,7 @@ tempfile = "3.5.0" hyper = { version = "0.14.26", features = ["http1", "http2", "server"]} tracing-test = { version = "0.2.4", features = ["no-env-filter"] } -tokio = { version = "1.28.0", features = ["full", "tracing", "test-util"] } +tokio = { version = "1.28.2", features = ["full", "tracing", "test-util"] } tokio-stream = "0.1.14" # test feature lightwalletd-grpc-tests From 432774c4e402c2f762c07acceb7e2135938693bc Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 29 May 2023 21:42:49 +0000 Subject: [PATCH 025/265] build(deps): bump log from 0.4.17 to 0.4.18 (#6779) Bumps [log](https://github.com/rust-lang/log) from 0.4.17 to 0.4.18. - [Release notes](https://github.com/rust-lang/log/releases) - [Changelog](https://github.com/rust-lang/log/blob/master/CHANGELOG.md) - [Commits](https://github.com/rust-lang/log/compare/0.4.17...0.4.18) --- updated-dependencies: - dependency-name: log dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 7 ++----- zebrad/Cargo.toml | 2 +- 2 files changed, 3 insertions(+), 6 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 57659cb4ced..e5bcd8e7216 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2376,12 +2376,9 @@ dependencies = [ [[package]] name = "log" -version = "0.4.17" +version = "0.4.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "abb12e687cfb44aa40f41fc3978ef76448f9b6038cad6aef4259d3c095a2382e" -dependencies = [ - "cfg-if 1.0.0", -] +checksum = "518ef76f2f87365916b142844c16d8fefd85039bc5699050210a7778ee1cd1de" [[package]] name = "lz4-sys" diff --git a/zebrad/Cargo.toml b/zebrad/Cargo.toml index 7f202405753..df897cf0b20 100644 --- a/zebrad/Cargo.toml +++ b/zebrad/Cargo.toml @@ -171,7 +171,7 @@ metrics-exporter-prometheus = { version = "0.12.0", default-features = false, fe # # zebrad uses tracing for logging, # we only use `log` to set and print the static log levels in transitive dependencies -log = "0.4.17" +log = "0.4.18" # prod feature progress-bar howudoin = { version = "0.1.2", features = ["term-line"], optional = true } From edc13e336c2401668dd13b338efe79daeea998d4 Mon Sep 17 00:00:00 2001 From: Gustavo Valverde Date: Mon, 29 May 2023 19:58:57 -0400 Subject: [PATCH 026/265] fix(release): do not hardcode the name of our images (#6710) * fix(release): do not hardcode the name of our images Our main image in DockerHub is called `zebra` not `zebrad`. 
This hardcoded value is also causing the mining image to also be called `zebra` instead of `zebrad-mining-rpcs-testnet` * fix(release): use same naming convention for `.experimental` * chore: fix comment --- .github/workflows/build-docker-image.yml | 2 +- .github/workflows/release-binaries.yml | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/workflows/build-docker-image.yml b/.github/workflows/build-docker-image.yml index 2ef710ba416..502ac48cc76 100644 --- a/.github/workflows/build-docker-image.yml +++ b/.github/workflows/build-docker-image.yml @@ -89,7 +89,7 @@ jobs: # list of Docker images to use as base name for tags images: | us-docker.pkg.dev/${{ vars.GCP_PROJECT }}/zebra/${{ inputs.image_name }} - zfnd/zebra,enable=${{ github.event_name == 'release' && !github.event.release.prerelease }} + zfnd/${{ inputs.image_name }},enable=${{ github.event_name == 'release' && !github.event.release.prerelease }} # appends inputs.tag_suffix to image tags/names flavor: | suffix=${{ inputs.tag_suffix }} diff --git a/.github/workflows/release-binaries.yml b/.github/workflows/release-binaries.yml index d307fd1ee51..8366f51d23a 100644 --- a/.github/workflows/release-binaries.yml +++ b/.github/workflows/release-binaries.yml @@ -18,14 +18,14 @@ jobs: # Each time this workflow is executed, a build will be triggered to create a new image # with the corresponding tags using information from git - # The image will be named `zebrad:` + # The image will be named `zebra:` build: name: Build Release Docker uses: ./.github/workflows/build-docker-image.yml with: dockerfile_path: ./docker/Dockerfile dockerfile_target: runtime - image_name: zebrad + image_name: zebra network: Mainnet checkpoint_sync: true rust_backtrace: '1' @@ -34,14 +34,14 @@ jobs: # This step needs access to Docker Hub secrets to run successfully secrets: inherit - # The image will be named `zebrad-mining-rpcs-testnet:.experimental` + # The image will be named `zebra:.experimental` 
build-mining-testnet: name: Build Release Testnet Mining Docker uses: ./.github/workflows/build-docker-image.yml with: dockerfile_path: ./docker/Dockerfile dockerfile_target: runtime - image_name: zebrad-mining-rpcs-testnet + image_name: zebra # TODO: change this to `-experimental` when we release Zebra `1.0.0` tag_suffix: .experimental network: Testnet From 7b6cceeb45ca206ed6b2fb0574b44cfa0c550d3b Mon Sep 17 00:00:00 2001 From: teor Date: Tue, 30 May 2023 09:59:12 +1000 Subject: [PATCH 027/265] Add private IP address known issue to README (#6783) --- README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/README.md b/README.md index 12e659e646f..9e3a253a928 100644 --- a/README.md +++ b/README.md @@ -149,6 +149,8 @@ section of the Zebra book for more details. There are a few bugs in Zebra that we're still working on fixing: +- Zebra currently gossips and connects to [private IP addresses](https://en.wikipedia.org/wiki/IP_address#Private_addresses), we want to [disable private IPs but provide a config (#3117)](https://github.com/ZcashFoundation/zebra/issues/3117) in an upcoming release + - If Zebra fails downloading the Zcash parameters, use [the Zcash parameters download script](https://github.com/zcash/zcash/blob/master/zcutil/fetch-params.sh) instead. - Block download and verification sometimes times out during Zebra's initial sync [#5709](https://github.com/ZcashFoundation/zebra/issues/5709). The full sync still finishes reasonably quickly. From 604a96e9adabd90531d391467914ebfa4edfe99f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 30 May 2023 02:01:31 +0000 Subject: [PATCH 028/265] build(deps): bump bs58 from 0.4.0 to 0.5.0 (#6750) * build(deps): bump bs58 from 0.4.0 to 0.5.0 Bumps [bs58](https://github.com/Nullus157/bs58-rs) from 0.4.0 to 0.5.0. 
- [Changelog](https://github.com/Nullus157/bs58-rs/blob/main/CHANGELOG.md) - [Commits](https://github.com/Nullus157/bs58-rs/compare/0.4.0...0.5.0) --- updated-dependencies: - dependency-name: bs58 dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] * add `b258` version `0.4.0` to `deny.toml` --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Alfredo Garcia --- Cargo.lock | 14 ++++++++++++-- deny.toml | 3 +++ zebra-chain/Cargo.toml | 2 +- 3 files changed, 16 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index e5bcd8e7216..9ea0a796978 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -509,6 +509,16 @@ dependencies = [ "sha2 0.9.9", ] +[[package]] +name = "bs58" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f5353f36341f7451062466f0b755b96ac3a9547e4d7f6b70d603fc721a7d7896" +dependencies = [ + "sha2 0.10.6", + "tinyvec", +] + [[package]] name = "bstr" version = "1.4.0" @@ -5588,7 +5598,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "52be35a205369d480378646bff9c9fedafd8efe8af1e0e54bb858f405883f2b2" dependencies = [ "bech32", - "bs58", + "bs58 0.4.0", "f4jumble", "zcash_encoding", ] @@ -5726,7 +5736,7 @@ dependencies = [ "bitvec", "blake2b_simd", "blake2s_simd", - "bs58", + "bs58 0.5.0", "byteorder", "chrono", "color-eyre", diff --git a/deny.toml b/deny.toml index 721cadef5bc..683aa100196 100644 --- a/deny.toml +++ b/deny.toml @@ -63,6 +63,9 @@ skip-tree = [ # wait for zcash_primitives to remove duplicated dependencies { name = "block-buffer", version = "=0.9.0" }, + # wait for zcash_address to upgrade + { name = "bs58", version = "=0.4.0" }, + # zebra-utils dependencies # wait for structopt upgrade (or upgrade to clap 4) diff --git a/zebra-chain/Cargo.toml b/zebra-chain/Cargo.toml index 2aae13734ef..4f1bfb25723 100644 --- 
a/zebra-chain/Cargo.toml +++ b/zebra-chain/Cargo.toml @@ -43,7 +43,7 @@ bitflags = "2.2.1" bitflags-serde-legacy = "0.1.1" blake2b_simd = "1.0.1" blake2s_simd = "1.0.1" -bs58 = { version = "0.4.0", features = ["check"] } +bs58 = { version = "0.5.0", features = ["check"] } byteorder = "1.4.3" equihash = "0.2.0" group = "0.13.0" From 964cdde191578698ff704eced6c156b37fcaf137 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 30 May 2023 02:01:58 +0000 Subject: [PATCH 029/265] build(deps): bump indicatif from 0.17.3 to 0.17.4 (#6778) Bumps [indicatif](https://github.com/console-rs/indicatif) from 0.17.3 to 0.17.4. - [Release notes](https://github.com/console-rs/indicatif/releases) - [Commits](https://github.com/console-rs/indicatif/compare/0.17.3...0.17.4) --- updated-dependencies: - dependency-name: indicatif dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 7 ++++--- zebrad/Cargo.toml | 2 +- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 9ea0a796978..20426f74024 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2072,13 +2072,14 @@ dependencies = [ [[package]] name = "indicatif" -version = "0.17.3" +version = "0.17.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cef509aa9bc73864d6756f0d34d35504af3cf0844373afe9b8669a5b8005a729" +checksum = "db45317f37ef454e6519b6c3ed7d377e5f23346f0823f86e65ca36912d1d0ef8" dependencies = [ "console", + "instant", "number_prefix", - "portable-atomic 0.3.20", + "portable-atomic 1.3.2", "unicode-width", ] diff --git a/zebrad/Cargo.toml b/zebrad/Cargo.toml index df897cf0b20..70e51dd83f6 100644 --- a/zebrad/Cargo.toml +++ b/zebrad/Cargo.toml @@ -175,7 +175,7 @@ log = "0.4.18" # prod feature progress-bar howudoin = { version = "0.1.2", features = 
["term-line"], optional = true } -indicatif = { version = "0.17.3", optional = true } +indicatif = { version = "0.17.4", optional = true } # test feature proptest-impl proptest = { version = "1.2.0", optional = true } From a6f9820aa3f6ebe4d0b9aaf0f502787eda3223a8 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 30 May 2023 02:02:27 +0000 Subject: [PATCH 030/265] build(deps): bump once_cell from 1.17.1 to 1.17.2 (#6776) Bumps [once_cell](https://github.com/matklad/once_cell) from 1.17.1 to 1.17.2. - [Changelog](https://github.com/matklad/once_cell/blob/master/CHANGELOG.md) - [Commits](https://github.com/matklad/once_cell/compare/v1.17.1...v1.17.2) --- updated-dependencies: - dependency-name: once_cell dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- zebra-consensus/Cargo.toml | 2 +- zebra-state/Cargo.toml | 2 +- zebra-test/Cargo.toml | 2 +- zebrad/Cargo.toml | 2 +- 5 files changed, 6 insertions(+), 6 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 20426f74024..d23982d5a8b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2755,9 +2755,9 @@ dependencies = [ [[package]] name = "once_cell" -version = "1.17.1" +version = "1.17.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b7e5500299e16ebb147ae15a00a942af264cf3688f47923b8fc2cd5858f23ad3" +checksum = "9670a07f94779e00908f3e686eab508878ebb390ba6e604d3a284c00e8d0487b" [[package]] name = "oorandom" diff --git a/zebra-consensus/Cargo.toml b/zebra-consensus/Cargo.toml index 268ddf867ef..f77f3399c6a 100644 --- a/zebra-consensus/Cargo.toml +++ b/zebra-consensus/Cargo.toml @@ -37,7 +37,7 @@ rayon = "1.7.0" chrono = { version = "0.4.24", default-features = false, features = ["clock", "std"] } displaydoc = "0.2.4" lazy_static = "1.4.0" -once_cell = "1.17.1" 
+once_cell = "1.17.2" serde = { version = "1.0.163", features = ["serde_derive"] } futures = "0.3.28" diff --git a/zebra-state/Cargo.toml b/zebra-state/Cargo.toml index 5c3fd125225..34fffc10b72 100644 --- a/zebra-state/Cargo.toml +++ b/zebra-state/Cargo.toml @@ -75,7 +75,7 @@ color-eyre = "0.6.2" # Enable a feature that makes tinyvec compile much faster. tinyvec = { version = "1.6.0", features = ["rustc_1_55"] } -once_cell = "1.17.1" +once_cell = "1.17.2" spandoc = "0.2.2" hex = { version = "0.4.3", features = ["serde"] } diff --git a/zebra-test/Cargo.toml b/zebra-test/Cargo.toml index 48320f1a35b..2d9e973583c 100644 --- a/zebra-test/Cargo.toml +++ b/zebra-test/Cargo.toml @@ -13,7 +13,7 @@ indexmap = "1.9.3" lazy_static = "1.4.0" insta = "1.29.0" proptest = "1.2.0" -once_cell = "1.17.1" +once_cell = "1.17.2" rand = { version = "0.8.5", package = "rand" } regex = "1.8.3" diff --git a/zebrad/Cargo.toml b/zebrad/Cargo.toml index 70e51dd83f6..6a0b40bbd31 100644 --- a/zebrad/Cargo.toml +++ b/zebrad/Cargo.toml @@ -194,7 +194,7 @@ tonic-build = { version = "0.9.2", optional = true } abscissa_core = { version = "0.5", features = ["testing"] } hex = "0.4.3" jsonrpc-core = "18.0.0" -once_cell = "1.17.1" +once_cell = "1.17.2" regex = "1.8.3" semver = "1.0.17" From ed1d15be7af061bbdec51dd4c569cfbb2a715e67 Mon Sep 17 00:00:00 2001 From: teor Date: Tue, 30 May 2023 22:56:44 +1000 Subject: [PATCH 031/265] fix(ci): Disable unnecessary GitHub Actions runner checks (#6789) * Disable unnecessary OS checks * Remove corresponding patch jobs --- .github/workflows/continous-integration-os.patch.yml | 5 ++++- .github/workflows/continous-integration-os.yml | 10 +++++++++- 2 files changed, 13 insertions(+), 2 deletions(-) diff --git a/.github/workflows/continous-integration-os.patch.yml b/.github/workflows/continous-integration-os.patch.yml index 8619ae3c541..2f5eea44f98 100644 --- a/.github/workflows/continous-integration-os.patch.yml +++ 
b/.github/workflows/continous-integration-os.patch.yml @@ -30,6 +30,9 @@ jobs: rust: beta - os: macos-latest features: " --features getblocktemplate-rpcs" + - os: ubuntu-latest + rust: beta + features: " --features getblocktemplate-rpcs" steps: - run: 'echo "No build required"' @@ -56,7 +59,7 @@ jobs: checks: - bans - sources - features: ['', '--all-features', '--no-default-features'] + features: ['', '--all-features'] steps: - run: 'echo "No build required"' diff --git a/.github/workflows/continous-integration-os.yml b/.github/workflows/continous-integration-os.yml index 4534762c429..c6749d23e86 100644 --- a/.github/workflows/continous-integration-os.yml +++ b/.github/workflows/continous-integration-os.yml @@ -81,6 +81,11 @@ jobs: rust: beta - os: macos-latest features: " --features getblocktemplate-rpcs" + # getblocktemplate-rpcs is an experimental feature, so we just need to test it on stable Rust + # beta is unlikely to fail just for this feature, and if it does, we can fix it when it reaches stable. 
+ - os: ubuntu-latest + rust: beta + features: " --features getblocktemplate-rpcs" steps: - uses: actions/checkout@v3.5.2 @@ -252,7 +257,10 @@ jobs: checks: - bans - sources - features: ['', '--all-features', '--no-default-features'] + # We don't need to check `--no-default-features` here, because (except in very rare cases): + # - disabling features isn't going to add duplicate dependencies + # - disabling features isn't going to add more crate sources + features: ['', '--all-features'] # We always want to run the --all-features job, because it gives accurate "skip tree root was not found" warnings fail-fast: false From 54699263a63bc63140216f4527de8b5c28045f2c Mon Sep 17 00:00:00 2001 From: teor Date: Tue, 30 May 2023 22:57:08 +1000 Subject: [PATCH 032/265] Increase config test timeouts (#6787) --- .github/workflows/continous-integration-docker.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/continous-integration-docker.yml b/.github/workflows/continous-integration-docker.yml index 01ff979aa51..b358da96976 100644 --- a/.github/workflows/continous-integration-docker.yml +++ b/.github/workflows/continous-integration-docker.yml @@ -249,7 +249,7 @@ jobs: # Test that Zebra works using the default config with the latest Zebra version test-configuration-file: name: Test Zebra default Docker config file - timeout-minutes: 5 + timeout-minutes: 15 runs-on: ubuntu-latest needs: build if: ${{ github.event.inputs.regenerate-disks != 'true' && github.event.inputs.run-full-sync != 'true' && github.event.inputs.run-lwd-sync != 'true' }} @@ -274,7 +274,7 @@ jobs: # Test that Zebra works using the $ZEBRA_CONF_PATH config test-zebra-conf-path: name: Test Zebra custom Docker config file - timeout-minutes: 5 + timeout-minutes: 15 runs-on: ubuntu-latest needs: build if: ${{ github.event.inputs.regenerate-disks != 'true' && github.event.inputs.run-full-sync != 'true' && github.event.inputs.run-lwd-sync != 'true' }} From 
6f8c98123efd1aa3a98ecae0b648cd522e7ff35f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 30 May 2023 12:57:25 +0000 Subject: [PATCH 033/265] build(deps): bump chrono from 0.4.24 to 0.4.25 (#6775) Bumps [chrono](https://github.com/chronotope/chrono) from 0.4.24 to 0.4.25. - [Release notes](https://github.com/chronotope/chrono/releases) - [Changelog](https://github.com/chronotope/chrono/blob/main/CHANGELOG.md) - [Commits](https://github.com/chronotope/chrono/compare/v0.4.24...v0.4.25) --- updated-dependencies: - dependency-name: chrono dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 12 +++++++++--- zebra-chain/Cargo.toml | 2 +- zebra-consensus/Cargo.toml | 2 +- zebra-network/Cargo.toml | 2 +- zebra-rpc/Cargo.toml | 2 +- zebra-state/Cargo.toml | 2 +- zebrad/Cargo.toml | 2 +- 7 files changed, 15 insertions(+), 9 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d23982d5a8b..a13d7785c45 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -121,6 +121,12 @@ dependencies = [ "memchr", ] +[[package]] +name = "android-tzdata" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e999941b234f3131b00bc13c22d06e8c5ff726d1b6318ac7eb276997bbb4fef0" + [[package]] name = "android_system_properties" version = "0.1.5" @@ -647,13 +653,13 @@ dependencies = [ [[package]] name = "chrono" -version = "0.4.24" +version = "0.4.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e3c5919066adf22df73762e50cffcde3a758f2a848b113b586d1f86728b673b" +checksum = "fdbc37d37da9e5bce8173f3a41b71d9bf3c674deebbaceacd0ebdabde76efb03" dependencies = [ + "android-tzdata", "iana-time-zone", "js-sys", - "num-integer", "num-traits", "serde", "time 0.1.45", diff --git a/zebra-chain/Cargo.toml b/zebra-chain/Cargo.toml 
index 4f1bfb25723..b20c49a0786 100644 --- a/zebra-chain/Cargo.toml +++ b/zebra-chain/Cargo.toml @@ -69,7 +69,7 @@ zcash_note_encryption = "0.3.0" zcash_primitives = { version = "0.11.0", features = ["transparent-inputs"] } # Time -chrono = { version = "0.4.24", default-features = false, features = ["clock", "std", "serde"] } +chrono = { version = "0.4.25", default-features = false, features = ["clock", "std", "serde"] } humantime = "2.1.0" # Error Handling & Formatting diff --git a/zebra-consensus/Cargo.toml b/zebra-consensus/Cargo.toml index f77f3399c6a..5eb6f37cdd8 100644 --- a/zebra-consensus/Cargo.toml +++ b/zebra-consensus/Cargo.toml @@ -34,7 +34,7 @@ jubjub = "0.10.0" rand = { version = "0.8.5", package = "rand" } rayon = "1.7.0" -chrono = { version = "0.4.24", default-features = false, features = ["clock", "std"] } +chrono = { version = "0.4.25", default-features = false, features = ["clock", "std"] } displaydoc = "0.2.4" lazy_static = "1.4.0" once_cell = "1.17.2" diff --git a/zebra-network/Cargo.toml b/zebra-network/Cargo.toml index c990d362b24..6a24808d612 100644 --- a/zebra-network/Cargo.toml +++ b/zebra-network/Cargo.toml @@ -26,7 +26,7 @@ proptest-impl = ["proptest", "proptest-derive", "zebra-chain/proptest-impl"] bitflags = "2.2.1" byteorder = "1.4.3" bytes = "1.4.0" -chrono = { version = "0.4.24", default-features = false, features = ["clock", "std"] } +chrono = { version = "0.4.25", default-features = false, features = ["clock", "std"] } hex = "0.4.3" humantime-serde = "1.1.1" indexmap = { version = "1.9.3", features = ["serde"] } diff --git a/zebra-rpc/Cargo.toml b/zebra-rpc/Cargo.toml index fe9721dbb7a..c2c7181e860 100644 --- a/zebra-rpc/Cargo.toml +++ b/zebra-rpc/Cargo.toml @@ -32,7 +32,7 @@ proptest-impl = [ ] [dependencies] -chrono = { version = "0.4.24", default-features = false, features = ["clock", "std"] } +chrono = { version = "0.4.25", default-features = false, features = ["clock", "std"] } futures = "0.3.28" # lightwalletd sends JSON-RPC 
requests over HTTP 1.1 diff --git a/zebra-state/Cargo.toml b/zebra-state/Cargo.toml index 34fffc10b72..03a2fb257b8 100644 --- a/zebra-state/Cargo.toml +++ b/zebra-state/Cargo.toml @@ -34,7 +34,7 @@ elasticsearch = [ [dependencies] bincode = "1.3.3" -chrono = { version = "0.4.24", default-features = false, features = ["clock", "std"] } +chrono = { version = "0.4.25", default-features = false, features = ["clock", "std"] } dirs = "5.0.1" futures = "0.3.28" hex = "0.4.3" diff --git a/zebrad/Cargo.toml b/zebrad/Cargo.toml index 6a0b40bbd31..fa0ea3f5a69 100644 --- a/zebrad/Cargo.toml +++ b/zebrad/Cargo.toml @@ -117,7 +117,7 @@ zebra-state = { path = "../zebra-state" } abscissa_core = "0.5" gumdrop = { version = "0.7", features = ["default_expr"]} -chrono = { version = "0.4.24", default-features = false, features = ["clock", "std"] } +chrono = { version = "0.4.25", default-features = false, features = ["clock", "std"] } humantime-serde = "1.1.1" indexmap = "1.9.3" lazy_static = "1.4.0" From 2cd9455ded0711406aca891029455493ee47d7fe Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 30 May 2023 21:59:04 +0000 Subject: [PATCH 034/265] build(deps): bump tj-actions/changed-files from 36.0.8 to 36.0.9 (#6792) Bumps [tj-actions/changed-files](https://github.com/tj-actions/changed-files) from 36.0.8 to 36.0.9. - [Release notes](https://github.com/tj-actions/changed-files/releases) - [Changelog](https://github.com/tj-actions/changed-files/blob/main/HISTORY.md) - [Commits](https://github.com/tj-actions/changed-files/compare/v36.0.8...v36.0.9) --- updated-dependencies: - dependency-name: tj-actions/changed-files dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/lint.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 9dfc59ba9b0..b5a60b25e06 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -37,7 +37,7 @@ jobs: - name: Rust files id: changed-files-rust - uses: tj-actions/changed-files@v36.0.8 + uses: tj-actions/changed-files@v36.0.9 with: files: | **/*.rs @@ -49,7 +49,7 @@ jobs: - name: Workflow files id: changed-files-workflows - uses: tj-actions/changed-files@v36.0.8 + uses: tj-actions/changed-files@v36.0.9 with: files: | .github/workflows/*.yml From af4d53122fc93f72a9b44da74acab7e362a0f42b Mon Sep 17 00:00:00 2001 From: Deirdre Connolly Date: Tue, 30 May 2023 22:38:56 -0400 Subject: [PATCH 035/265] Replace update_chain_state_with ref with update_chain_tip_with in rustdoc (#6781) * Replace update_chain_state_with ref with update_chain_tip_with in rustdoc Noted in the audit * Update zebra-state/src/request.rs Co-authored-by: teor * Add fully qualified rustdoc link * Use correct Chain path * Use a method that's actually on the struct (not in a private trait impl) --------- Co-authored-by: teor --- zebra-state/src/request.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/zebra-state/src/request.rs b/zebra-state/src/request.rs index 6236eb249a8..5d5b2540ff2 100644 --- a/zebra-state/src/request.rs +++ b/zebra-state/src/request.rs @@ -318,7 +318,7 @@ impl ContextuallyValidBlock { /// including UTXOs created by earlier transactions in this block. /// /// Note: a [`ContextuallyValidBlock`] isn't actually contextually valid until - /// `Chain::update_chain_state_with` returns success. + /// [`Chain::push()`](crate::service::non_finalized_state::Chain::push) returns success. 
pub fn with_block_and_spent_utxos( prepared: PreparedBlock, mut spent_outputs: HashMap, From 6eaf83b4bf122b14b48a8d3501d316f40da09535 Mon Sep 17 00:00:00 2001 From: teor Date: Thu, 1 Jun 2023 05:04:15 +1000 Subject: [PATCH 036/265] fix(security): Randomly drop connections when inbound service is overloaded (#6790) * fix(security): Randomly drop connections when inbound service is overloaded * Uses progressively higher drop probabilities * Replaces Error::Overloaded with Fatal when internal services shutdown * Applies suggestions from code review. * Quickens initial drop probability decay and updates comment * Applies suggestions from code review. * Fixes drop connection probablity calc * Update connection state metrics for different overload/error outcomes * Split overload handler into separate methods * Add unit test for drop probability function properties * Add respond_error methods to zebra-test to help with type resolution * Initial test that Overloaded errors cause some continues and some closes * Tune the number of test runs and test timing * Fix doctests and replace some confusing example requests --------- Co-authored-by: arya2 --- zebra-network/src/constants.rs | 19 ++ zebra-network/src/peer/connection.rs | 125 +++++++-- .../src/peer/connection/tests/vectors.rs | 239 +++++++++++++++++- zebra-network/src/peer/error.rs | 5 + zebra-test/src/mock_service.rs | 148 ++++++++++- 5 files changed, 498 insertions(+), 38 deletions(-) diff --git a/zebra-network/src/constants.rs b/zebra-network/src/constants.rs index c6dfc0a3de0..7b7f51b5fa7 100644 --- a/zebra-network/src/constants.rs +++ b/zebra-network/src/constants.rs @@ -316,6 +316,25 @@ pub const EWMA_DECAY_TIME_NANOS: f64 = 200.0 * NANOS_PER_SECOND; /// The number of nanoseconds in one second. const NANOS_PER_SECOND: f64 = 1_000_000_000.0; +/// The duration it takes for the drop probability of an overloaded connection to +/// reach [`MIN_OVERLOAD_DROP_PROBABILITY`]. 
+/// +/// Peer connections that receive multiple overloads have a higher probability of being dropped. +/// +/// The probability of a connection being dropped gradually decreases during this interval +/// until it reaches the default drop probability ([`MIN_OVERLOAD_DROP_PROBABILITY`]). +/// +/// Increasing this number increases the rate at which connections are dropped. +pub const OVERLOAD_PROTECTION_INTERVAL: Duration = MIN_INBOUND_PEER_CONNECTION_INTERVAL; + +/// The minimum probability of dropping a peer connection when it receives an +/// [`Overloaded`](crate::PeerError::Overloaded) error. +pub const MIN_OVERLOAD_DROP_PROBABILITY: f32 = 0.05; + +/// The maximum probability of dropping a peer connection when it receives an +/// [`Overloaded`](crate::PeerError::Overloaded) error. +pub const MAX_OVERLOAD_DROP_PROBABILITY: f32 = 0.95; + lazy_static! { /// The minimum network protocol version accepted by this crate for each network, /// represented as a network upgrade. diff --git a/zebra-network/src/peer/connection.rs b/zebra-network/src/peer/connection.rs index 20429cc7353..568076b0a03 100644 --- a/zebra-network/src/peer/connection.rs +++ b/zebra-network/src/peer/connection.rs @@ -7,15 +7,16 @@ //! And it's unclear if these assumptions match the `zcashd` implementation. //! It should be refactored into a cleaner set of request/response pairs (#1515). 
-use std::{borrow::Cow, collections::HashSet, fmt, pin::Pin, sync::Arc}; +use std::{borrow::Cow, collections::HashSet, fmt, pin::Pin, sync::Arc, time::Instant}; use futures::{ future::{self, Either}, prelude::*, stream::Stream, }; +use rand::{thread_rng, Rng}; use tokio::time::{sleep, Sleep}; -use tower::Service; +use tower::{load_shed::error::Overloaded, Service, ServiceExt}; use tracing_futures::Instrument; use zebra_chain::{ @@ -25,7 +26,10 @@ use zebra_chain::{ }; use crate::{ - constants, + constants::{ + self, MAX_OVERLOAD_DROP_PROBABILITY, MIN_OVERLOAD_DROP_PROBABILITY, + OVERLOAD_PROTECTION_INTERVAL, + }, meta_addr::MetaAddr, peer::{ connection::peer_tx::PeerTx, error::AlreadyErrored, ClientRequest, ClientRequestReceiver, @@ -508,6 +512,11 @@ pub struct Connection { /// The state for this peer, when the metrics were last updated. pub(super) last_metrics_state: Option>, + + /// The time of the last overload error response from the inbound + /// service to a request from this connection, + /// or None if this connection hasn't yet received an overload error. + last_overload_time: Option, } impl fmt::Debug for Connection { @@ -549,6 +558,7 @@ impl Connection { connection_tracker, metrics_label, last_metrics_state: None, + last_overload_time: None, } } } @@ -1242,7 +1252,6 @@ where /// of connected peers. 
async fn drive_peer_request(&mut self, req: Request) { trace!(?req); - use tower::{load_shed::error::Overloaded, ServiceExt}; // Add a metric for inbound requests metrics::counter!( @@ -1258,29 +1267,18 @@ where tokio::task::yield_now().await; if self.svc.ready().await.is_err() { - // Treat all service readiness errors as Overloaded - // TODO: treat `TryRecvError::Closed` in `Inbound::poll_ready` as a fatal error (#1655) - self.fail_with(PeerError::Overloaded); + self.fail_with(PeerError::ServiceShutdown); return; } let rsp = match self.svc.call(req.clone()).await { Err(e) => { if e.is::() { - tracing::info!( - remote_user_agent = ?self.connection_info.remote.user_agent, - negotiated_version = ?self.connection_info.negotiated_version, - peer = ?self.metrics_label, - last_peer_state = ?self.last_metrics_state, - // TODO: remove this detailed debug info once #6506 is fixed - remote_height = ?self.connection_info.remote.start_height, - cached_addrs = ?self.cached_addrs.len(), - connection_state = ?self.state, - "inbound service is overloaded, closing connection", - ); + tracing::debug!("inbound service is overloaded, may close connection"); - metrics::counter!("pool.closed.loadshed", 1); - self.fail_with(PeerError::Overloaded); + let now = Instant::now(); + + self.handle_inbound_overload(req, now).await; } else { // We could send a reject to the remote peer, but that might cause // them to disconnect, and we might be using them to sync blocks. 
@@ -1292,7 +1290,9 @@ where client_receiver = ?self.client_rx, "error processing peer request", ); + self.update_state_metrics(format!("In::Req::{}/Rsp::Error", req.command())); } + return; } Ok(rsp) => rsp, @@ -1307,6 +1307,7 @@ where ); self.update_state_metrics(format!("In::Rsp::{}", rsp.command())); + // TODO: split response handler into its own method match rsp.clone() { Response::Nil => { /* generic success, do nothing */ } Response::Peers(addrs) => { @@ -1412,6 +1413,90 @@ where // before checking the connection for the next inbound or outbound request. tokio::task::yield_now().await; } + + /// Handle inbound service overload error responses by randomly terminating some connections. + /// + /// # Security + /// + /// When the inbound service is overloaded with requests, Zebra needs to drop some connections, + /// to reduce the load on the application. But dropping every connection that receives an + /// `Overloaded` error from the inbound service could cause Zebra to drop too many peer + /// connections, and stop itself downloading blocks or transactions. + /// + /// Malicious or misbehaving peers can also overload the inbound service, and make Zebra drop + /// its connections to other peers. + /// + /// So instead, Zebra drops some overloaded connections at random. If a connection has recently + /// overloaded the inbound service, it is more likely to be dropped. This makes it harder for a + /// single peer (or multiple peers) to perform a denial of service attack. + /// + /// The inbound connection rate-limit also makes it hard for multiple peers to perform this + /// attack, because each inbound connection can only send one inbound request before its + /// probability of being disconnected increases. 
+ async fn handle_inbound_overload(&mut self, req: Request, now: Instant) { + let prev = self.last_overload_time.replace(now); + let drop_connection_probability = overload_drop_connection_probability(now, prev); + + if thread_rng().gen::() < drop_connection_probability { + metrics::counter!("pool.closed.loadshed", 1); + + tracing::info!( + drop_connection_probability, + remote_user_agent = ?self.connection_info.remote.user_agent, + negotiated_version = ?self.connection_info.negotiated_version, + peer = ?self.metrics_label, + last_peer_state = ?self.last_metrics_state, + // TODO: remove this detailed debug info once #6506 is fixed + remote_height = ?self.connection_info.remote.start_height, + cached_addrs = ?self.cached_addrs.len(), + connection_state = ?self.state, + "inbound service is overloaded, closing connection", + ); + + self.update_state_metrics(format!("In::Req::{}/Rsp::Overload::Error", req.command())); + self.fail_with(PeerError::Overloaded); + } else { + self.update_state_metrics(format!("In::Req::{}/Rsp::Overload::Ignored", req.command())); + metrics::counter!("pool.ignored.loadshed", 1); + } + } +} + +/// Returns the probability of dropping a connection where the last overload was at `prev`, +/// and the current overload is `now`. +/// +/// # Security +/// +/// Connections that haven't seen an overload error in the past OVERLOAD_PROTECTION_INTERVAL +/// have a small chance of being closed (MIN_OVERLOAD_DROP_PROBABILITY). +/// +/// Connections that have seen a previous overload error in that time +/// have a higher chance of being dropped up to MAX_OVERLOAD_DROP_PROBABILITY. +/// This probability increases quadratically, so peers that send lots of inbound +/// requests are more likely to be dropped. +/// +/// ## Examples +/// +/// If a connection sends multiple overloads close together, it is very likely to be +/// disconnected. If a connection has two overloads multiple seconds apart, it is unlikely +/// to be disconnected. 
+fn overload_drop_connection_probability(now: Instant, prev: Option) -> f32 { + let Some(prev) = prev else { + return MIN_OVERLOAD_DROP_PROBABILITY; + }; + + let protection_fraction_since_last_overload = + (now - prev).as_secs_f32() / OVERLOAD_PROTECTION_INTERVAL.as_secs_f32(); + + // Quadratically increase the disconnection probability for very recent overloads. + // Negative values are ignored by clamping to MIN_OVERLOAD_DROP_PROBABILITY. + let overload_fraction = protection_fraction_since_last_overload.powi(2); + + let probability_range = MAX_OVERLOAD_DROP_PROBABILITY - MIN_OVERLOAD_DROP_PROBABILITY; + let raw_drop_probability = + MAX_OVERLOAD_DROP_PROBABILITY - (overload_fraction * probability_range); + + raw_drop_probability.clamp(MIN_OVERLOAD_DROP_PROBABILITY, MAX_OVERLOAD_DROP_PROBABILITY) } impl Connection { diff --git a/zebra-network/src/peer/connection/tests/vectors.rs b/zebra-network/src/peer/connection/tests/vectors.rs index 85ac7c854d1..cca8c8b2064 100644 --- a/zebra-network/src/peer/connection/tests/vectors.rs +++ b/zebra-network/src/peer/connection/tests/vectors.rs @@ -4,22 +4,27 @@ //! - inbound message as request //! 
- inbound message, but not a request (or a response) -use std::{collections::HashSet, task::Poll, time::Duration}; +use std::{ + collections::HashSet, + task::Poll, + time::{Duration, Instant}, +}; use futures::{ channel::{mpsc, oneshot}, sink::SinkMapErr, - FutureExt, StreamExt, + FutureExt, SinkExt, StreamExt, }; - +use tower::load_shed::error::Overloaded; use tracing::Span; + use zebra_chain::serialization::SerializationError; use zebra_test::mock_service::{MockService, PanicAssertion}; use crate::{ - constants::REQUEST_TIMEOUT, + constants::{MAX_OVERLOAD_DROP_PROBABILITY, MIN_OVERLOAD_DROP_PROBABILITY, REQUEST_TIMEOUT}, peer::{ - connection::{Connection, State}, + connection::{overload_drop_connection_probability, Connection, State}, ClientRequest, ErrorSlot, }, protocol::external::Message, @@ -656,6 +661,230 @@ async fn connection_run_loop_receive_timeout() { assert_eq!(outbound_message, None); } +/// Check basic properties of overload probabilities +#[test] +fn overload_probability_reduces_over_time() { + let now = Instant::now(); + + // Edge case: previous is in the future due to OS monotonic clock bugs + let prev = now + Duration::from_secs(1); + assert_eq!( + overload_drop_connection_probability(now, Some(prev)), + MAX_OVERLOAD_DROP_PROBABILITY, + "if the overload time is in the future (OS bugs?), it should have maximum drop probability", + ); + + // Overload/DoS case/edge case: rapidly repeated overloads + let prev = now; + assert_eq!( + overload_drop_connection_probability(now, Some(prev)), + MAX_OVERLOAD_DROP_PROBABILITY, + "if the overload times are the same, overloads should have maximum drop probability", + ); + + // Overload/DoS case: rapidly repeated overloads + let prev = now - Duration::from_micros(1); + let drop_probability = overload_drop_connection_probability(now, Some(prev)); + assert!( + drop_probability <= MAX_OVERLOAD_DROP_PROBABILITY, + "if the overloads are very close together, drops can optionally decrease", + ); + assert!( + 
MAX_OVERLOAD_DROP_PROBABILITY - drop_probability < 0.001, + "if the overloads are very close together, drops can only decrease slightly", + ); + let last_probability = drop_probability; + + // Overload/DoS case: rapidly repeated overloads + let prev = now - Duration::from_millis(1); + let drop_probability = overload_drop_connection_probability(now, Some(prev)); + assert!( + drop_probability < last_probability, + "if the overloads decrease, drops should decrease", + ); + assert!( + MAX_OVERLOAD_DROP_PROBABILITY - drop_probability < 0.001, + "if the overloads are very close together, drops can only decrease slightly", + ); + let last_probability = drop_probability; + + // Overload/DoS case: rapidly repeated overloads + let prev = now - Duration::from_millis(10); + let drop_probability = overload_drop_connection_probability(now, Some(prev)); + assert!( + drop_probability < last_probability, + "if the overloads decrease, drops should decrease", + ); + assert!( + MAX_OVERLOAD_DROP_PROBABILITY - drop_probability < 0.001, + "if the overloads are very close together, drops can only decrease slightly", + ); + let last_probability = drop_probability; + + // Overload case: frequent overloads + let prev = now - Duration::from_millis(100); + let drop_probability = overload_drop_connection_probability(now, Some(prev)); + assert!( + drop_probability < last_probability, + "if the overloads decrease, drops should decrease", + ); + assert!( + MAX_OVERLOAD_DROP_PROBABILITY - drop_probability < 0.01, + "if the overloads are very close together, drops can only decrease slightly", + ); + let last_probability = drop_probability; + + // Overload case: occasional but repeated overloads + let prev = now - Duration::from_secs(1); + let drop_probability = overload_drop_connection_probability(now, Some(prev)); + assert!( + drop_probability < last_probability, + "if the overloads decrease, drops should decrease", + ); + assert!( + MAX_OVERLOAD_DROP_PROBABILITY - drop_probability > 0.5, + "if 
the overloads are distant, drops should decrease a lot", + ); + let last_probability = drop_probability; + + // Overload case: occasional overloads + let prev = now - Duration::from_secs(5); + let drop_probability = overload_drop_connection_probability(now, Some(prev)); + assert!( + drop_probability < last_probability, + "if the overloads decrease, drops should decrease", + ); + assert!( + MAX_OVERLOAD_DROP_PROBABILITY - drop_probability > 0.7, + "if the overloads are distant, drops should decrease a lot", + ); + let _last_probability = drop_probability; + + // Base case: infrequent overloads + let prev = now - Duration::from_secs(10); + let drop_probability = overload_drop_connection_probability(now, Some(prev)); + assert_eq!( + drop_probability, MIN_OVERLOAD_DROP_PROBABILITY, + "if overloads are far apart, drops should have minimum drop probability", + ); + + // Base case: no previous overload + let drop_probability = overload_drop_connection_probability(now, None); + assert_eq!( + drop_probability, MIN_OVERLOAD_DROP_PROBABILITY, + "if there is no previous overload time, overloads should have minimum drop probability", + ); +} + +/// Test that connections are randomly terminated in response to `Overloaded` errors. +/// +/// TODO: do a similar test on the real service stack created in the `start` command. +#[tokio::test(flavor = "multi_thread")] +async fn connection_is_randomly_disconnected_on_overload() { + let _init_guard = zebra_test::init(); + + // The number of times we repeat the test + const TEST_RUNS: usize = 220; + // The expected number of tests before a test failure due to random chance. + // Based on 10 tests per PR, 100 PR pushes per week, 50 weeks per year. 
+ const TESTS_BEFORE_FAILURE: f32 = 50_000.0; + + let test_runs = TEST_RUNS.try_into().expect("constant fits in i32"); + // The probability of random test failure is: + // MIN_OVERLOAD_DROP_PROBABILITY^TEST_RUNS + MAX_OVERLOAD_DROP_PROBABILITY^TEST_RUNS + assert!( + 1.0 / MIN_OVERLOAD_DROP_PROBABILITY.powi(test_runs) > TESTS_BEFORE_FAILURE, + "not enough test runs: failures must be frequent enough to happen in almost all tests" + ); + assert!( + 1.0 / MAX_OVERLOAD_DROP_PROBABILITY.powi(test_runs) > TESTS_BEFORE_FAILURE, + "not enough test runs: successes must be frequent enough to happen in almost all tests" + ); + + let mut connection_continues = 0; + let mut connection_closes = 0; + + for _ in 0..TEST_RUNS { + // The real stream and sink are from a split TCP connection, + // but that doesn't change how the state machine behaves. + let (mut peer_tx, peer_rx) = mpsc::channel(1); + + let ( + connection, + _client_tx, + mut inbound_service, + mut peer_outbound_messages, + shared_error_slot, + ) = new_test_connection(); + + // The connection hasn't run so it must not have errors + let error = shared_error_slot.try_get_error(); + assert!( + error.is_none(), + "unexpected error before starting the connection event loop: {error:?}", + ); + + // Start the connection run loop future in a spawned task + let connection_handle = tokio::spawn(connection.run(peer_rx)); + tokio::time::sleep(Duration::from_millis(1)).await; + + // The connection hasn't received any messages, so it must not have errors + let error = shared_error_slot.try_get_error(); + assert!( + error.is_none(), + "unexpected error before sending messages to the connection event loop: {error:?}", + ); + + // Simulate an overloaded connection error in response to an inbound request. 
+ let inbound_req = Message::GetAddr; + peer_tx + .send(Ok(inbound_req)) + .await + .expect("send to channel always succeeds"); + tokio::time::sleep(Duration::from_millis(1)).await; + + // The connection hasn't got a response, so it must not have errors + let error = shared_error_slot.try_get_error(); + assert!( + error.is_none(), + "unexpected error before sending responses to the connection event loop: {error:?}", + ); + + inbound_service + .expect_request(Request::Peers) + .await + .respond_error(Overloaded::new().into()); + tokio::time::sleep(Duration::from_millis(1)).await; + + let outbound_result = peer_outbound_messages.try_next(); + assert!( + !matches!(outbound_result, Ok(Some(_))), + "unexpected outbound message after Overloaded error:\n\ + {outbound_result:?}\n\ + note: TryRecvErr means there are no messages, Ok(None) means the channel is closed" + ); + + let error = shared_error_slot.try_get_error(); + if error.is_some() { + connection_closes += 1; + } else { + connection_continues += 1; + } + + // We need to terminate the spawned task + connection_handle.abort(); + } + + assert!( + connection_closes > 0, + "some overloaded connections must be closed at random" + ); + assert!( + connection_continues > 0, + "some overloaded errors must be ignored at random" + ); +} + /// Creates a new [`Connection`] instance for unit tests. fn new_test_connection() -> ( Connection< diff --git a/zebra-network/src/peer/error.rs b/zebra-network/src/peer/error.rs index 0180c377d6b..4d842ba5cc9 100644 --- a/zebra-network/src/peer/error.rs +++ b/zebra-network/src/peer/error.rs @@ -82,6 +82,10 @@ pub enum PeerError { #[error("Internal services over capacity")] Overloaded, + /// This node's internal services are no longer able to service requests. + #[error("Internal services have failed or shutdown")] + ServiceShutdown, + /// We requested data, but the peer replied with a `notfound` message. /// (Or it didn't respond before the request finished.) 
/// @@ -138,6 +142,7 @@ impl PeerError { PeerError::Serialization(inner) => format!("Serialization({inner})").into(), PeerError::DuplicateHandshake => "DuplicateHandshake".into(), PeerError::Overloaded => "Overloaded".into(), + PeerError::ServiceShutdown => "ServiceShutdown".into(), PeerError::NotFoundResponse(_) => "NotFoundResponse".into(), PeerError::NotFoundRegistry(_) => "NotFoundRegistry".into(), } diff --git a/zebra-test/src/mock_service.rs b/zebra-test/src/mock_service.rs index 21debf97c13..d92e6f8b4ba 100644 --- a/zebra-test/src/mock_service.rs +++ b/zebra-test/src/mock_service.rs @@ -740,7 +740,10 @@ impl ResponseSender { /// This method takes ownership of the [`ResponseSender`] so that only one response can be /// sent. /// - /// If `respond` or `respond_with` are not called, the caller will panic. + /// # Panics + /// + /// If one of the `respond*` methods isn't called, the [`MockService`] might panic with a + /// timeout error. /// /// # Example /// @@ -748,6 +751,9 @@ impl ResponseSender { /// # use zebra_test::mock_service::MockService; /// # use tower::{Service, ServiceExt}; /// # + /// # #[derive(Debug, PartialEq, Eq)] + /// # struct Request; + /// # /// # let reactor = tokio::runtime::Builder::new_current_thread() /// # .enable_all() /// # .build() @@ -760,19 +766,19 @@ impl ResponseSender { /// /// # let mut service = mock_service.clone(); /// # let task = tokio::spawn(async move { - /// # let first_call_result = (&mut service).oneshot(1).await; - /// # let second_call_result = service.oneshot(1).await; + /// # let first_call_result = (&mut service).oneshot(Request).await; + /// # let second_call_result = service.oneshot(Request).await; /// # /// # (first_call_result, second_call_result) /// # }); /// # /// mock_service - /// .expect_request(1) + /// .expect_request(Request) /// .await - /// .respond("Received one".to_owned()); + /// .respond("Received Request".to_owned()); /// /// mock_service - /// .expect_request(1) + /// 
.expect_request(Request) /// .await /// .respond(Err("Duplicate request")); /// # }); @@ -789,7 +795,10 @@ impl ResponseSender { /// This method takes ownership of the [`ResponseSender`] so that only one response can be /// sent. /// - /// If `respond` or `respond_with` are not called, the caller will panic. + /// # Panics + /// + /// If one of the `respond*` methods isn't called, the [`MockService`] might panic with a + /// timeout error. /// /// # Example /// @@ -797,6 +806,9 @@ impl ResponseSender { /// # use zebra_test::mock_service::MockService; /// # use tower::{Service, ServiceExt}; /// # + /// # #[derive(Debug, PartialEq, Eq)] + /// # struct Request; + /// # /// # let reactor = tokio::runtime::Builder::new_current_thread() /// # .enable_all() /// # .build() @@ -809,21 +821,21 @@ impl ResponseSender { /// /// # let mut service = mock_service.clone(); /// # let task = tokio::spawn(async move { - /// # let first_call_result = (&mut service).oneshot(1).await; - /// # let second_call_result = service.oneshot(1).await; + /// # let first_call_result = (&mut service).oneshot(Request).await; + /// # let second_call_result = service.oneshot(Request).await; /// # /// # (first_call_result, second_call_result) /// # }); /// # /// mock_service - /// .expect_request(1) + /// .expect_request(Request) /// .await - /// .respond_with(|req| format!("Received: {}", req)); + /// .respond_with(|req| format!("Received: {req:?}")); /// /// mock_service - /// .expect_request(1) + /// .expect_request(Request) /// .await - /// .respond_with(|req| Err(format!("Duplicate request: {}", req))); + /// .respond_with(|req| Err(format!("Duplicate request: {req:?}"))); /// # }); /// ``` pub fn respond_with(self, response_fn: F) @@ -834,6 +846,116 @@ impl ResponseSender { let response_result = response_fn(self.request()).into_result(); let _ = self.response_sender.send(response_result); } + + /// Respond to the request using a fixed error value. + /// + /// The `error` must be the `Error` type. 
This helps avoid type resolution issues in the + /// compiler. + /// + /// This method takes ownership of the [`ResponseSender`] so that only one response can be + /// sent. + /// + /// # Panics + /// + /// If one of the `respond*` methods isn't called, the [`MockService`] might panic with a + /// timeout error. + /// + /// # Example + /// + /// ``` + /// # use zebra_test::mock_service::MockService; + /// # use tower::{Service, ServiceExt}; + /// # + /// # #[derive(Debug, PartialEq, Eq)] + /// # struct Request; + /// # struct Response; + /// # + /// # let reactor = tokio::runtime::Builder::new_current_thread() + /// # .enable_all() + /// # .build() + /// # .expect("Failed to build Tokio runtime"); + /// # + /// # reactor.block_on(async { + /// // Mock a service with a `String` as the service `Error` type. + /// let mut mock_service: MockService = + /// MockService::build().for_unit_tests(); + /// + /// # let mut service = mock_service.clone(); + /// # let task = tokio::spawn(async move { + /// # let first_call_result = (&mut service).oneshot(Request).await; + /// # let second_call_result = service.oneshot(Request).await; + /// # + /// # (first_call_result, second_call_result) + /// # }); + /// # + /// mock_service + /// .expect_request(Request) + /// .await + /// .respond_error("Duplicate request".to_string()); + /// # }); + /// ``` + pub fn respond_error(self, error: Error) { + // TODO: impl ResponseResult for BoxError/Error trait when overlapping impls are + // better supported by the compiler + let _ = self.response_sender.send(Err(error)); + } + + /// Respond to the request by calculating an error from the request. + /// + /// The `error` must be the `Error` type. This helps avoid type resolution issues in the + /// compiler. + /// + /// This method takes ownership of the [`ResponseSender`] so that only one response can be + /// sent. 
+ /// + /// # Panics + /// + /// If one of the `respond*` methods isn't called, the [`MockService`] might panic with a + /// timeout error. + /// + /// # Example + /// + /// ``` + /// # use zebra_test::mock_service::MockService; + /// # use tower::{Service, ServiceExt}; + /// # + /// # #[derive(Debug, PartialEq, Eq)] + /// # struct Request; + /// # struct Response; + /// # + /// # let reactor = tokio::runtime::Builder::new_current_thread() + /// # .enable_all() + /// # .build() + /// # .expect("Failed to build Tokio runtime"); + /// # + /// # reactor.block_on(async { + /// // Mock a service with a `String` as the service `Error` type. + /// let mut mock_service: MockService = + /// MockService::build().for_unit_tests(); + /// + /// # let mut service = mock_service.clone(); + /// # let task = tokio::spawn(async move { + /// # let first_call_result = (&mut service).oneshot(Request).await; + /// # let second_call_result = service.oneshot(Request).await; + /// # + /// # (first_call_result, second_call_result) + /// # }); + /// # + /// mock_service + /// .expect_request(Request) + /// .await + /// .respond_with_error(|req| format!("Duplicate request: {req:?}")); + /// # }); + /// ``` + pub fn respond_with_error(self, response_fn: F) + where + F: FnOnce(&Request) -> Error, + { + // TODO: impl ResponseResult for BoxError/Error trait when overlapping impls are + // better supported by the compiler + let response_result = Err(response_fn(self.request())); + let _ = self.response_sender.send(response_result); + } } /// A representation of an assertion type. 
From 618d3fcca0f27a38ef0d7ce1c1d8e0436cc0117b Mon Sep 17 00:00:00 2001 From: teor Date: Thu, 1 Jun 2023 06:55:59 +1000 Subject: [PATCH 037/265] Downgrade notfound logs to debug (#6795) --- zebra-network/src/peer/connection.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/zebra-network/src/peer/connection.rs b/zebra-network/src/peer/connection.rs index 568076b0a03..5d8f83039e6 100644 --- a/zebra-network/src/peer/connection.rs +++ b/zebra-network/src/peer/connection.rs @@ -230,11 +230,11 @@ impl Handler { if missing_transaction_ids != pending_ids { trace!(?missing_invs, ?missing_transaction_ids, ?pending_ids); // if these errors are noisy, we should replace them with debugs - info!("unexpected notfound message from peer: all remaining transaction hashes should be listed in the notfound. Using partial received transactions as the peer response"); + debug!("unexpected notfound message from peer: all remaining transaction hashes should be listed in the notfound. Using partial received transactions as the peer response"); } if missing_transaction_ids.len() != missing_invs.len() { trace!(?missing_invs, ?missing_transaction_ids, ?pending_ids); - info!("unexpected notfound message from peer: notfound contains duplicate hashes or non-transaction hashes. Using partial received transactions as the peer response"); + debug!("unexpected notfound message from peer: notfound contains duplicate hashes or non-transaction hashes. Using partial received transactions as the peer response"); } if transactions.is_empty() { @@ -334,11 +334,11 @@ impl Handler { if missing_blocks != pending_hashes { trace!(?missing_invs, ?missing_blocks, ?pending_hashes); // if these errors are noisy, we should replace them with debugs - info!("unexpected notfound message from peer: all remaining block hashes should be listed in the notfound. 
Using partial received blocks as the peer response"); + debug!("unexpected notfound message from peer: all remaining block hashes should be listed in the notfound. Using partial received blocks as the peer response"); } if missing_blocks.len() != missing_invs.len() { trace!(?missing_invs, ?missing_blocks, ?pending_hashes); - info!("unexpected notfound message from peer: notfound contains duplicate hashes or non-block hashes. Using partial received blocks as the peer response"); + debug!("unexpected notfound message from peer: notfound contains duplicate hashes or non-block hashes. Using partial received blocks as the peer response"); } if blocks.is_empty() { From eb07bb31d6ec051549d8a6a51a3d1d719dfe9094 Mon Sep 17 00:00:00 2001 From: Alfredo Garcia Date: Thu, 1 Jun 2023 09:29:03 -0300 Subject: [PATCH 038/265] rename(state): Rename state verifiers and related code (#6762) * rename verifiers * rename `PreparedBlock` to `SemanticallyVerifiedBlock` * rename `CommitBlock` to `SemanticallyVerifiedBlock` * rename `FinalizedBlock` to `CheckpointVerifiedBlock` * rename `CommitFinalizedBlock` to `CommitCheckpointVerifiedBlock` * rename `FinalizedWithTrees` to `ContextuallyVerifiedBlockWithTrees` * rename `ContextuallyValidBlock` to `ContextuallyVerifiedBlock` * change some `finalized` variables or function arguments to `checkpoint_verified` * fix docs * document the difference between `CheckpointVerifiedBlock` and `ContextuallyVerifiedBlock` * fix doc links * apply suggestions to request Co-authored-by: Marek * apply suggestions to service Co-authored-by: Marek * apply suggestions to finalized_state.rs and write.rs Co-authored-by: Marek * fmt * change some more variable names * change a few missing generics * fix checkpoint log issue * rename more `prepared` vars `semantically_verified` * fix test regex * fix test regex 2 --------- Co-authored-by: Marek --- .../src/dev/diagrams/service-dependencies.svg | 26 ++--- book/src/dev/overview.md | 10 +- zebra-consensus/src/block.rs 
| 16 +-- zebra-consensus/src/block/tests.rs | 2 +- zebra-consensus/src/checkpoint.rs | 19 ++-- zebra-consensus/src/checkpoint/tests.rs | 2 +- zebra-consensus/src/lib.rs | 4 +- zebra-consensus/src/{chain.rs => router.rs} | 54 +++++----- .../src/{chain => router}/tests.rs | 46 ++++---- zebra-consensus/src/transaction.rs | 2 +- .../src/methods/get_block_template_rpcs.rs | 46 ++++---- .../get_block_template.rs | 12 +-- .../tests/snapshot/get_block_template_rpcs.rs | 20 ++-- zebra-rpc/src/methods/tests/vectors.rs | 38 +++---- zebra-rpc/src/queue/tests/prop.rs | 2 +- zebra-rpc/src/server.rs | 10 +- zebra-rpc/src/server/tests/vectors.rs | 28 ++--- zebra-state/src/arbitrary.rs | 58 +++++----- zebra-state/src/error.rs | 8 +- zebra-state/src/lib.rs | 9 +- zebra-state/src/request.rs | 102 ++++++++++-------- zebra-state/src/response.rs | 2 +- zebra-state/src/service.rs | 84 ++++++++------- zebra-state/src/service/arbitrary.rs | 10 +- zebra-state/src/service/chain_tip.rs | 15 +-- zebra-state/src/service/check.rs | 6 +- zebra-state/src/service/check/anchors.rs | 14 +-- zebra-state/src/service/check/nullifier.rs | 6 +- .../src/service/check/tests/anchors.rs | 7 +- .../src/service/check/tests/nullifier.rs | 14 +-- zebra-state/src/service/check/tests/utxo.rs | 12 +-- zebra-state/src/service/check/utxo.rs | 8 +- zebra-state/src/service/finalized_state.rs | 33 +++--- .../src/service/finalized_state/tests/prop.rs | 14 +-- .../service/finalized_state/zebra_db/block.rs | 20 ++-- .../zebra_db/block/tests/vectors.rs | 4 +- .../service/finalized_state/zebra_db/chain.rs | 10 +- .../finalized_state/zebra_db/shielded.rs | 14 +-- .../finalized_state/zebra_db/transparent.rs | 6 +- .../src/service/non_finalized_state.rs | 20 ++-- .../src/service/non_finalized_state/chain.rs | 30 +++--- .../service/non_finalized_state/tests/prop.rs | 26 ++--- .../non_finalized_state/tests/vectors.rs | 4 +- zebra-state/src/service/queued_blocks.rs | 10 +- zebra-state/src/service/tests.rs | 12 +-- 
zebra-state/src/service/write.rs | 10 +- zebra-state/src/tests/setup.rs | 8 +- zebra-state/tests/basic.rs | 4 +- zebrad/src/commands/copy_state.rs | 4 +- zebrad/src/commands/start.rs | 14 +-- zebrad/src/components/inbound.rs | 10 +- .../components/inbound/tests/fake_peer_set.rs | 12 +-- .../components/inbound/tests/real_peer_set.rs | 4 +- zebrad/src/components/mempool/tests/prop.rs | 6 +- zebrad/src/components/mempool/tests/vector.rs | 12 +-- zebrad/src/components/sync/downloads.rs | 4 +- zebrad/src/components/sync/tests/vectors.rs | 70 ++++++------ zebrad/tests/acceptance.rs | 5 +- zebrad/tests/common/sync.rs | 2 +- 59 files changed, 552 insertions(+), 508 deletions(-) rename zebra-consensus/src/{chain.rs => router.rs} (91%) rename zebra-consensus/src/{chain => router}/tests.rs (84%) diff --git a/book/src/dev/diagrams/service-dependencies.svg b/book/src/dev/diagrams/service-dependencies.svg index 3ebd62bdf56..979b44846ba 100644 --- a/book/src/dev/diagrams/service-dependencies.svg +++ b/book/src/dev/diagrams/service-dependencies.svg @@ -74,15 +74,15 @@ - + -chain_verifier +router_verifier -chain_verifier +router_verifier - + -inbound->chain_verifier +inbound->router_verifier @@ -104,9 +104,9 @@ - + -rpc_server->chain_verifier +rpc_server->router_verifier @@ -116,9 +116,9 @@ checkpoint_verifier - + -chain_verifier->checkpoint_verifier +router_verifier->checkpoint_verifier @@ -128,9 +128,9 @@ block_verifier - + -chain_verifier->block_verifier +router_verifier->block_verifier @@ -146,9 +146,9 @@ syncer - + -syncer->chain_verifier +syncer->router_verifier diff --git a/book/src/dev/overview.md b/book/src/dev/overview.md index 7117f936081..b6508acfb88 100644 --- a/book/src/dev/overview.md +++ b/book/src/dev/overview.md @@ -56,18 +56,18 @@ digraph services { inbound -> state rpc_server -> state mempool -> transaction_verifier - chain_verifier -> checkpoint_verifier + router_verifier -> checkpoint_verifier inbound -> mempool rpc_server -> mempool - inbound -> 
chain_verifier - syncer -> chain_verifier - rpc_server -> chain_verifier [style=dotted] + inbound -> router_verifier + syncer -> router_verifier + rpc_server -> router_verifier [style=dotted] syncer -> peer_set mempool -> peer_set block_verifier -> state checkpoint_verifier -> state block_verifier -> transaction_verifier - chain_verifier -> block_verifier + router_verifier -> block_verifier rpc_server -> inbound [style=invis] // for layout of the diagram } diff --git a/zebra-consensus/src/block.rs b/zebra-consensus/src/block.rs index 80d4bea9c59..3b694ac6773 100644 --- a/zebra-consensus/src/block.rs +++ b/zebra-consensus/src/block.rs @@ -35,9 +35,9 @@ pub use request::Request; #[cfg(test)] mod tests; -/// Asynchronous block verification. +/// Asynchronous semantic block verification. #[derive(Debug)] -pub struct BlockVerifier { +pub struct SemanticBlockVerifier { /// The network to be verified. network: Network, state_service: S, @@ -100,14 +100,14 @@ impl VerifyBlockError { /// pub const MAX_BLOCK_SIGOPS: u64 = 20_000; -impl BlockVerifier +impl SemanticBlockVerifier where S: Service + Send + Clone + 'static, S::Future: Send + 'static, V: Service + Send + Clone + 'static, V::Future: Send + 'static, { - /// Creates a new BlockVerifier + /// Creates a new SemanticBlockVerifier pub fn new(network: Network, state_service: S, transaction_verifier: V) -> Self { Self { network, @@ -117,7 +117,7 @@ where } } -impl Service for BlockVerifier +impl Service for SemanticBlockVerifier where S: Service + Send + Clone + 'static, S::Future: Send + 'static, @@ -283,7 +283,7 @@ where let new_outputs = Arc::try_unwrap(known_utxos) .expect("all verification tasks using known_utxos are complete"); - let prepared_block = zs::PreparedBlock { + let prepared_block = zs::SemanticallyVerifiedBlock { block, hash, height, @@ -311,7 +311,7 @@ where .ready() .await .map_err(VerifyBlockError::Commit)? 
- .call(zs::Request::CommitBlock(prepared_block)) + .call(zs::Request::CommitSemanticallyVerifiedBlock(prepared_block)) .await .map_err(VerifyBlockError::Commit)? { @@ -319,7 +319,7 @@ where assert_eq!(committed_hash, hash, "state must commit correct hash"); Ok(hash) } - _ => unreachable!("wrong response for CommitBlock"), + _ => unreachable!("wrong response for CommitSemanticallyVerifiedBlock"), } } .instrument(span) diff --git a/zebra-consensus/src/block/tests.rs b/zebra-consensus/src/block/tests.rs index bad6ab40630..13e8be79cf0 100644 --- a/zebra-consensus/src/block/tests.rs +++ b/zebra-consensus/src/block/tests.rs @@ -144,7 +144,7 @@ async fn check_transcripts() -> Result<(), Report> { let transaction = transaction::Verifier::new(network, state_service.clone()); let transaction = Buffer::new(BoxService::new(transaction), 1); let block_verifier = Buffer::new( - BlockVerifier::new(network, state_service.clone(), transaction), + SemanticBlockVerifier::new(network, state_service.clone(), transaction), 1, ); diff --git a/zebra-consensus/src/checkpoint.rs b/zebra-consensus/src/checkpoint.rs index ce31ac29a77..b575d79d8b7 100644 --- a/zebra-consensus/src/checkpoint.rs +++ b/zebra-consensus/src/checkpoint.rs @@ -32,7 +32,7 @@ use zebra_chain::{ parameters::{Network, GENESIS_PREVIOUS_BLOCK_HASH}, work::equihash, }; -use zebra_state::{self as zs, FinalizedBlock}; +use zebra_state::{self as zs, CheckpointVerifiedBlock}; use crate::{ block::VerifyBlockError, @@ -59,7 +59,7 @@ pub use list::CheckpointList; #[derive(Debug)] struct QueuedBlock { /// The block, with additional precalculated data. - block: FinalizedBlock, + block: CheckpointVerifiedBlock, /// The transmitting end of the oneshot channel for this block's result. tx: oneshot::Sender>, } @@ -68,7 +68,7 @@ struct QueuedBlock { #[derive(Debug)] struct RequestBlock { /// The block, with additional precalculated data. 
- block: FinalizedBlock, + block: CheckpointVerifiedBlock, /// The receiving end of the oneshot channel for this block's result. rx: oneshot::Receiver>, } @@ -580,7 +580,7 @@ where /// Check that the block height, proof of work, and Merkle root are valid. /// - /// Returns a [`FinalizedBlock`] with precalculated block data. + /// Returns a [`CheckpointVerifiedBlock`] with precalculated block data. /// /// ## Security /// @@ -590,7 +590,10 @@ where /// Checking the Merkle root ensures that the block hash binds the block /// contents. To prevent malleability (CVE-2012-2459), we also need to check /// whether the transaction hashes are unique. - fn check_block(&self, block: Arc) -> Result { + fn check_block( + &self, + block: Arc, + ) -> Result { let hash = block.hash(); let height = block .coinbase_height() @@ -601,7 +604,7 @@ where crate::block::check::equihash_solution_is_valid(&block.header)?; // don't do precalculation until the block passes basic difficulty checks - let block = FinalizedBlock::with_hash(block, hash); + let block = CheckpointVerifiedBlock::with_hash(block, hash); crate::block::check::merkle_root_validity( self.network, @@ -1092,7 +1095,7 @@ where // We use a `ServiceExt::oneshot`, so that every state service // `poll_ready` has a corresponding `call`. See #1593. match state_service - .oneshot(zs::Request::CommitFinalizedBlock(req_block.block)) + .oneshot(zs::Request::CommitCheckpointVerifiedBlock(req_block.block)) .map_err(VerifyCheckpointError::CommitFinalized) .await? 
{ @@ -1100,7 +1103,7 @@ where assert_eq!(committed_hash, hash, "state must commit correct hash"); Ok(hash) } - _ => unreachable!("wrong response for CommitFinalizedBlock"), + _ => unreachable!("wrong response for CommitCheckpointVerifiedBlock"), } }); diff --git a/zebra-consensus/src/checkpoint/tests.rs b/zebra-consensus/src/checkpoint/tests.rs index 66331310735..2dbefab1979 100644 --- a/zebra-consensus/src/checkpoint/tests.rs +++ b/zebra-consensus/src/checkpoint/tests.rs @@ -326,7 +326,7 @@ async fn continuous_blockchain( // SPANDOC: Add block directly to the state {?height} ready_state_service - .call(zebra_state::Request::CommitFinalizedBlock( + .call(zebra_state::Request::CommitCheckpointVerifiedBlock( block.clone().into(), )) .await diff --git a/zebra-consensus/src/lib.rs b/zebra-consensus/src/lib.rs index cb53cedb9aa..7b8f58f9c90 100644 --- a/zebra-consensus/src/lib.rs +++ b/zebra-consensus/src/lib.rs @@ -41,8 +41,8 @@ mod parameters; mod primitives; mod script; -pub mod chain; pub mod error; +pub mod router; pub mod transaction; pub use block::{ @@ -55,7 +55,6 @@ pub use block::{ }, Request, VerifyBlockError, MAX_BLOCK_SIGOPS, }; -pub use chain::VerifyChainError; pub use checkpoint::{ CheckpointList, VerifyCheckpointError, MAX_CHECKPOINT_BYTE_COUNT, MAX_CHECKPOINT_HEIGHT_GAP, }; @@ -63,6 +62,7 @@ pub use config::Config; pub use error::BlockError; pub use parameters::FundingStreamReceiver; pub use primitives::{ed25519, groth16, halo2, redjubjub, redpallas}; +pub use router::RouterError; /// A boxed [`std::error::Error`]. pub type BoxError = Box; diff --git a/zebra-consensus/src/chain.rs b/zebra-consensus/src/router.rs similarity index 91% rename from zebra-consensus/src/chain.rs rename to zebra-consensus/src/router.rs index 28f490cea31..28fac00c03f 100644 --- a/zebra-consensus/src/chain.rs +++ b/zebra-consensus/src/router.rs @@ -1,6 +1,6 @@ //! Top-level semantic block verification for Zebra. //! -//! 
Verifies blocks using the [`CheckpointVerifier`] or full [`BlockVerifier`], +//! Verifies blocks using the [`CheckpointVerifier`] or full [`SemanticBlockVerifier`], //! depending on the config and block height. //! //! # Correctness @@ -33,7 +33,7 @@ use zebra_chain::{ use zebra_state as zs; use crate::{ - block::{BlockVerifier, Request, VerifyBlockError}, + block::{Request, SemanticBlockVerifier, VerifyBlockError}, checkpoint::{CheckpointList, CheckpointVerifier, VerifyCheckpointError}, error::TransactionError, transaction, BoxError, Config, @@ -56,15 +56,15 @@ mod tests; /// memory, but missing slots can significantly slow down Zebra. const VERIFIER_BUFFER_BOUND: usize = 5; -/// The chain verifier routes requests to either the checkpoint verifier or the -/// block verifier, depending on the maximum checkpoint height. +/// The block verifier router routes requests to either the checkpoint verifier or the +/// semantic block verifier, depending on the maximum checkpoint height. /// /// # Correctness /// /// Block verification requests should be wrapped in a timeout, so that -/// out-of-order and invalid requests do not hang indefinitely. See the [`chain`](`crate::chain`) +/// out-of-order and invalid requests do not hang indefinitely. See the [`router`](`crate::router`) /// module documentation for details. -struct ChainVerifier +struct BlockVerifierRouter where S: Service + Send + Clone + 'static, S::Future: Send + 'static, @@ -84,8 +84,8 @@ where /// This height must be in the `checkpoint` verifier's checkpoint list. max_checkpoint_height: block::Height, - /// The full block verifier, used for blocks after `max_checkpoint_height`. - block: BlockVerifier, + /// The full semantic block verifier, used for blocks after `max_checkpoint_height`. + block: SemanticBlockVerifier, } /// An error while semantically verifying a block. 
@@ -93,41 +93,41 @@ where // One or both of these error variants are at least 140 bytes #[derive(Debug, Display, Error)] #[allow(missing_docs)] -pub enum VerifyChainError { +pub enum RouterError { /// Block could not be checkpointed Checkpoint { source: Box }, /// Block could not be full-verified Block { source: Box }, } -impl From for VerifyChainError { +impl From for RouterError { fn from(err: VerifyCheckpointError) -> Self { - VerifyChainError::Checkpoint { + RouterError::Checkpoint { source: Box::new(err), } } } -impl From for VerifyChainError { +impl From for RouterError { fn from(err: VerifyBlockError) -> Self { - VerifyChainError::Block { + RouterError::Block { source: Box::new(err), } } } -impl VerifyChainError { +impl RouterError { /// Returns `true` if this is definitely a duplicate request. /// Some duplicate requests might not be detected, and therefore return `false`. pub fn is_duplicate_request(&self) -> bool { match self { - VerifyChainError::Checkpoint { source, .. } => source.is_duplicate_request(), - VerifyChainError::Block { source, .. } => source.is_duplicate_request(), + RouterError::Checkpoint { source, .. } => source.is_duplicate_request(), + RouterError::Block { source, .. } => source.is_duplicate_request(), } } } -impl Service for ChainVerifier +impl Service for BlockVerifierRouter where S: Service + Send + Clone + 'static, S::Future: Send + 'static, @@ -138,7 +138,7 @@ where V::Future: Send + 'static, { type Response = block::Hash; - type Error = VerifyChainError; + type Error = RouterError; type Future = Pin> + Send + 'static>>; @@ -224,7 +224,7 @@ where /// /// Block and transaction verification requests should be wrapped in a timeout, /// so that out-of-order and invalid requests do not hang indefinitely. -/// See the [`chain`](`crate::chain`) module documentation for details. +/// See the [`router`](`crate::router`) module documentation for details. 
#[instrument(skip(state_service))] pub async fn init( config: Config, @@ -232,7 +232,7 @@ pub async fn init( mut state_service: S, debug_skip_parameter_preload: bool, ) -> ( - Buffer, Request>, + Buffer, Request>, Buffer< BoxService, transaction::Request, @@ -364,24 +364,28 @@ where zs::Response::Tip(tip) => tip, _ => unreachable!("wrong response to Request::Tip"), }; - tracing::info!(?tip, ?max_checkpoint_height, "initializing chain verifier"); + tracing::info!( + ?tip, + ?max_checkpoint_height, + "initializing block verifier router" + ); - let block = BlockVerifier::new(network, state_service.clone(), transaction.clone()); + let block = SemanticBlockVerifier::new(network, state_service.clone(), transaction.clone()); let checkpoint = CheckpointVerifier::from_checkpoint_list(list, network, tip, state_service); - let chain = ChainVerifier { + let router = BlockVerifierRouter { checkpoint, max_checkpoint_height, block, }; - let chain = Buffer::new(BoxService::new(chain), VERIFIER_BUFFER_BOUND); + let router = Buffer::new(BoxService::new(router), VERIFIER_BUFFER_BOUND); let task_handles = BackgroundTaskHandles { groth16_download_handle, state_checkpoint_verify_handle, }; - (chain, transaction, task_handles, max_checkpoint_height) + (router, transaction, task_handles, max_checkpoint_height) } /// Parses the checkpoint list for `network` and `config`. 
diff --git a/zebra-consensus/src/chain/tests.rs b/zebra-consensus/src/router/tests.rs similarity index 84% rename from zebra-consensus/src/chain/tests.rs rename to zebra-consensus/src/router/tests.rs index 308be754bdb..fd35b88031e 100644 --- a/zebra-consensus/src/chain/tests.rs +++ b/zebra-consensus/src/router/tests.rs @@ -66,14 +66,14 @@ async fn verifiers_from_network( + 'static, ) { let state_service = zs::init_test(network); - let (chain_verifier, _transaction_verifier, _groth16_download_handle, _max_checkpoint_height) = - crate::chain::init(Config::default(), network, state_service.clone(), true).await; + let (router_verifier, _transaction_verifier, _groth16_download_handle, _max_checkpoint_height) = + crate::router::init(Config::default(), network, state_service.clone(), true).await; // We can drop the download task handle here, because: // - if the download task fails, the tests will panic, and // - if the download task hangs, the tests will hang. - (chain_verifier, state_service) + (router_verifier, state_service) } static BLOCK_VERIFY_TRANSCRIPT_GENESIS: Lazy< @@ -165,15 +165,15 @@ async fn verify_checkpoint(config: Config) -> Result<(), Report> { // init_from_verifiers. // // Download task panics and timeouts are propagated to the tests that use Groth16 verifiers. 
- let (chain_verifier, _transaction_verifier, _groth16_download_handle, _max_checkpoint_height) = + let (router_verifier, _transaction_verifier, _groth16_download_handle, _max_checkpoint_height) = super::init(config.clone(), network, zs::init_test(network), true).await; // Add a timeout layer - let chain_verifier = - TimeoutLayer::new(Duration::from_secs(VERIFY_TIMEOUT_SECONDS)).layer(chain_verifier); + let router_verifier = + TimeoutLayer::new(Duration::from_secs(VERIFY_TIMEOUT_SECONDS)).layer(router_verifier); let transcript = Transcript::from(BLOCK_VERIFY_TRANSCRIPT_GENESIS.iter().cloned()); - transcript.check(chain_verifier).await.unwrap(); + transcript.check(router_verifier).await.unwrap(); Ok(()) } @@ -183,22 +183,22 @@ async fn verify_fail_no_coinbase_test() -> Result<(), Report> { verify_fail_no_coinbase().await } -/// Test that blocks with no coinbase height are rejected by the ChainVerifier +/// Test that blocks with no coinbase height are rejected by the BlockVerifierRouter /// -/// ChainVerifier uses the block height to decide between the CheckpointVerifier -/// and BlockVerifier. This is the error case, where there is no height. +/// BlockVerifierRouter uses the block height to decide between the CheckpointVerifier +/// and SemanticBlockVerifier. This is the error case, where there is no height. 
#[spandoc::spandoc] async fn verify_fail_no_coinbase() -> Result<(), Report> { let _init_guard = zebra_test::init(); - let (chain_verifier, state_service) = verifiers_from_network(Network::Mainnet).await; + let (router, state_service) = verifiers_from_network(Network::Mainnet).await; // Add a timeout layer - let chain_verifier = - TimeoutLayer::new(Duration::from_secs(VERIFY_TIMEOUT_SECONDS)).layer(chain_verifier); + let router_verifier = + TimeoutLayer::new(Duration::from_secs(VERIFY_TIMEOUT_SECONDS)).layer(router); let transcript = Transcript::from(NO_COINBASE_TRANSCRIPT.iter().cloned()); - transcript.check(chain_verifier).await.unwrap(); + transcript.check(router_verifier).await.unwrap(); let transcript = Transcript::from(NO_COINBASE_STATE_TRANSCRIPT.iter().cloned()); transcript.check(state_service).await.unwrap(); @@ -216,14 +216,14 @@ async fn round_trip_checkpoint_test() -> Result<(), Report> { async fn round_trip_checkpoint() -> Result<(), Report> { let _init_guard = zebra_test::init(); - let (chain_verifier, state_service) = verifiers_from_network(Network::Mainnet).await; + let (router_verifier, state_service) = verifiers_from_network(Network::Mainnet).await; // Add a timeout layer - let chain_verifier = - TimeoutLayer::new(Duration::from_secs(VERIFY_TIMEOUT_SECONDS)).layer(chain_verifier); + let router_verifier = + TimeoutLayer::new(Duration::from_secs(VERIFY_TIMEOUT_SECONDS)).layer(router_verifier); let transcript = Transcript::from(BLOCK_VERIFY_TRANSCRIPT_GENESIS.iter().cloned()); - transcript.check(chain_verifier).await.unwrap(); + transcript.check(router_verifier).await.unwrap(); let transcript = Transcript::from(STATE_VERIFY_TRANSCRIPT_GENESIS.iter().cloned()); transcript.check(state_service).await.unwrap(); @@ -241,20 +241,20 @@ async fn verify_fail_add_block_checkpoint_test() -> Result<(), Report> { async fn verify_fail_add_block_checkpoint() -> Result<(), Report> { let _init_guard = zebra_test::init(); - let (chain_verifier, state_service) = 
verifiers_from_network(Network::Mainnet).await; + let (router_verifier, state_service) = verifiers_from_network(Network::Mainnet).await; // Add a timeout layer - let chain_verifier = - TimeoutLayer::new(Duration::from_secs(VERIFY_TIMEOUT_SECONDS)).layer(chain_verifier); + let router_verifier = + TimeoutLayer::new(Duration::from_secs(VERIFY_TIMEOUT_SECONDS)).layer(router_verifier); let transcript = Transcript::from(BLOCK_VERIFY_TRANSCRIPT_GENESIS.iter().cloned()); - transcript.check(chain_verifier.clone()).await.unwrap(); + transcript.check(router_verifier.clone()).await.unwrap(); let transcript = Transcript::from(STATE_VERIFY_TRANSCRIPT_GENESIS.iter().cloned()); transcript.check(state_service.clone()).await.unwrap(); let transcript = Transcript::from(BLOCK_VERIFY_TRANSCRIPT_GENESIS_FAIL.iter().cloned()); - transcript.check(chain_verifier.clone()).await.unwrap(); + transcript.check(router_verifier.clone()).await.unwrap(); let transcript = Transcript::from(STATE_VERIFY_TRANSCRIPT_GENESIS.iter().cloned()); transcript.check(state_service.clone()).await.unwrap(); diff --git a/zebra-consensus/src/transaction.rs b/zebra-consensus/src/transaction.rs index 674438c23c0..90c549a3b00 100644 --- a/zebra-consensus/src/transaction.rs +++ b/zebra-consensus/src/transaction.rs @@ -58,7 +58,7 @@ const UTXO_LOOKUP_TIMEOUT: std::time::Duration = std::time::Duration::from_secs( /// # Correctness /// /// Transaction verification requests should be wrapped in a timeout, so that -/// out-of-order and invalid requests do not hang indefinitely. See the [`chain`](`crate::chain`) +/// out-of-order and invalid requests do not hang indefinitely. See the [`router`](`crate::router`) /// module documentation for details. 
#[derive(Debug, Clone)] pub struct Verifier { diff --git a/zebra-rpc/src/methods/get_block_template_rpcs.rs b/zebra-rpc/src/methods/get_block_template_rpcs.rs index f9c5e3f01cb..5afc977c16b 100644 --- a/zebra-rpc/src/methods/get_block_template_rpcs.rs +++ b/zebra-rpc/src/methods/get_block_template_rpcs.rs @@ -24,7 +24,7 @@ use zebra_chain::{ }; use zebra_consensus::{ funding_stream_address, funding_stream_values, height_for_first_halving, miner_subsidy, - VerifyChainError, + RouterError, }; use zebra_network::AddressBookPeers; use zebra_node_services::mempool; @@ -217,8 +217,14 @@ pub trait GetBlockTemplateRpc { } /// RPC method implementations. -pub struct GetBlockTemplateRpcImpl -where +pub struct GetBlockTemplateRpcImpl< + Mempool, + State, + Tip, + BlockVerifierRouter, + SyncStatus, + AddressBook, +> where Mempool: Service< mempool::Request, Response = mempool::Response, @@ -229,7 +235,7 @@ where Response = zebra_state::ReadResponse, Error = zebra_state::BoxError, >, - ChainVerifier: Service + BlockVerifierRouter: Service + Clone + Send + Sync @@ -267,7 +273,7 @@ where latest_chain_tip: Tip, /// The chain verifier, used for submitting blocks. - chain_verifier: ChainVerifier, + router_verifier: BlockVerifierRouter, /// The chain sync status, used for checking if Zebra is likely close to the network chain tip. 
sync_status: SyncStatus, @@ -276,8 +282,8 @@ where address_book: AddressBook, } -impl - GetBlockTemplateRpcImpl +impl + GetBlockTemplateRpcImpl where Mempool: Service< mempool::Request, @@ -293,7 +299,7 @@ where + Sync + 'static, Tip: ChainTip + Clone + Send + Sync + 'static, - ChainVerifier: Service + BlockVerifierRouter: Service + Clone + Send + Sync @@ -313,7 +319,7 @@ where mempool: Buffer, state: State, latest_chain_tip: Tip, - chain_verifier: ChainVerifier, + router_verifier: BlockVerifierRouter, sync_status: SyncStatus, address_book: AddressBook, ) -> Self { @@ -352,15 +358,15 @@ where mempool, state, latest_chain_tip, - chain_verifier, + router_verifier, sync_status, address_book, } } } -impl GetBlockTemplateRpc - for GetBlockTemplateRpcImpl +impl GetBlockTemplateRpc + for GetBlockTemplateRpcImpl where Mempool: Service< mempool::Request, @@ -378,12 +384,12 @@ where + 'static, >::Future: Send, Tip: ChainTip + Clone + Send + Sync + 'static, - ChainVerifier: Service + BlockVerifierRouter: Service + Clone + Send + Sync + 'static, - >::Future: Send, + >::Future: Send, SyncStatus: ChainSyncStatus + Clone + Send + Sync + 'static, AddressBook: AddressBookPeers + Clone + Send + Sync + 'static, { @@ -448,7 +454,7 @@ where .and_then(get_block_template::JsonParameters::block_proposal_data) { return validate_block_proposal( - self.chain_verifier.clone(), + self.router_verifier.clone(), block_proposal_bytes, network, latest_chain_tip, @@ -731,7 +737,7 @@ where HexData(block_bytes): HexData, _parameters: Option, ) -> BoxFuture> { - let mut chain_verifier = self.chain_verifier.clone(); + let mut router_verifier = self.router_verifier.clone(); async move { let block: Block = match block_bytes.zcash_deserialize_into() { @@ -749,7 +755,7 @@ where .unwrap_or_else(|| "invalid coinbase height".to_string()); let block_hash = block.hash(); - let chain_verifier_response = chain_verifier + let router_verifier_response = router_verifier .ready() .await .map_err(|error| Error { @@ 
-760,7 +766,7 @@ where .call(zebra_consensus::Request::Commit(Arc::new(block))) .await; - let chain_error = match chain_verifier_response { + let chain_error = match router_verifier_response { // Currently, this match arm returns `null` (Accepted) for blocks committed // to any chain, but Accepted is only for blocks in the best chain. // @@ -776,7 +782,7 @@ where // by downcasting from Any to VerifyChainError. Err(box_error) => { let error = box_error - .downcast::() + .downcast::() .map(|boxed_chain_error| *boxed_chain_error); tracing::info!(?error, ?block_hash, ?block_height, "submit block failed verification"); @@ -802,7 +808,7 @@ where // and return a duplicate error for the newer request immediately. // This improves the speed of the RPC response. // - // Checking the download queues and ChainVerifier buffer for duplicates + // Checking the download queues and BlockVerifierRouter buffer for duplicates // might require architectural changes to Zebra, so we should only do it // if mining pools really need it. Ok(_verify_chain_error) => submit_block::ErrorResponse::Rejected, diff --git a/zebra-rpc/src/methods/get_block_template_rpcs/get_block_template.rs b/zebra-rpc/src/methods/get_block_template_rpcs/get_block_template.rs index 8439808fe70..f18242b7cdd 100644 --- a/zebra-rpc/src/methods/get_block_template_rpcs/get_block_template.rs +++ b/zebra-rpc/src/methods/get_block_template_rpcs/get_block_template.rs @@ -97,15 +97,15 @@ pub fn check_miner_address( /// usual acceptance rules (except proof-of-work). /// /// Returns a `getblocktemplate` [`Response`]. 
-pub async fn validate_block_proposal( - mut chain_verifier: ChainVerifier, +pub async fn validate_block_proposal( + mut router_verifier: BlockVerifierRouter, block_proposal_bytes: Vec, network: Network, latest_chain_tip: Tip, sync_status: SyncStatus, ) -> Result where - ChainVerifier: Service + BlockVerifierRouter: Service + Clone + Send + Sync @@ -129,7 +129,7 @@ where } }; - let chain_verifier_response = chain_verifier + let router_verifier_response = router_verifier .ready() .await .map_err(|error| Error { @@ -140,12 +140,12 @@ where .call(zebra_consensus::Request::CheckProposal(Arc::new(block))) .await; - Ok(chain_verifier_response + Ok(router_verifier_response .map(|_hash| ProposalResponse::Valid) .unwrap_or_else(|verify_chain_error| { tracing::info!( ?verify_chain_error, - "error response from chain_verifier in CheckProposal request" + "error response from router_verifier in CheckProposal request" ); ProposalResponse::rejected("invalid proposal", verify_chain_error) diff --git a/zebra-rpc/src/methods/tests/snapshot/get_block_template_rpcs.rs b/zebra-rpc/src/methods/tests/snapshot/get_block_template_rpcs.rs index c827d5f0ccb..05e12874d61 100644 --- a/zebra-rpc/src/methods/tests/snapshot/get_block_template_rpcs.rs +++ b/zebra-rpc/src/methods/tests/snapshot/get_block_template_rpcs.rs @@ -85,11 +85,11 @@ pub async fn test_responses( >::Future: Send, { let ( - chain_verifier, + router_verifier, _transaction_verifier, _parameter_download_task_handle, _max_checkpoint_height, - ) = zebra_consensus::chain::init( + ) = zebra_consensus::router::init( zebra_consensus::Config::default(), network, state.clone(), @@ -145,7 +145,7 @@ pub async fn test_responses( Buffer::new(mempool.clone(), 1), read_state, mock_chain_tip.clone(), - chain_verifier.clone(), + router_verifier.clone(), mock_sync_status.clone(), mock_address_book, ); @@ -267,7 +267,7 @@ pub async fn test_responses( Buffer::new(mempool.clone(), 1), read_state.clone(), mock_chain_tip.clone(), - chain_verifier, + 
router_verifier, mock_sync_status.clone(), MockAddressBookPeers::default(), ); @@ -365,16 +365,16 @@ pub async fn test_responses( snapshot_rpc_getblocktemplate("invalid-proposal", get_block_template, None, &settings); - // the following snapshots use a mock read_state and chain_verifier + // the following snapshots use a mock read_state and router_verifier - let mut mock_chain_verifier = MockService::build().for_unit_tests(); + let mut mock_router_verifier = MockService::build().for_unit_tests(); let get_block_template_rpc_mock_state_verifier = GetBlockTemplateRpcImpl::new( network, mining_config, Buffer::new(mempool.clone(), 1), read_state.clone(), mock_chain_tip, - mock_chain_verifier.clone(), + mock_router_verifier.clone(), mock_sync_status, MockAddressBookPeers::default(), ); @@ -387,15 +387,15 @@ pub async fn test_responses( }), ); - let mock_chain_verifier_request_handler = async move { - mock_chain_verifier + let mock_router_verifier_request_handler = async move { + mock_router_verifier .expect_request_that(|req| matches!(req, zebra_consensus::Request::CheckProposal(_))) .await .respond(Hash::from([0; 32])); }; let (get_block_template, ..) 
= - tokio::join!(get_block_template_fut, mock_chain_verifier_request_handler,); + tokio::join!(get_block_template_fut, mock_router_verifier_request_handler,); let get_block_template = get_block_template.expect("unexpected error in getblocktemplate RPC call"); diff --git a/zebra-rpc/src/methods/tests/vectors.rs b/zebra-rpc/src/methods/tests/vectors.rs index ccc018b5e7f..2185a74d158 100644 --- a/zebra-rpc/src/methods/tests/vectors.rs +++ b/zebra-rpc/src/methods/tests/vectors.rs @@ -830,11 +830,11 @@ async fn rpc_getblockcount() { zebra_state::populated_state(blocks.clone(), Mainnet).await; let ( - chain_verifier, + router_verifier, _transaction_verifier, _parameter_download_task_handle, _max_checkpoint_height, - ) = zebra_consensus::chain::init( + ) = zebra_consensus::router::init( zebra_consensus::Config::default(), Mainnet, state.clone(), @@ -849,7 +849,7 @@ async fn rpc_getblockcount() { Buffer::new(mempool.clone(), 1), read_state, latest_chain_tip.clone(), - chain_verifier, + router_verifier, MockSyncStatus::default(), MockAddressBookPeers::default(), ); @@ -880,11 +880,11 @@ async fn rpc_getblockcount_empty_state() { zebra_state::init_test_services(Mainnet); let ( - chain_verifier, + router_verifier, _transaction_verifier, _parameter_download_task_handle, _max_checkpoint_height, - ) = zebra_consensus::chain::init( + ) = zebra_consensus::router::init( zebra_consensus::Config::default(), Mainnet, state.clone(), @@ -899,7 +899,7 @@ async fn rpc_getblockcount_empty_state() { Buffer::new(mempool.clone(), 1), read_state, latest_chain_tip.clone(), - chain_verifier, + router_verifier, MockSyncStatus::default(), MockAddressBookPeers::default(), ); @@ -932,11 +932,11 @@ async fn rpc_getpeerinfo() { zebra_state::init_test_services(Mainnet); let ( - chain_verifier, + router_verifier, _transaction_verifier, _parameter_download_task_handle, _max_checkpoint_height, - ) = zebra_consensus::chain::init( + ) = zebra_consensus::router::init( zebra_consensus::Config::default(), 
network, state.clone(), @@ -965,7 +965,7 @@ async fn rpc_getpeerinfo() { Buffer::new(mempool.clone(), 1), read_state, latest_chain_tip.clone(), - chain_verifier, + router_verifier, MockSyncStatus::default(), mock_address_book, ); @@ -1007,11 +1007,11 @@ async fn rpc_getblockhash() { zebra_state::populated_state(blocks.clone(), Mainnet).await; let ( - chain_verifier, + router_verifier, _transaction_verifier, _parameter_download_task_handle, _max_checkpoint_height, - ) = zebra_consensus::chain::init( + ) = zebra_consensus::router::init( zebra_consensus::Config::default(), Mainnet, state.clone(), @@ -1026,7 +1026,7 @@ async fn rpc_getblockhash() { Buffer::new(mempool.clone(), 1), read_state, latest_chain_tip.clone(), - tower::ServiceBuilder::new().service(chain_verifier), + tower::ServiceBuilder::new().service(router_verifier), MockSyncStatus::default(), MockAddressBookPeers::default(), ); @@ -1195,7 +1195,7 @@ async fn rpc_getblocktemplate_mining_address(use_p2pkh: bool) { let mut mempool: MockService<_, _, _, BoxError> = MockService::build().for_unit_tests(); let read_state = MockService::build().for_unit_tests(); - let chain_verifier = MockService::build().for_unit_tests(); + let router_verifier = MockService::build().for_unit_tests(); let mut mock_sync_status = MockSyncStatus::default(); mock_sync_status.set_is_close_to_tip(true); @@ -1236,7 +1236,7 @@ async fn rpc_getblocktemplate_mining_address(use_p2pkh: bool) { Buffer::new(mempool.clone(), 1), read_state.clone(), mock_chain_tip, - chain_verifier, + router_verifier, mock_sync_status.clone(), MockAddressBookPeers::default(), ); @@ -1481,11 +1481,11 @@ async fn rpc_submitblock_errors() { // Init RPCs let ( - chain_verifier, + router_verifier, _transaction_verifier, _parameter_download_task_handle, _max_checkpoint_height, - ) = zebra_consensus::chain::init( + ) = zebra_consensus::router::init( zebra_consensus::Config::default(), Mainnet, state.clone(), @@ -1500,7 +1500,7 @@ async fn rpc_submitblock_errors() { 
Buffer::new(mempool.clone(), 1), read_state, latest_chain_tip.clone(), - chain_verifier, + router_verifier, MockSyncStatus::default(), MockAddressBookPeers::default(), ); @@ -1648,7 +1648,7 @@ async fn rpc_getdifficulty() { let mempool: MockService<_, _, _, BoxError> = MockService::build().for_unit_tests(); let read_state = MockService::build().for_unit_tests(); - let chain_verifier = MockService::build().for_unit_tests(); + let router_verifier = MockService::build().for_unit_tests(); let mut mock_sync_status = MockSyncStatus::default(); mock_sync_status.set_is_close_to_tip(true); @@ -1683,7 +1683,7 @@ async fn rpc_getdifficulty() { Buffer::new(mempool.clone(), 1), read_state.clone(), mock_chain_tip, - chain_verifier, + router_verifier, mock_sync_status.clone(), MockAddressBookPeers::default(), ); diff --git a/zebra-rpc/src/queue/tests/prop.rs b/zebra-rpc/src/queue/tests/prop.rs index c250af68e90..1db9a340f2e 100644 --- a/zebra-rpc/src/queue/tests/prop.rs +++ b/zebra-rpc/src/queue/tests/prop.rs @@ -277,7 +277,7 @@ proptest! { block.transactions.push(Arc::new(transaction.clone())); // commit the created block - let request = zebra_state::Request::CommitFinalizedBlock(zebra_state::FinalizedBlock::from(Arc::new(block.clone()))); + let request = zebra_state::Request::CommitCheckpointVerifiedBlock(zebra_state::CheckpointVerifiedBlock::from(Arc::new(block.clone()))); let send_task = tokio::spawn(write_state.clone().oneshot(request.clone())); let response = zebra_state::Response::Committed(block.hash()); diff --git a/zebra-rpc/src/server.rs b/zebra-rpc/src/server.rs index 0a52a1ecabb..43e7397d534 100644 --- a/zebra-rpc/src/server.rs +++ b/zebra-rpc/src/server.rs @@ -73,7 +73,7 @@ impl RpcServer { // // TODO: put some of the configs or services in their own struct? 
#[allow(clippy::too_many_arguments)] - pub fn spawn( + pub fn spawn( config: Config, #[cfg(feature = "getblocktemplate-rpcs")] mining_config: get_block_template_rpcs::config::Config, @@ -84,7 +84,7 @@ impl RpcServer { mempool: Buffer, state: State, #[cfg_attr(not(feature = "getblocktemplate-rpcs"), allow(unused_variables))] - chain_verifier: ChainVerifier, + router_verifier: BlockVerifierRouter, #[cfg_attr(not(feature = "getblocktemplate-rpcs"), allow(unused_variables))] sync_status: SyncStatus, #[cfg_attr(not(feature = "getblocktemplate-rpcs"), allow(unused_variables))] @@ -110,7 +110,7 @@ impl RpcServer { + 'static, State::Future: Send, Tip: ChainTip + Clone + Send + Sync + 'static, - ChainVerifier: Service< + BlockVerifierRouter: Service< zebra_consensus::Request, Response = block::Hash, Error = zebra_consensus::BoxError, @@ -118,7 +118,7 @@ impl RpcServer { + Send + Sync + 'static, - >::Future: Send, + >::Future: Send, SyncStatus: ChainSyncStatus + Clone + Send + Sync + 'static, AddressBook: AddressBookPeers + Clone + Send + Sync + 'static, { @@ -149,7 +149,7 @@ impl RpcServer { mempool.clone(), state.clone(), latest_chain_tip.clone(), - chain_verifier, + router_verifier, sync_status, address_book, ); diff --git a/zebra-rpc/src/server/tests/vectors.rs b/zebra-rpc/src/server/tests/vectors.rs index 89fa8333ceb..6ad2594eb84 100644 --- a/zebra-rpc/src/server/tests/vectors.rs +++ b/zebra-rpc/src/server/tests/vectors.rs @@ -52,7 +52,7 @@ fn rpc_server_spawn(parallel_cpu_threads: bool) { rt.block_on(async { let mut mempool: MockService<_, _, _, BoxError> = MockService::build().for_unit_tests(); let mut state: MockService<_, _, _, BoxError> = MockService::build().for_unit_tests(); - let mut chain_verifier: MockService<_, _, _, BoxError> = + let mut router_verifier: MockService<_, _, _, BoxError> = MockService::build().for_unit_tests(); info!("spawning RPC server..."); @@ -63,7 +63,7 @@ fn rpc_server_spawn(parallel_cpu_threads: bool) { "RPC server test", 
Buffer::new(mempool.clone(), 1), Buffer::new(state.clone(), 1), - Buffer::new(chain_verifier.clone(), 1), + Buffer::new(router_verifier.clone(), 1), MockSyncStatus::default(), MockAddressBookPeers::default(), NoChainTip, @@ -74,7 +74,7 @@ fn rpc_server_spawn(parallel_cpu_threads: bool) { mempool.expect_no_requests().await; state.expect_no_requests().await; - chain_verifier.expect_no_requests().await; + router_verifier.expect_no_requests().await; // The server and queue tasks should continue without errors or panics let rpc_server_task_result = rpc_server_task_handle.now_or_never(); @@ -138,7 +138,7 @@ fn rpc_server_spawn_unallocated_port(parallel_cpu_threads: bool, do_shutdown: bo rt.block_on(async { let mut mempool: MockService<_, _, _, BoxError> = MockService::build().for_unit_tests(); let mut state: MockService<_, _, _, BoxError> = MockService::build().for_unit_tests(); - let mut chain_verifier: MockService<_, _, _, BoxError> = + let mut router_verifier: MockService<_, _, _, BoxError> = MockService::build().for_unit_tests(); info!("spawning RPC server..."); @@ -149,7 +149,7 @@ fn rpc_server_spawn_unallocated_port(parallel_cpu_threads: bool, do_shutdown: bo "RPC server test", Buffer::new(mempool.clone(), 1), Buffer::new(state.clone(), 1), - Buffer::new(chain_verifier.clone(), 1), + Buffer::new(router_verifier.clone(), 1), MockSyncStatus::default(), MockAddressBookPeers::default(), NoChainTip, @@ -160,7 +160,7 @@ fn rpc_server_spawn_unallocated_port(parallel_cpu_threads: bool, do_shutdown: bo mempool.expect_no_requests().await; state.expect_no_requests().await; - chain_verifier.expect_no_requests().await; + router_verifier.expect_no_requests().await; if do_shutdown { rpc_server @@ -217,7 +217,7 @@ fn rpc_server_spawn_port_conflict() { let test_task_handle = rt.spawn(async { let mut mempool: MockService<_, _, _, BoxError> = MockService::build().for_unit_tests(); let mut state: MockService<_, _, _, BoxError> = MockService::build().for_unit_tests(); - let mut 
chain_verifier: MockService<_, _, _, BoxError> = + let mut router_verifier: MockService<_, _, _, BoxError> = MockService::build().for_unit_tests(); info!("spawning RPC server 1..."); @@ -229,7 +229,7 @@ fn rpc_server_spawn_port_conflict() { "RPC server 1 test", Buffer::new(mempool.clone(), 1), Buffer::new(state.clone(), 1), - Buffer::new(chain_verifier.clone(), 1), + Buffer::new(router_verifier.clone(), 1), MockSyncStatus::default(), MockAddressBookPeers::default(), NoChainTip, @@ -246,7 +246,7 @@ fn rpc_server_spawn_port_conflict() { "RPC server 2 conflict test", Buffer::new(mempool.clone(), 1), Buffer::new(state.clone(), 1), - Buffer::new(chain_verifier.clone(), 1), + Buffer::new(router_verifier.clone(), 1), MockSyncStatus::default(), MockAddressBookPeers::default(), NoChainTip, @@ -257,7 +257,7 @@ fn rpc_server_spawn_port_conflict() { mempool.expect_no_requests().await; state.expect_no_requests().await; - chain_verifier.expect_no_requests().await; + router_verifier.expect_no_requests().await; // Because there is a panic inside a multi-threaded executor, // we can't depend on the exact behaviour of the other tasks, @@ -325,7 +325,7 @@ fn rpc_server_spawn_port_conflict_parallel_auto() { let test_task_handle = rt.spawn(async { let mut mempool: MockService<_, _, _, BoxError> = MockService::build().for_unit_tests(); let mut state: MockService<_, _, _, BoxError> = MockService::build().for_unit_tests(); - let mut chain_verifier: MockService<_, _, _, BoxError> = + let mut router_verifier: MockService<_, _, _, BoxError> = MockService::build().for_unit_tests(); info!("spawning parallel RPC server 1..."); @@ -337,7 +337,7 @@ fn rpc_server_spawn_port_conflict_parallel_auto() { "RPC server 1 test", Buffer::new(mempool.clone(), 1), Buffer::new(state.clone(), 1), - Buffer::new(chain_verifier.clone(), 1), + Buffer::new(router_verifier.clone(), 1), MockSyncStatus::default(), MockAddressBookPeers::default(), NoChainTip, @@ -354,7 +354,7 @@ fn 
rpc_server_spawn_port_conflict_parallel_auto() { "RPC server 2 conflict test", Buffer::new(mempool.clone(), 1), Buffer::new(state.clone(), 1), - Buffer::new(chain_verifier.clone(), 1), + Buffer::new(router_verifier.clone(), 1), MockSyncStatus::default(), MockAddressBookPeers::default(), NoChainTip, @@ -365,7 +365,7 @@ fn rpc_server_spawn_port_conflict_parallel_auto() { mempool.expect_no_requests().await; state.expect_no_requests().await; - chain_verifier.expect_no_requests().await; + router_verifier.expect_no_requests().await; // Because there might be a panic inside a multi-threaded executor, // we can't depend on the exact behaviour of the other tasks, diff --git a/zebra-state/src/arbitrary.rs b/zebra-state/src/arbitrary.rs index 849c047bff3..f151e2c4128 100644 --- a/zebra-state/src/arbitrary.rs +++ b/zebra-state/src/arbitrary.rs @@ -11,19 +11,19 @@ use zebra_chain::{ }; use crate::{ - request::ContextuallyValidBlock, service::chain_tip::ChainTipBlock, FinalizedBlock, - PreparedBlock, + request::ContextuallyVerifiedBlock, service::chain_tip::ChainTipBlock, CheckpointVerifiedBlock, + SemanticallyVerifiedBlock, }; /// Mocks computation done during semantic validation pub trait Prepare { /// Runs block semantic validation computation, and returns the result. /// Test-only method. 
- fn prepare(self) -> PreparedBlock; + fn prepare(self) -> SemanticallyVerifiedBlock; } impl Prepare for Arc { - fn prepare(self) -> PreparedBlock { + fn prepare(self) -> SemanticallyVerifiedBlock { let block = self; let hash = block.hash(); let height = block.coinbase_height().unwrap(); @@ -31,7 +31,7 @@ impl Prepare for Arc { let new_outputs = transparent::new_ordered_outputs_with_height(&block, height, &transaction_hashes); - PreparedBlock { + SemanticallyVerifiedBlock { block, hash, height, @@ -50,9 +50,9 @@ where } } -impl From for ChainTipBlock { - fn from(prepared: PreparedBlock) -> Self { - let PreparedBlock { +impl From for ChainTipBlock { + fn from(prepared: SemanticallyVerifiedBlock) -> Self { + let SemanticallyVerifiedBlock { block, hash, height, @@ -71,17 +71,17 @@ impl From for ChainTipBlock { } } -impl PreparedBlock { - /// Returns a [`ContextuallyValidBlock`] created from this block, +impl SemanticallyVerifiedBlock { + /// Returns a [`ContextuallyVerifiedBlock`] created from this block, /// with fake zero-valued spent UTXOs. /// /// Only for use in tests. #[cfg(test)] - pub fn test_with_zero_spent_utxos(&self) -> ContextuallyValidBlock { - ContextuallyValidBlock::test_with_zero_spent_utxos(self) + pub fn test_with_zero_spent_utxos(&self) -> ContextuallyVerifiedBlock { + ContextuallyVerifiedBlock::test_with_zero_spent_utxos(self) } - /// Returns a [`ContextuallyValidBlock`] created from this block, + /// Returns a [`ContextuallyVerifiedBlock`] created from this block, /// using a fake chain value pool change. /// /// Only for use in tests. 
@@ -89,26 +89,26 @@ impl PreparedBlock { pub fn test_with_chain_pool_change( &self, fake_chain_value_pool_change: ValueBalance, - ) -> ContextuallyValidBlock { - ContextuallyValidBlock::test_with_chain_pool_change(self, fake_chain_value_pool_change) + ) -> ContextuallyVerifiedBlock { + ContextuallyVerifiedBlock::test_with_chain_pool_change(self, fake_chain_value_pool_change) } - /// Returns a [`ContextuallyValidBlock`] created from this block, + /// Returns a [`ContextuallyVerifiedBlock`] created from this block, /// with no chain value pool change. /// /// Only for use in tests. #[cfg(test)] - pub fn test_with_zero_chain_pool_change(&self) -> ContextuallyValidBlock { - ContextuallyValidBlock::test_with_zero_chain_pool_change(self) + pub fn test_with_zero_chain_pool_change(&self) -> ContextuallyVerifiedBlock { + ContextuallyVerifiedBlock::test_with_zero_chain_pool_change(self) } } -impl ContextuallyValidBlock { +impl ContextuallyVerifiedBlock { /// Create a block that's ready for non-finalized `Chain` contextual - /// validation, using a [`PreparedBlock`] and fake zero-valued spent UTXOs. + /// validation, using a [`SemanticallyVerifiedBlock`] and fake zero-valued spent UTXOs. /// /// Only for use in tests. - pub fn test_with_zero_spent_utxos(block: impl Into) -> Self { + pub fn test_with_zero_spent_utxos(block: impl Into) -> Self { let block = block.into(); let zero_output = transparent::Output { @@ -128,19 +128,19 @@ impl ContextuallyValidBlock { .map(|outpoint| (outpoint, zero_utxo.clone())) .collect(); - ContextuallyValidBlock::with_block_and_spent_utxos(block, zero_spent_utxos) + ContextuallyVerifiedBlock::with_block_and_spent_utxos(block, zero_spent_utxos) .expect("all UTXOs are provided with zero values") } - /// Create a [`ContextuallyValidBlock`] from a [`Block`] or [`PreparedBlock`], + /// Create a [`ContextuallyVerifiedBlock`] from a [`Block`] or [`SemanticallyVerifiedBlock`], /// using a fake chain value pool change. /// /// Only for use in tests. 
pub fn test_with_chain_pool_change( - block: impl Into, + block: impl Into, fake_chain_value_pool_change: ValueBalance, ) -> Self { - let PreparedBlock { + let SemanticallyVerifiedBlock { block, hash, height, @@ -162,20 +162,20 @@ impl ContextuallyValidBlock { } } - /// Create a [`ContextuallyValidBlock`] from a [`Block`] or [`PreparedBlock`], + /// Create a [`ContextuallyVerifiedBlock`] from a [`Block`] or [`SemanticallyVerifiedBlock`], /// with no chain value pool change. /// /// Only for use in tests. - pub fn test_with_zero_chain_pool_change(block: impl Into) -> Self { + pub fn test_with_zero_chain_pool_change(block: impl Into) -> Self { Self::test_with_chain_pool_change(block, ValueBalance::zero()) } } -impl FinalizedBlock { +impl CheckpointVerifiedBlock { /// Create a block that's ready to be committed to the finalized state, /// using a precalculated [`block::Hash`] and [`block::Height`]. /// - /// This is a test-only method, prefer [`FinalizedBlock::with_hash`]. + /// This is a test-only method, prefer [`CheckpointVerifiedBlock::with_hash`]. #[cfg(any(test, feature = "proptest-impl"))] pub fn with_hash_and_height( block: Arc, diff --git a/zebra-state/src/error.rs b/zebra-state/src/error.rs index f75f0386810..f4cd7213e47 100644 --- a/zebra-state/src/error.rs +++ b/zebra-state/src/error.rs @@ -24,8 +24,8 @@ pub struct CloneError { source: Arc, } -impl From for CloneError { - fn from(source: CommitBlockError) -> Self { +impl From for CloneError { + fn from(source: CommitSemanticallyVerifiedError) -> Self { let source = Arc::new(source); Self { source } } @@ -41,10 +41,10 @@ impl From for CloneError { /// A boxed [`std::error::Error`]. pub type BoxError = Box; -/// An error describing the reason a block could not be committed to the state. +/// An error describing the reason a semantically verified block could not be committed to the state. 
#[derive(Debug, Error, PartialEq, Eq)] #[error("block is not contextually valid: {}", .0)] -pub struct CommitBlockError(#[from] ValidateContextError); +pub struct CommitSemanticallyVerifiedError(#[from] ValidateContextError); /// An error describing why a block failed contextual validation. #[derive(Debug, Error, Clone, PartialEq, Eq)] diff --git a/zebra-state/src/lib.rs b/zebra-state/src/lib.rs index 8b7dbd8ecfe..f75721c5d57 100644 --- a/zebra-state/src/lib.rs +++ b/zebra-state/src/lib.rs @@ -32,9 +32,12 @@ mod tests; pub use config::{check_and_delete_old_databases, Config}; pub use constants::MAX_BLOCK_REORG_HEIGHT; pub use error::{ - BoxError, CloneError, CommitBlockError, DuplicateNullifierError, ValidateContextError, + BoxError, CloneError, CommitSemanticallyVerifiedError, DuplicateNullifierError, + ValidateContextError, +}; +pub use request::{ + CheckpointVerifiedBlock, HashOrHeight, ReadRequest, Request, SemanticallyVerifiedBlock, }; -pub use request::{FinalizedBlock, HashOrHeight, PreparedBlock, ReadRequest, Request}; pub use response::{KnownBlock, MinedTx, ReadResponse, Response}; pub use service::{ chain_tip::{ChainTipChange, LatestChainTip, TipAction}, @@ -54,4 +57,4 @@ pub use service::{ init_test, init_test_services, ReadStateService, }; -pub(crate) use request::ContextuallyValidBlock; +pub(crate) use request::ContextuallyVerifiedBlock; diff --git a/zebra-state/src/request.rs b/zebra-state/src/request.rs index 5d5b2540ff2..6ffa7d3e04d 100644 --- a/zebra-state/src/request.rs +++ b/zebra-state/src/request.rs @@ -137,7 +137,7 @@ impl std::str::FromStr for HashOrHeight { /// the *service caller*'s task, not inside the service call itself. This allows /// moving work out of the single-threaded state service. #[derive(Clone, Debug, PartialEq, Eq)] -pub struct PreparedBlock { +pub struct SemanticallyVerifiedBlock { /// The block to commit to the state. pub block: Arc, /// The hash of the block. 
@@ -165,12 +165,16 @@ pub struct PreparedBlock { // Some fields are pub(crate), so we can add whatever db-format-dependent // precomputation we want here without leaking internal details. -/// A contextually validated block, ready to be committed directly to the finalized state with -/// no checks, if it becomes the root of the best non-finalized chain. +/// A contextually verified block, ready to be committed directly to the finalized state with no +/// checks, if it becomes the root of the best non-finalized chain. /// /// Used by the state service and non-finalized `Chain`. +/// +/// Note: The difference between a `CheckpointVerifiedBlock` and a `ContextuallyVerifiedBlock` is +/// that the `CheckpointVerifier` doesn't bind the transaction authorizing data to the +/// `ChainHistoryBlockTxAuthCommitmentHash`, but the `NonFinalizedState` and `FinalizedState` do. #[derive(Clone, Debug, PartialEq, Eq)] -pub struct ContextuallyValidBlock { +pub struct ContextuallyVerifiedBlock { /// The block to commit to the state. pub(crate) block: Arc, @@ -207,12 +211,16 @@ pub struct ContextuallyValidBlock { pub(crate) chain_value_pool_change: ValueBalance, } -/// A finalized block, ready to be committed directly to the finalized state with +/// A block ready to be committed directly to the finalized state with /// no checks. /// /// This is exposed for use in checkpointing. +/// +/// Note: The difference between a `CheckpointVerifiedBlock` and a `ContextuallyVerifiedBlock` is +/// that the `CheckpointVerifier` doesn't bind the transaction authorizing data to the +/// `ChainHistoryBlockTxAuthCommitmentHash`, but the `NonFinalizedState` and `FinalizedState` do. #[derive(Clone, Debug, PartialEq, Eq)] -pub struct FinalizedBlock { +pub struct CheckpointVerifiedBlock { /// The block to commit to the state. pub block: Arc, /// The hash of the block. @@ -266,42 +274,42 @@ impl Treestate { /// when committing a block. 
The associated treestate is passed so that the /// finalized state does not have to retrieve the previous treestate from the /// database and recompute the new one. -pub struct FinalizedWithTrees { +pub struct ContextuallyVerifiedBlockWithTrees { /// A block ready to be committed. - pub finalized: FinalizedBlock, + pub checkpoint_verified: CheckpointVerifiedBlock, /// The tresstate associated with the block. pub treestate: Option, } -impl FinalizedWithTrees { - pub fn new(block: ContextuallyValidBlock, treestate: Treestate) -> Self { - let finalized = FinalizedBlock::from(block); +impl ContextuallyVerifiedBlockWithTrees { + pub fn new(block: ContextuallyVerifiedBlock, treestate: Treestate) -> Self { + let checkpoint_verified = CheckpointVerifiedBlock::from(block); Self { - finalized, + checkpoint_verified, treestate: Some(treestate), } } } -impl From> for FinalizedWithTrees { +impl From> for ContextuallyVerifiedBlockWithTrees { fn from(block: Arc) -> Self { - Self::from(FinalizedBlock::from(block)) + Self::from(CheckpointVerifiedBlock::from(block)) } } -impl From for FinalizedWithTrees { - fn from(block: FinalizedBlock) -> Self { +impl From for ContextuallyVerifiedBlockWithTrees { + fn from(block: CheckpointVerifiedBlock) -> Self { Self { - finalized: block, + checkpoint_verified: block, treestate: None, } } } -impl From<&PreparedBlock> for PreparedBlock { - fn from(prepared: &PreparedBlock) -> Self { - prepared.clone() +impl From<&SemanticallyVerifiedBlock> for SemanticallyVerifiedBlock { + fn from(semantically_verified: &SemanticallyVerifiedBlock) -> Self { + semantically_verified.clone() } } @@ -309,27 +317,27 @@ impl From<&PreparedBlock> for PreparedBlock { // the *service caller*'s task, not inside the service call itself. // This allows moving work out of the single-threaded state service. 
-impl ContextuallyValidBlock { +impl ContextuallyVerifiedBlock { /// Create a block that's ready for non-finalized `Chain` contextual validation, - /// using a [`PreparedBlock`] and the UTXOs it spends. + /// using a [`SemanticallyVerifiedBlock`] and the UTXOs it spends. /// - /// When combined, `prepared.new_outputs` and `spent_utxos` must contain + /// When combined, `semantically_verified.new_outputs` and `spent_utxos` must contain /// the [`Utxo`](transparent::Utxo)s spent by every transparent input in this block, /// including UTXOs created by earlier transactions in this block. /// - /// Note: a [`ContextuallyValidBlock`] isn't actually contextually valid until + /// Note: a [`ContextuallyVerifiedBlock`] isn't actually contextually valid until /// [`Chain::push()`](crate::service::non_finalized_state::Chain::push) returns success. pub fn with_block_and_spent_utxos( - prepared: PreparedBlock, + semantically_verified: SemanticallyVerifiedBlock, mut spent_outputs: HashMap, ) -> Result { - let PreparedBlock { + let SemanticallyVerifiedBlock { block, hash, height, new_outputs, transaction_hashes, - } = prepared; + } = semantically_verified; // This is redundant for the non-finalized state, // but useful to make some tests pass more easily. @@ -350,12 +358,12 @@ impl ContextuallyValidBlock { } } -impl FinalizedBlock { +impl CheckpointVerifiedBlock { /// Create a block that's ready to be committed to the finalized state, /// using a precalculated [`block::Hash`]. /// - /// Note: a [`FinalizedBlock`] isn't actually finalized - /// until [`Request::CommitFinalizedBlock`] returns success. + /// Note: a [`CheckpointVerifiedBlock`] isn't actually finalized + /// until [`Request::CommitCheckpointVerifiedBlock`] returns success. 
pub fn with_hash(block: Arc, hash: block::Hash) -> Self { let height = block .coinbase_height() @@ -373,17 +381,17 @@ impl FinalizedBlock { } } -impl From> for FinalizedBlock { +impl From> for CheckpointVerifiedBlock { fn from(block: Arc) -> Self { let hash = block.hash(); - FinalizedBlock::with_hash(block, hash) + CheckpointVerifiedBlock::with_hash(block, hash) } } -impl From for FinalizedBlock { - fn from(contextually_valid: ContextuallyValidBlock) -> Self { - let ContextuallyValidBlock { +impl From for CheckpointVerifiedBlock { + fn from(contextually_valid: ContextuallyVerifiedBlock) -> Self { + let ContextuallyVerifiedBlock { block, hash, height, @@ -428,7 +436,7 @@ pub enum Request { /// Block commit requests should be wrapped in a timeout, so that /// out-of-order and invalid requests do not hang indefinitely. See the [`crate`] /// documentation for details. - CommitBlock(PreparedBlock), + CommitSemanticallyVerifiedBlock(SemanticallyVerifiedBlock), /// Commit a checkpointed block to the state, skipping most block validation. /// @@ -474,7 +482,7 @@ pub enum Request { /// Block commit requests should be wrapped in a timeout, so that /// out-of-order and invalid requests do not hang indefinitely. See the [`crate`] /// documentation for details. - CommitFinalizedBlock(FinalizedBlock), + CommitCheckpointVerifiedBlock(CheckpointVerifiedBlock), /// Computes the depth in the current best chain of the block identified by the given hash. /// @@ -619,14 +627,15 @@ pub enum Request { /// /// Returns [`Response::ValidBlockProposal`] when successful. /// See `[ReadRequest::CheckBlockProposalValidity]` for details. 
- CheckBlockProposalValidity(PreparedBlock), + CheckBlockProposalValidity(SemanticallyVerifiedBlock), } impl Request { fn variant_name(&self) -> &'static str { match self { - Request::CommitBlock(_) => "commit_block", - Request::CommitFinalizedBlock(_) => "commit_finalized_block", + Request::CommitSemanticallyVerifiedBlock(_) => "commit_semantically_verified_block", + Request::CommitCheckpointVerifiedBlock(_) => "commit_checkpoint_verified_block", + Request::AwaitUtxo(_) => "await_utxo", Request::Depth(_) => "depth", Request::Tip => "tip", @@ -870,7 +879,7 @@ pub enum ReadRequest { /// /// Returns [`ReadResponse::ValidBlockProposal`] when successful, or an error if /// the block fails contextual validation. - CheckBlockProposalValidity(PreparedBlock), + CheckBlockProposalValidity(SemanticallyVerifiedBlock), } impl ReadRequest { @@ -947,9 +956,8 @@ impl TryFrom for ReadRequest { Ok(ReadRequest::CheckBestChainTipNullifiersAndAnchors(tx)) } - Request::CommitBlock(_) | Request::CommitFinalizedBlock(_) => { - Err("ReadService does not write blocks") - } + Request::CommitSemanticallyVerifiedBlock(_) + | Request::CommitCheckpointVerifiedBlock(_) => Err("ReadService does not write blocks"), Request::AwaitUtxo(_) => Err("ReadService does not track pending UTXOs. 
\ Manually convert the request to ReadRequest::AnyChainUtxo, \ @@ -958,9 +966,9 @@ impl TryFrom for ReadRequest { Request::KnownBlock(_) => Err("ReadService does not track queued blocks"), #[cfg(feature = "getblocktemplate-rpcs")] - Request::CheckBlockProposalValidity(prepared) => { - Ok(ReadRequest::CheckBlockProposalValidity(prepared)) - } + Request::CheckBlockProposalValidity(semantically_verified) => Ok( + ReadRequest::CheckBlockProposalValidity(semantically_verified), + ), } } } diff --git a/zebra-state/src/response.rs b/zebra-state/src/response.rs index 7f1ea935e29..c238258f559 100644 --- a/zebra-state/src/response.rs +++ b/zebra-state/src/response.rs @@ -24,7 +24,7 @@ use crate::{service::read::AddressUtxos, TransactionLocation}; #[derive(Clone, Debug, PartialEq, Eq)] /// A response to a [`StateService`](crate::service::StateService) [`Request`]. pub enum Response { - /// Response to [`Request::CommitBlock`] indicating that a block was + /// Response to [`Request::CommitSemanticallyVerifiedBlock`] indicating that a block was /// successfully committed to the state. 
Committed(block::Hash), diff --git a/zebra-state/src/service.rs b/zebra-state/src/service.rs index 48a841d0cdf..2f229da9908 100644 --- a/zebra-state/src/service.rs +++ b/zebra-state/src/service.rs @@ -61,8 +61,8 @@ use crate::{ queued_blocks::QueuedBlocks, watch_receiver::WatchReceiver, }, - BoxError, CloneError, Config, FinalizedBlock, PreparedBlock, ReadRequest, ReadResponse, - Request, Response, + BoxError, CheckpointVerifiedBlock, CloneError, Config, ReadRequest, ReadResponse, Request, + Response, SemanticallyVerifiedBlock, }; pub mod block_iter; @@ -345,7 +345,7 @@ impl StateService { let initial_tip = finalized_state .db .tip_block() - .map(FinalizedBlock::from) + .map(CheckpointVerifiedBlock::from) .map(ChainTipBlock::from); timer.finish(module_path!(), line!(), "fetching database tip"); @@ -459,25 +459,25 @@ impl StateService { /// Returns a channel receiver that provides the result of the block commit. fn queue_and_commit_finalized( &mut self, - finalized: FinalizedBlock, + checkpoint_verified: CheckpointVerifiedBlock, ) -> oneshot::Receiver> { // # Correctness & Performance // // This method must not block, access the database, or perform CPU-intensive tasks, // because it is called directly from the tokio executor's Future threads. - let queued_prev_hash = finalized.block.header.previous_block_hash; - let queued_height = finalized.height; + let queued_prev_hash = checkpoint_verified.block.header.previous_block_hash; + let queued_height = checkpoint_verified.height; // If we're close to the final checkpoint, make the block's UTXOs available for // full verification of non-finalized blocks, even when it is in the channel. 
if self.is_close_to_final_checkpoint(queued_height) { self.sent_non_finalized_block_hashes - .add_finalized(&finalized) + .add_finalized(&checkpoint_verified) } let (rsp_tx, rsp_rx) = oneshot::channel(); - let queued = (finalized, rsp_tx); + let queued = (checkpoint_verified, rsp_tx); if self.finalized_block_write_sender.is_some() { // We're still committing finalized blocks @@ -636,17 +636,17 @@ impl StateService { /// in RFC0005. /// /// [1]: https://zebra.zfnd.org/dev/rfcs/0005-state-updates.html#committing-non-finalized-blocks - #[instrument(level = "debug", skip(self, prepared))] + #[instrument(level = "debug", skip(self, semantically_verrified))] fn queue_and_commit_non_finalized( &mut self, - prepared: PreparedBlock, + semantically_verrified: SemanticallyVerifiedBlock, ) -> oneshot::Receiver> { - tracing::debug!(block = %prepared.block, "queueing block for contextual verification"); - let parent_hash = prepared.block.header.previous_block_hash; + tracing::debug!(block = %semantically_verrified.block, "queueing block for contextual verification"); + let parent_hash = semantically_verrified.block.header.previous_block_hash; if self .sent_non_finalized_block_hashes - .contains(&prepared.hash) + .contains(&semantically_verrified.hash) { let (rsp_tx, rsp_rx) = oneshot::channel(); let _ = rsp_tx.send(Err( @@ -655,7 +655,11 @@ impl StateService { return rsp_rx; } - if self.read_service.db.contains_height(prepared.height) { + if self + .read_service + .db + .contains_height(semantically_verrified.height) + { let (rsp_tx, rsp_rx) = oneshot::channel(); let _ = rsp_tx.send(Err( "block height is in the finalized state: block is already committed to the state" @@ -664,11 +668,12 @@ impl StateService { return rsp_rx; } - // Request::CommitBlock contract: a request to commit a block which has - // been queued but not yet committed to the state fails the older - // request and replaces it with the newer request. 
- let rsp_rx = if let Some((_, old_rsp_tx)) = - self.queued_non_finalized_blocks.get_mut(&prepared.hash) + // [`Request::CommitSemanticallyVerifiedBlock`] contract: a request to commit a block which + // has been queued but not yet committed to the state fails the older request and replaces + // it with the newer request. + let rsp_rx = if let Some((_, old_rsp_tx)) = self + .queued_non_finalized_blocks + .get_mut(&semantically_verrified.hash) { tracing::debug!("replacing older queued request with new request"); let (mut rsp_tx, rsp_rx) = oneshot::channel(); @@ -677,7 +682,8 @@ impl StateService { rsp_rx } else { let (rsp_tx, rsp_rx) = oneshot::channel(); - self.queued_non_finalized_blocks.queue((prepared, rsp_tx)); + self.queued_non_finalized_blocks + .queue((semantically_verrified, rsp_tx)); rsp_rx }; @@ -763,7 +769,7 @@ impl StateService { .dequeue_children(parent_hash); for queued_child in queued_children { - let (PreparedBlock { hash, .. }, _) = queued_child; + let (SemanticallyVerifiedBlock { hash, .. }, _) = queued_child; self.sent_non_finalized_block_hashes.add(&queued_child.0); let send_result = non_finalized_block_write_sender.send(queued_child); @@ -798,9 +804,9 @@ impl StateService { ) } - /// Assert some assumptions about the prepared `block` before it is queued. - fn assert_block_can_be_validated(&self, block: &PreparedBlock) { - // required by CommitBlock call + /// Assert some assumptions about the semantically verified `block` before it is queued. 
+ fn assert_block_can_be_validated(&self, block: &SemanticallyVerifiedBlock) { + // required by `Request::CommitSemanticallyVerifiedBlock` call assert!( block.height > self.network.mandatory_checkpoint_height(), "invalid non-finalized block height: the canopy checkpoint is mandatory, pre-canopy \ @@ -901,11 +907,11 @@ impl Service for StateService { match req { // Uses queued_non_finalized_blocks and pending_utxos in the StateService // Accesses shared writeable state in the StateService, NonFinalizedState, and ZebraDb. - Request::CommitBlock(prepared) => { - self.assert_block_can_be_validated(&prepared); + Request::CommitSemanticallyVerifiedBlock(semantically_verified) => { + self.assert_block_can_be_validated(&semantically_verified); self.pending_utxos - .check_against_ordered(&prepared.new_outputs); + .check_against_ordered(&semantically_verified.new_outputs); // # Performance // @@ -919,7 +925,7 @@ impl Service for StateService { // https://docs.rs/tokio/latest/tokio/task/fn.block_in_place.html let rsp_rx = tokio::task::block_in_place(move || { - span.in_scope(|| self.queue_and_commit_non_finalized(prepared)) + span.in_scope(|| self.queue_and_commit_non_finalized(semantically_verified)) }); // TODO: @@ -927,14 +933,16 @@ impl Service for StateService { // as well as in poll_ready() // The work is all done, the future just waits on a channel for the result - timer.finish(module_path!(), line!(), "CommitBlock"); + timer.finish(module_path!(), line!(), "CommitSemanticallyVerifiedBlock"); let span = Span::current(); async move { rsp_rx .await .map_err(|_recv_error| { - BoxError::from("block was dropped from the state CommitBlock queue") + BoxError::from( + "block was dropped from the queue of non-finalized blocks", + ) }) // TODO: replace with Result::flatten once it stabilises // https://github.com/rust-lang/rust/issues/70142 @@ -948,7 +956,7 @@ impl Service for StateService { // Uses queued_finalized_blocks and pending_utxos in the StateService. 
// Accesses shared writeable state in the StateService. - Request::CommitFinalizedBlock(finalized) => { + Request::CommitCheckpointVerifiedBlock(finalized) => { // # Consensus // // A non-finalized block verification could have called AwaitUtxo @@ -970,15 +978,13 @@ impl Service for StateService { // as well as in poll_ready() // The work is all done, the future just waits on a channel for the result - timer.finish(module_path!(), line!(), "CommitFinalizedBlock"); + timer.finish(module_path!(), line!(), "CommitCheckpointVerifiedBlock"); async move { rsp_rx .await .map_err(|_recv_error| { - BoxError::from( - "block was dropped from the state CommitFinalizedBlock queue", - ) + BoxError::from("block was dropped from the queue of finalized blocks") }) // TODO: replace with Result::flatten once it stabilises // https://github.com/rust-lang/rust/issues/70142 @@ -1753,7 +1759,7 @@ impl Service for ReadStateService { } #[cfg(feature = "getblocktemplate-rpcs")] - ReadRequest::CheckBlockProposalValidity(prepared) => { + ReadRequest::CheckBlockProposalValidity(semantically_verified) => { let state = self.clone(); // # Performance @@ -1770,7 +1776,7 @@ impl Service for ReadStateService { return Err("state is empty: wait for Zebra to sync before submitting a proposal".into()); }; - if prepared.block.header.previous_block_hash != best_tip_hash { + if semantically_verified.block.header.previous_block_hash != best_tip_hash { return Err("proposal is not based on the current best chain tip: previous block hash must be the best chain tip".into()); } @@ -1778,13 +1784,13 @@ impl Service for ReadStateService { // The non-finalized state that's used in the rest of the state (including finalizing // blocks into the db) is not mutated here. // - // TODO: Convert `CommitBlockError` to a new `ValidateProposalError`? + // TODO: Convert `CommitSemanticallyVerifiedError` to a new `ValidateProposalError`? 
latest_non_finalized_state.disable_metrics(); write::validate_and_commit_non_finalized( &state.db, &mut latest_non_finalized_state, - prepared, + semantically_verified, )?; // The work is done in the future. diff --git a/zebra-state/src/service/arbitrary.rs b/zebra-state/src/service/arbitrary.rs index accc9db7a2d..f7d4e4ddba1 100644 --- a/zebra-state/src/service/arbitrary.rs +++ b/zebra-state/src/service/arbitrary.rs @@ -23,7 +23,7 @@ use zebra_chain::{ use crate::{ arbitrary::Prepare, service::{check, ReadStateService, StateService}, - BoxError, ChainTipChange, Config, LatestChainTip, PreparedBlock, Request, Response, + BoxError, ChainTipChange, Config, LatestChainTip, Request, Response, SemanticallyVerifiedBlock, }; pub use zebra_chain::block::arbitrary::MAX_PARTIAL_CHAIN_BLOCKS; @@ -33,7 +33,7 @@ pub const CHAIN_TIP_UPDATE_WAIT_LIMIT: Duration = Duration::from_secs(2); #[derive(Debug)] pub struct PreparedChainTree { - chain: Arc>>, + chain: Arc>>, count: BinarySearch, network: Network, history_tree: Arc, @@ -41,7 +41,7 @@ pub struct PreparedChainTree { impl ValueTree for PreparedChainTree { type Value = ( - Arc>>, + Arc>>, ::Value, Network, Arc, @@ -71,7 +71,7 @@ pub struct PreparedChain { chain: std::sync::Mutex< Option<( Network, - Arc>>, + Arc>>, Arc, )>, >, @@ -199,7 +199,7 @@ pub async fn populated_state( ) { let requests = blocks .into_iter() - .map(|block| Request::CommitFinalizedBlock(block.into())); + .map(|block| Request::CommitCheckpointVerifiedBlock(block.into())); // TODO: write a test that checks the finalized to non-finalized transition with UTXOs, // and set max_checkpoint_height and checkpoint_verify_concurrency_limit correctly. 
diff --git a/zebra-state/src/service/chain_tip.rs b/zebra-state/src/service/chain_tip.rs index 80675609aae..c08571c76e3 100644 --- a/zebra-state/src/service/chain_tip.rs +++ b/zebra-state/src/service/chain_tip.rs @@ -20,7 +20,8 @@ use zebra_chain::{ }; use crate::{ - request::ContextuallyValidBlock, service::watch_receiver::WatchReceiver, FinalizedBlock, + request::ContextuallyVerifiedBlock, service::watch_receiver::WatchReceiver, + CheckpointVerifiedBlock, }; use TipAction::*; @@ -85,9 +86,9 @@ impl fmt::Display for ChainTipBlock { } } -impl From for ChainTipBlock { - fn from(contextually_valid: ContextuallyValidBlock) -> Self { - let ContextuallyValidBlock { +impl From for ChainTipBlock { + fn from(contextually_valid: ContextuallyVerifiedBlock) -> Self { + let ContextuallyVerifiedBlock { block, hash, height, @@ -106,9 +107,9 @@ impl From for ChainTipBlock { } } -impl From for ChainTipBlock { - fn from(finalized: FinalizedBlock) -> Self { - let FinalizedBlock { +impl From for ChainTipBlock { + fn from(finalized: CheckpointVerifiedBlock) -> Self { + let CheckpointVerifiedBlock { block, hash, height, diff --git a/zebra-state/src/service/check.rs b/zebra-state/src/service/check.rs index d9db02c154b..07abce9895b 100644 --- a/zebra-state/src/service/check.rs +++ b/zebra-state/src/service/check.rs @@ -16,7 +16,7 @@ use crate::{ block_iter::any_ancestor_blocks, check::difficulty::POW_ADJUSTMENT_BLOCK_SPAN, finalized_state::ZebraDb, non_finalized_state::NonFinalizedState, }, - BoxError, PreparedBlock, ValidateContextError, + BoxError, SemanticallyVerifiedBlock, ValidateContextError, }; // use self as check @@ -52,7 +52,7 @@ pub(crate) use difficulty::AdjustedDifficulty; /// If the state contains less than 28 ([`POW_ADJUSTMENT_BLOCK_SPAN`]) blocks. 
#[tracing::instrument(skip(prepared, finalized_tip_height, relevant_chain))] pub(crate) fn block_is_valid_for_recent_chain( - prepared: &PreparedBlock, + prepared: &SemanticallyVerifiedBlock, network: Network, finalized_tip_height: Option, relevant_chain: C, @@ -369,7 +369,7 @@ where pub(crate) fn initial_contextual_validity( finalized_state: &ZebraDb, non_finalized_state: &NonFinalizedState, - prepared: &PreparedBlock, + prepared: &SemanticallyVerifiedBlock, ) -> Result<(), ValidateContextError> { let relevant_chain = any_ancestor_blocks( non_finalized_state, diff --git a/zebra-state/src/service/check/anchors.rs b/zebra-state/src/service/check/anchors.rs index a2467693a3e..f410abd89b2 100644 --- a/zebra-state/src/service/check/anchors.rs +++ b/zebra-state/src/service/check/anchors.rs @@ -13,7 +13,7 @@ use zebra_chain::{ use crate::{ service::{finalized_state::ZebraDb, non_finalized_state::Chain}, - PreparedBlock, ValidateContextError, + SemanticallyVerifiedBlock, ValidateContextError, }; /// Checks the final Sapling and Orchard anchors specified by `transaction` @@ -312,9 +312,9 @@ fn sprout_anchors_refer_to_treestates( Ok(()) } -/// Accepts a [`ZebraDb`], [`Chain`], and [`PreparedBlock`]. +/// Accepts a [`ZebraDb`], [`Chain`], and [`SemanticallyVerifiedBlock`]. /// -/// Iterates over the transactions in the [`PreparedBlock`] checking the final Sapling and Orchard anchors. +/// Iterates over the transactions in the [`SemanticallyVerifiedBlock`] checking the final Sapling and Orchard anchors. /// /// This method checks for anchors computed from the final treestate of each block in /// the `parent_chain` or `finalized_state`. 
@@ -322,7 +322,7 @@ fn sprout_anchors_refer_to_treestates( pub(crate) fn block_sapling_orchard_anchors_refer_to_final_treestates( finalized_state: &ZebraDb, parent_chain: &Arc, - prepared: &PreparedBlock, + prepared: &SemanticallyVerifiedBlock, ) -> Result<(), ValidateContextError> { prepared.block.transactions.iter().enumerate().try_for_each( |(tx_index_in_block, transaction)| { @@ -338,9 +338,9 @@ pub(crate) fn block_sapling_orchard_anchors_refer_to_final_treestates( ) } -/// Accepts a [`ZebraDb`], [`Arc`](Chain), and [`PreparedBlock`]. +/// Accepts a [`ZebraDb`], [`Arc`](Chain), and [`SemanticallyVerifiedBlock`]. /// -/// Iterates over the transactions in the [`PreparedBlock`], and fetches the Sprout final treestates +/// Iterates over the transactions in the [`SemanticallyVerifiedBlock`], and fetches the Sprout final treestates /// from the state. /// /// Returns a `HashMap` of the Sprout final treestates from the state for [`sprout_anchors_refer_to_treestates()`] @@ -353,7 +353,7 @@ pub(crate) fn block_sapling_orchard_anchors_refer_to_final_treestates( pub(crate) fn block_fetch_sprout_final_treestates( finalized_state: &ZebraDb, parent_chain: &Arc, - prepared: &PreparedBlock, + prepared: &SemanticallyVerifiedBlock, ) -> HashMap> { let mut sprout_final_treestates = HashMap::new(); diff --git a/zebra-state/src/service/check/nullifier.rs b/zebra-state/src/service/check/nullifier.rs index f3ea6853fe5..4f638b24ff7 100644 --- a/zebra-state/src/service/check/nullifier.rs +++ b/zebra-state/src/service/check/nullifier.rs @@ -8,7 +8,7 @@ use zebra_chain::transaction::Transaction; use crate::{ error::DuplicateNullifierError, service::{finalized_state::ZebraDb, non_finalized_state::Chain}, - PreparedBlock, ValidateContextError, + SemanticallyVerifiedBlock, ValidateContextError, }; // Tidy up some doc links @@ -16,7 +16,7 @@ use crate::{ use crate::service; /// Reject double-spends of nullifers: -/// - one from this [`PreparedBlock`], and the other already committed to the 
+/// - one from this [`SemanticallyVerifiedBlock`], and the other already committed to the /// [`FinalizedState`](service::FinalizedState). /// /// (Duplicate non-finalized nullifiers are rejected during the chain update, @@ -32,7 +32,7 @@ use crate::service; /// #[tracing::instrument(skip(prepared, finalized_state))] pub(crate) fn no_duplicates_in_finalized_chain( - prepared: &PreparedBlock, + prepared: &SemanticallyVerifiedBlock, finalized_state: &ZebraDb, ) -> Result<(), ValidateContextError> { for nullifier in prepared.block.sprout_nullifiers() { diff --git a/zebra-state/src/service/check/tests/anchors.rs b/zebra-state/src/service/check/tests/anchors.rs index 11564201e12..d96c8b0410b 100644 --- a/zebra-state/src/service/check/tests/anchors.rs +++ b/zebra-state/src/service/check/tests/anchors.rs @@ -18,7 +18,7 @@ use crate::{ write::validate_and_commit_non_finalized, }, tests::setup::{new_state_with_mainnet_genesis, transaction_v4_from_coinbase}, - PreparedBlock, ValidateContextError, + SemanticallyVerifiedBlock, ValidateContextError, }; // Sprout @@ -105,7 +105,10 @@ fn check_sprout_anchors() { ); } -fn prepare_sprout_block(mut block_to_prepare: Block, reference_block: Block) -> PreparedBlock { +fn prepare_sprout_block( + mut block_to_prepare: Block, + reference_block: Block, +) -> SemanticallyVerifiedBlock { // Convert the coinbase transaction to a version that the non-finalized state will accept. 
block_to_prepare.transactions[0] = transaction_v4_from_coinbase(&block_to_prepare.transactions[0]).into(); diff --git a/zebra-state/src/service/check/tests/nullifier.rs b/zebra-state/src/service/check/tests/nullifier.rs index 2cfa81c3b08..e522ec479a3 100644 --- a/zebra-state/src/service/check/tests/nullifier.rs +++ b/zebra-state/src/service/check/tests/nullifier.rs @@ -23,7 +23,7 @@ use crate::{ check::nullifier::tx_no_duplicates_in_chain, read, write::validate_and_commit_non_finalized, }, tests::setup::{new_state_with_mainnet_genesis, transaction_v4_from_coinbase}, - FinalizedBlock, + CheckpointVerifiedBlock, ValidateContextError::{ DuplicateOrchardNullifier, DuplicateSaplingNullifier, DuplicateSproutNullifier, }, @@ -84,7 +84,7 @@ proptest! { // randomly choose to commit the block to the finalized or non-finalized state if use_finalized_state { - let block1 = FinalizedBlock::from(Arc::new(block1)); + let block1 = CheckpointVerifiedBlock::from(Arc::new(block1)); let commit_result = finalized_state.commit_finalized_direct(block1.clone().into(), "test"); // the block was committed @@ -351,7 +351,7 @@ proptest! { let block1_hash; // randomly choose to commit the next block to the finalized or non-finalized state if duplicate_in_finalized_state { - let block1 = FinalizedBlock::from(Arc::new(block1)); + let block1 = CheckpointVerifiedBlock::from(Arc::new(block1)); let commit_result = finalized_state.commit_finalized_direct(block1.clone().into(), "test"); prop_assert_eq!(Some((Height(1), block1.hash)), read::best_tip(&non_finalized_state, &finalized_state.db)); @@ -451,7 +451,7 @@ proptest! 
{ // randomly choose to commit the block to the finalized or non-finalized state if use_finalized_state { - let block1 = FinalizedBlock::from(Arc::new(block1)); + let block1 = CheckpointVerifiedBlock::from(Arc::new(block1)); let commit_result = finalized_state.commit_finalized_direct(block1.clone().into(), "test"); prop_assert_eq!(Some((Height(1), block1.hash)), read::best_tip(&non_finalized_state, &finalized_state.db)); @@ -633,7 +633,7 @@ proptest! { let block1_hash; // randomly choose to commit the next block to the finalized or non-finalized state if duplicate_in_finalized_state { - let block1 = FinalizedBlock::from(Arc::new(block1)); + let block1 = CheckpointVerifiedBlock::from(Arc::new(block1)); let commit_result = finalized_state.commit_finalized_direct(block1.clone().into(), "test"); prop_assert_eq!(Some((Height(1), block1.hash)), read::best_tip(&non_finalized_state, &finalized_state.db)); @@ -731,7 +731,7 @@ proptest! { // randomly choose to commit the block to the finalized or non-finalized state if use_finalized_state { - let block1 = FinalizedBlock::from(Arc::new(block1)); + let block1 = CheckpointVerifiedBlock::from(Arc::new(block1)); let commit_result = finalized_state.commit_finalized_direct(block1.clone().into(), "test"); prop_assert_eq!(Some((Height(1), block1.hash)), read::best_tip(&non_finalized_state, &finalized_state.db)); @@ -922,7 +922,7 @@ proptest! 
{ let block1_hash; // randomly choose to commit the next block to the finalized or non-finalized state if duplicate_in_finalized_state { - let block1 = FinalizedBlock::from(Arc::new(block1)); + let block1 = CheckpointVerifiedBlock::from(Arc::new(block1)); let commit_result = finalized_state.commit_finalized_direct(block1.clone().into(), "test"); prop_assert_eq!(Some((Height(1), block1.hash)), read::best_tip(&non_finalized_state, &finalized_state.db)); diff --git a/zebra-state/src/service/check/tests/utxo.rs b/zebra-state/src/service/check/tests/utxo.rs index d35441381e5..4f7e1e13bc1 100644 --- a/zebra-state/src/service/check/tests/utxo.rs +++ b/zebra-state/src/service/check/tests/utxo.rs @@ -21,7 +21,7 @@ use crate::{ write::validate_and_commit_non_finalized, }, tests::setup::{new_state_with_mainnet_genesis, transaction_v4_from_coinbase}, - FinalizedBlock, + CheckpointVerifiedBlock, ValidateContextError::{ DuplicateTransparentSpend, EarlyTransparentSpend, ImmatureTransparentCoinbaseSpend, MissingTransparentOutput, UnshieldedTransparentCoinbaseSpend, @@ -184,7 +184,7 @@ proptest! { // randomly choose to commit the block to the finalized or non-finalized state if use_finalized_state { - let block1 = FinalizedBlock::from(Arc::new(block1)); + let block1 = CheckpointVerifiedBlock::from(Arc::new(block1)); let commit_result = finalized_state.commit_finalized_direct(block1.clone().into(), "test"); // the block was committed @@ -272,7 +272,7 @@ proptest! { block2.transactions.push(spend_transaction.into()); if use_finalized_state_spend { - let block2 = FinalizedBlock::from(Arc::new(block2)); + let block2 = CheckpointVerifiedBlock::from(Arc::new(block2)); let commit_result = finalized_state.commit_finalized_direct(block2.clone().into(), "test"); // the block was committed @@ -611,7 +611,7 @@ proptest! 
{ let block2 = Arc::new(block2); if use_finalized_state_spend { - let block2 = FinalizedBlock::from(block2.clone()); + let block2 = CheckpointVerifiedBlock::from(block2.clone()); let commit_result = finalized_state.commit_finalized_direct(block2.clone().into(), "test"); // the block was committed @@ -842,7 +842,7 @@ struct TestState { /// The genesis block that has already been committed to the `state` service's /// finalized state. #[allow(dead_code)] - genesis: FinalizedBlock, + genesis: CheckpointVerifiedBlock, /// A block at height 1, that has already been committed to the `state` service. block1: Arc, @@ -883,7 +883,7 @@ fn new_state_with_mainnet_transparent_data( let block1 = Arc::new(block1); if use_finalized_state { - let block1 = FinalizedBlock::from(block1.clone()); + let block1 = CheckpointVerifiedBlock::from(block1.clone()); let commit_result = finalized_state.commit_finalized_direct(block1.clone().into(), "test"); // the block was committed diff --git a/zebra-state/src/service/check/utxo.rs b/zebra-state/src/service/check/utxo.rs index 0cd8a0f0dc2..c8a79852b8a 100644 --- a/zebra-state/src/service/check/utxo.rs +++ b/zebra-state/src/service/check/utxo.rs @@ -10,14 +10,14 @@ use zebra_chain::{ use crate::{ constants::MIN_TRANSPARENT_COINBASE_MATURITY, service::finalized_state::ZebraDb, - PreparedBlock, + SemanticallyVerifiedBlock, ValidateContextError::{ self, DuplicateTransparentSpend, EarlyTransparentSpend, ImmatureTransparentCoinbaseSpend, MissingTransparentOutput, UnshieldedTransparentCoinbaseSpend, }, }; -/// Lookup all the [`transparent::Utxo`]s spent by a [`PreparedBlock`]. +/// Lookup all the [`transparent::Utxo`]s spent by a [`SemanticallyVerifiedBlock`]. /// If any of the spends are invalid, return an error. /// Otherwise, return the looked up UTXOs. /// @@ -36,7 +36,7 @@ use crate::{ /// - spends of an immature transparent coinbase output, /// - unshielded spends of a transparent coinbase output. 
pub fn transparent_spend( - prepared: &PreparedBlock, + prepared: &SemanticallyVerifiedBlock, non_finalized_chain_unspent_utxos: &HashMap, non_finalized_chain_spent_utxos: &HashSet, finalized_state: &ZebraDb, @@ -225,7 +225,7 @@ pub fn transparent_coinbase_spend( /// /// pub fn remaining_transaction_value( - prepared: &PreparedBlock, + prepared: &SemanticallyVerifiedBlock, utxos: &HashMap, ) -> Result<(), ValidateContextError> { for (tx_index_in_block, transaction) in prepared.block.transactions.iter().enumerate() { diff --git a/zebra-state/src/service/finalized_state.rs b/zebra-state/src/service/finalized_state.rs index 2a355646f95..1ac34e2c5db 100644 --- a/zebra-state/src/service/finalized_state.rs +++ b/zebra-state/src/service/finalized_state.rs @@ -23,9 +23,9 @@ use std::{ use zebra_chain::{block, parameters::Network}; use crate::{ - request::FinalizedWithTrees, + request::ContextuallyVerifiedBlockWithTrees, service::{check, QueuedFinalized}, - BoxError, CloneError, Config, FinalizedBlock, + BoxError, CheckpointVerifiedBlock, CloneError, Config, }; mod disk_db; @@ -161,23 +161,25 @@ impl FinalizedState { self.network } - /// Commit a finalized block to the state. + /// Commit a checkpoint-verified block to the state. /// /// It's the caller's responsibility to ensure that blocks are committed in /// order. 
pub fn commit_finalized( &mut self, ordered_block: QueuedFinalized, - ) -> Result { - let (finalized, rsp_tx) = ordered_block; - let result = - self.commit_finalized_direct(finalized.clone().into(), "CommitFinalized request"); + ) -> Result { + let (checkpoint_verified, rsp_tx) = ordered_block; + let result = self.commit_finalized_direct( + checkpoint_verified.clone().into(), + "commit checkpoint-verified request", + ); if result.is_ok() { metrics::counter!("state.checkpoint.finalized.block.count", 1); metrics::gauge!( "state.checkpoint.finalized.block.height", - finalized.height.0 as f64, + checkpoint_verified.height.0 as f64, ); // This height gauge is updated for both fully verified and checkpoint blocks. @@ -185,14 +187,14 @@ impl FinalizedState { // are committed in order. metrics::gauge!( "zcash.chain.verified.block.height", - finalized.height.0 as f64, + checkpoint_verified.height.0 as f64, ); metrics::counter!("zcash.chain.verified.block.total", 1); } else { metrics::counter!("state.checkpoint.error.block.count", 1); metrics::gauge!( "state.checkpoint.error.block.height", - finalized.height.0 as f64, + checkpoint_verified.height.0 as f64, ); }; @@ -202,7 +204,9 @@ impl FinalizedState { let _ = rsp_tx.send(result.clone().map_err(BoxError::from)); - result.map(|_hash| finalized).map_err(BoxError::from) + result + .map(|_hash| checkpoint_verified) + .map_err(BoxError::from) } /// Immediately commit a `finalized` block to the finalized state. 
@@ -221,10 +225,10 @@ impl FinalizedState { #[allow(clippy::unwrap_in_result)] pub fn commit_finalized_direct( &mut self, - finalized_with_trees: FinalizedWithTrees, + contextually_verified_with_trees: ContextuallyVerifiedBlockWithTrees, source: &str, ) -> Result { - let finalized = finalized_with_trees.finalized; + let finalized = contextually_verified_with_trees.checkpoint_verified; let committed_tip_hash = self.db.finalized_tip_hash(); let committed_tip_height = self.db.finalized_tip_height(); @@ -252,7 +256,8 @@ impl FinalizedState { ); } - let (history_tree, note_commitment_trees) = match finalized_with_trees.treestate { + let (history_tree, note_commitment_trees) = match contextually_verified_with_trees.treestate + { // If the treestate associated with the block was supplied, use it // without recomputing it. Some(ref treestate) => ( diff --git a/zebra-state/src/service/finalized_state/tests/prop.rs b/zebra-state/src/service/finalized_state/tests/prop.rs index bdc6438f551..e849f42fe35 100644 --- a/zebra-state/src/service/finalized_state/tests/prop.rs +++ b/zebra-state/src/service/finalized_state/tests/prop.rs @@ -9,7 +9,7 @@ use crate::{ config::Config, service::{ arbitrary::PreparedChain, - finalized_state::{FinalizedBlock, FinalizedState}, + finalized_state::{CheckpointVerifiedBlock, FinalizedState}, }, tests::FakeChainHelper, }; @@ -28,9 +28,9 @@ fn blocks_with_v5_transactions() -> Result<()> { let mut height = Height(0); // use `count` to minimize test failures, so they are easier to diagnose for block in chain.iter().take(count) { - let finalized = FinalizedBlock::from(block.block.clone()); + let checkpoint_verified = CheckpointVerifiedBlock::from(block.block.clone()); let hash = state.commit_finalized_direct( - finalized.into(), + checkpoint_verified.into(), "blocks_with_v5_transactions test" ); prop_assert_eq!(Some(height), state.finalized_tip_height()); @@ -83,18 +83,18 @@ fn all_upgrades_and_wrong_commitments_with_fake_activation_heights() -> 
Result<( h == nu5_height || h == nu5_height_plus1 => { let block = block.block.clone().set_block_commitment([0x42; 32]); - let finalized = FinalizedBlock::from(block); + let checkpoint_verified = CheckpointVerifiedBlock::from(block); state.commit_finalized_direct( - finalized.into(), + checkpoint_verified.into(), "all_upgrades test" ).expect_err("Must fail commitment check"); failure_count += 1; }, _ => {}, } - let finalized = FinalizedBlock::from(block.block.clone()); + let checkpoint_verified = CheckpointVerifiedBlock::from(block.block.clone()); let hash = state.commit_finalized_direct( - finalized.into(), + checkpoint_verified.into(), "all_upgrades test" ).unwrap(); prop_assert_eq!(Some(height), state.finalized_tip_height()); diff --git a/zebra-state/src/service/finalized_state/zebra_db/block.rs b/zebra-state/src/service/finalized_state/zebra_db/block.rs index b6affaf1610..8edc89e0258 100644 --- a/zebra-state/src/service/finalized_state/zebra_db/block.rs +++ b/zebra-state/src/service/finalized_state/zebra_db/block.rs @@ -38,7 +38,7 @@ use crate::{ transparent::{AddressBalanceLocation, OutputLocation}, }, zebra_db::{metrics::block_precommit_metrics, ZebraDb}, - FinalizedBlock, + CheckpointVerifiedBlock, }, BoxError, HashOrHeight, }; @@ -282,7 +282,7 @@ impl ZebraDb { /// - Propagates any errors from updating history and note commitment trees pub(in super::super) fn write_block( &mut self, - finalized: FinalizedBlock, + finalized: CheckpointVerifiedBlock, history_tree: Arc, note_commitment_trees: NoteCommitmentTrees, network: Network, @@ -420,7 +420,7 @@ impl DiskWriteBatch { pub fn prepare_block_batch( &mut self, db: &DiskDb, - finalized: FinalizedBlock, + finalized: CheckpointVerifiedBlock, new_outputs_by_out_loc: BTreeMap, spent_utxos_by_outpoint: HashMap, spent_utxos_by_out_loc: BTreeMap, @@ -429,7 +429,7 @@ impl DiskWriteBatch { note_commitment_trees: NoteCommitmentTrees, value_pool: ValueBalance, ) -> Result<(), BoxError> { - let FinalizedBlock { + let 
CheckpointVerifiedBlock { block, hash, height, @@ -485,7 +485,7 @@ impl DiskWriteBatch { pub fn prepare_block_header_and_transaction_data_batch( &mut self, db: &DiskDb, - finalized: &FinalizedBlock, + finalized: &CheckpointVerifiedBlock, ) -> Result<(), BoxError> { // Blocks let block_header_by_height = db.cf_handle("block_header_by_height").unwrap(); @@ -497,7 +497,7 @@ impl DiskWriteBatch { let hash_by_tx_loc = db.cf_handle("hash_by_tx_loc").unwrap(); let tx_loc_by_hash = db.cf_handle("tx_loc_by_hash").unwrap(); - let FinalizedBlock { + let CheckpointVerifiedBlock { block, hash, height, @@ -541,8 +541,12 @@ impl DiskWriteBatch { /// If `finalized.block` is not a genesis block, does nothing. /// /// This method never returns an error. - pub fn prepare_genesis_batch(&mut self, db: &DiskDb, finalized: &FinalizedBlock) -> bool { - let FinalizedBlock { block, .. } = finalized; + pub fn prepare_genesis_batch( + &mut self, + db: &DiskDb, + finalized: &CheckpointVerifiedBlock, + ) -> bool { + let CheckpointVerifiedBlock { block, .. } = finalized; if block.header.previous_block_hash == GENESIS_PREVIOUS_BLOCK_HASH { self.prepare_genesis_note_commitment_tree_batch(db, finalized); diff --git a/zebra-state/src/service/finalized_state/zebra_db/block/tests/vectors.rs b/zebra-state/src/service/finalized_state/zebra_db/block/tests/vectors.rs index 51bad905da2..ea4a623748f 100644 --- a/zebra-state/src/service/finalized_state/zebra_db/block/tests/vectors.rs +++ b/zebra-state/src/service/finalized_state/zebra_db/block/tests/vectors.rs @@ -27,7 +27,7 @@ use zebra_test::vectors::{MAINNET_BLOCKS, TESTNET_BLOCKS}; use crate::{ service::finalized_state::{disk_db::DiskWriteBatch, FinalizedState}, - Config, FinalizedBlock, + CheckpointVerifiedBlock, Config, }; /// Storage round-trip test for block and transaction data in the finalized state database. 
@@ -112,7 +112,7 @@ fn test_block_db_round_trip_with( original_block.clone().into() } else { // Fake a zero height - FinalizedBlock::with_hash_and_height( + CheckpointVerifiedBlock::with_hash_and_height( original_block.clone(), original_block.hash(), Height(0), diff --git a/zebra-state/src/service/finalized_state/zebra_db/chain.rs b/zebra-state/src/service/finalized_state/zebra_db/chain.rs index a9a63cb3fbf..b8db8d717d3 100644 --- a/zebra-state/src/service/finalized_state/zebra_db/chain.rs +++ b/zebra-state/src/service/finalized_state/zebra_db/chain.rs @@ -24,7 +24,7 @@ use crate::{ service::finalized_state::{ disk_db::{DiskDb, DiskWriteBatch, ReadDisk, WriteDisk}, zebra_db::ZebraDb, - FinalizedBlock, + CheckpointVerifiedBlock, }, BoxError, }; @@ -70,12 +70,12 @@ impl DiskWriteBatch { pub fn prepare_history_batch( &mut self, db: &DiskDb, - finalized: &FinalizedBlock, + finalized: &CheckpointVerifiedBlock, history_tree: Arc, ) -> Result<(), BoxError> { let history_tree_cf = db.cf_handle("history_tree").unwrap(); - let FinalizedBlock { height, .. } = finalized; + let CheckpointVerifiedBlock { height, .. } = finalized; // Update the tree in state let current_tip_height = *height - 1; @@ -108,13 +108,13 @@ impl DiskWriteBatch { pub fn prepare_chain_value_pools_batch( &mut self, db: &DiskDb, - finalized: &FinalizedBlock, + finalized: &CheckpointVerifiedBlock, utxos_spent_by_block: HashMap, value_pool: ValueBalance, ) -> Result<(), BoxError> { let tip_chain_value_pool = db.cf_handle("tip_chain_value_pool").unwrap(); - let FinalizedBlock { block, .. } = finalized; + let CheckpointVerifiedBlock { block, .. 
} = finalized; let new_pool = value_pool.add_block(block.borrow(), &utxos_spent_by_block)?; self.zs_insert(&tip_chain_value_pool, (), new_pool); diff --git a/zebra-state/src/service/finalized_state/zebra_db/shielded.rs b/zebra-state/src/service/finalized_state/zebra_db/shielded.rs index 42803585e49..1e6b2fdf6c5 100644 --- a/zebra-state/src/service/finalized_state/zebra_db/shielded.rs +++ b/zebra-state/src/service/finalized_state/zebra_db/shielded.rs @@ -23,7 +23,7 @@ use crate::{ service::finalized_state::{ disk_db::{DiskDb, DiskWriteBatch, ReadDisk, WriteDisk}, zebra_db::ZebraDb, - FinalizedBlock, + CheckpointVerifiedBlock, }, BoxError, }; @@ -179,9 +179,9 @@ impl DiskWriteBatch { pub fn prepare_shielded_transaction_batch( &mut self, db: &DiskDb, - finalized: &FinalizedBlock, + finalized: &CheckpointVerifiedBlock, ) -> Result<(), BoxError> { - let FinalizedBlock { block, .. } = finalized; + let CheckpointVerifiedBlock { block, .. } = finalized; // Index each transaction's shielded data for transaction in &block.transactions { @@ -234,7 +234,7 @@ impl DiskWriteBatch { pub fn prepare_note_commitment_batch( &mut self, db: &DiskDb, - finalized: &FinalizedBlock, + finalized: &CheckpointVerifiedBlock, note_commitment_trees: NoteCommitmentTrees, history_tree: Arc, ) -> Result<(), BoxError> { @@ -246,7 +246,7 @@ impl DiskWriteBatch { let sapling_note_commitment_tree_cf = db.cf_handle("sapling_note_commitment_tree").unwrap(); let orchard_note_commitment_tree_cf = db.cf_handle("orchard_note_commitment_tree").unwrap(); - let FinalizedBlock { height, .. } = finalized; + let CheckpointVerifiedBlock { height, .. } = finalized; // Use the cached values that were previously calculated in parallel. 
let sprout_root = note_commitment_trees.sprout.root(); @@ -297,13 +297,13 @@ impl DiskWriteBatch { pub fn prepare_genesis_note_commitment_tree_batch( &mut self, db: &DiskDb, - finalized: &FinalizedBlock, + finalized: &CheckpointVerifiedBlock, ) { let sprout_note_commitment_tree_cf = db.cf_handle("sprout_note_commitment_tree").unwrap(); let sapling_note_commitment_tree_cf = db.cf_handle("sapling_note_commitment_tree").unwrap(); let orchard_note_commitment_tree_cf = db.cf_handle("orchard_note_commitment_tree").unwrap(); - let FinalizedBlock { height, .. } = finalized; + let CheckpointVerifiedBlock { height, .. } = finalized; // Insert empty note commitment trees. Note that these can't be // used too early (e.g. the Orchard tree before Nu5 activates) diff --git a/zebra-state/src/service/finalized_state/zebra_db/transparent.rs b/zebra-state/src/service/finalized_state/zebra_db/transparent.rs index 6e2ac9808b4..91509631d26 100644 --- a/zebra-state/src/service/finalized_state/zebra_db/transparent.rs +++ b/zebra-state/src/service/finalized_state/zebra_db/transparent.rs @@ -35,7 +35,7 @@ use crate::{ }, zebra_db::ZebraDb, }, - BoxError, FinalizedBlock, + BoxError, CheckpointVerifiedBlock, }; impl ZebraDb { @@ -369,13 +369,13 @@ impl DiskWriteBatch { pub fn prepare_transparent_transaction_batch( &mut self, db: &DiskDb, - finalized: &FinalizedBlock, + finalized: &CheckpointVerifiedBlock, new_outputs_by_out_loc: &BTreeMap, spent_utxos_by_outpoint: &HashMap, spent_utxos_by_out_loc: &BTreeMap, mut address_balances: HashMap, ) -> Result<(), BoxError> { - let FinalizedBlock { block, height, .. } = finalized; + let CheckpointVerifiedBlock { block, height, .. 
} = finalized; // Update created and spent transparent outputs self.prepare_new_transparent_outputs_batch( diff --git a/zebra-state/src/service/non_finalized_state.rs b/zebra-state/src/service/non_finalized_state.rs index 3b036ae57b9..ffc6c41b897 100644 --- a/zebra-state/src/service/non_finalized_state.rs +++ b/zebra-state/src/service/non_finalized_state.rs @@ -16,9 +16,9 @@ use zebra_chain::{ use crate::{ constants::MAX_NON_FINALIZED_CHAIN_FORKS, - request::{ContextuallyValidBlock, FinalizedWithTrees}, + request::{ContextuallyVerifiedBlock, ContextuallyVerifiedBlockWithTrees}, service::{check, finalized_state::ZebraDb}, - PreparedBlock, ValidateContextError, + SemanticallyVerifiedBlock, ValidateContextError, }; mod chain; @@ -174,7 +174,7 @@ impl NonFinalizedState { /// Finalize the lowest height block in the non-finalized portion of the best /// chain and update all side-chains to match. - pub fn finalize(&mut self) -> FinalizedWithTrees { + pub fn finalize(&mut self) -> ContextuallyVerifiedBlockWithTrees { // Chain::cmp uses the partial cumulative work, and the hash of the tip block. // Neither of these fields has interior mutability. // (And when the tip block is dropped for a chain, the chain is also dropped.) @@ -226,7 +226,7 @@ impl NonFinalizedState { self.update_metrics_for_chains(); // Add the treestate to the finalized block. 
- FinalizedWithTrees::new(best_chain_root, root_treestate) + ContextuallyVerifiedBlockWithTrees::new(best_chain_root, root_treestate) } /// Commit block to the non-finalized state, on top of: @@ -235,7 +235,7 @@ impl NonFinalizedState { #[tracing::instrument(level = "debug", skip(self, finalized_state, prepared))] pub fn commit_block( &mut self, - prepared: PreparedBlock, + prepared: SemanticallyVerifiedBlock, finalized_state: &ZebraDb, ) -> Result<(), ValidateContextError> { let parent_hash = prepared.block.header.previous_block_hash; @@ -266,7 +266,7 @@ impl NonFinalizedState { #[allow(clippy::unwrap_in_result)] pub fn commit_new_chain( &mut self, - prepared: PreparedBlock, + prepared: SemanticallyVerifiedBlock, finalized_state: &ZebraDb, ) -> Result<(), ValidateContextError> { let finalized_tip_height = finalized_state.finalized_tip_height(); @@ -308,7 +308,7 @@ impl NonFinalizedState { fn validate_and_commit( &self, new_chain: Arc, - prepared: PreparedBlock, + prepared: SemanticallyVerifiedBlock, finalized_state: &ZebraDb, ) -> Result, ValidateContextError> { // Reads from disk @@ -336,7 +336,7 @@ impl NonFinalizedState { ); // Quick check that doesn't read from disk - let contextual = ContextuallyValidBlock::with_block_and_spent_utxos( + let contextual = ContextuallyVerifiedBlock::with_block_and_spent_utxos( prepared.clone(), spent_utxos.clone(), ) @@ -358,7 +358,7 @@ impl NonFinalizedState { #[tracing::instrument(skip(new_chain, sprout_final_treestates))] fn validate_and_update_parallel( new_chain: Arc, - contextual: ContextuallyValidBlock, + contextual: ContextuallyVerifiedBlock, sprout_final_treestates: HashMap>, ) -> Result, ValidateContextError> { let mut block_commitment_result = None; @@ -489,7 +489,7 @@ impl NonFinalizedState { /// Returns the block at the tip of the best chain. 
#[allow(dead_code)] - pub fn best_tip_block(&self) -> Option<&ContextuallyValidBlock> { + pub fn best_tip_block(&self) -> Option<&ContextuallyVerifiedBlock> { let best_chain = self.best_chain()?; best_chain.tip_block() diff --git a/zebra-state/src/service/non_finalized_state/chain.rs b/zebra-state/src/service/non_finalized_state/chain.rs index 3913fc4d669..2986919b755 100644 --- a/zebra-state/src/service/non_finalized_state/chain.rs +++ b/zebra-state/src/service/non_finalized_state/chain.rs @@ -28,7 +28,7 @@ use zebra_chain::{ }; use crate::{ - request::Treestate, service::check, ContextuallyValidBlock, HashOrHeight, OutputLocation, + request::Treestate, service::check, ContextuallyVerifiedBlock, HashOrHeight, OutputLocation, TransactionLocation, ValidateContextError, }; @@ -50,7 +50,7 @@ pub struct Chain { // Blocks, heights, hashes, and transaction locations // /// The contextually valid blocks which form this non-finalized partial chain, in height order. - pub(crate) blocks: BTreeMap, + pub(crate) blocks: BTreeMap, /// An index of block heights for each block hash in `blocks`. pub height_by_hash: HashMap, @@ -318,10 +318,10 @@ impl Chain { /// /// If the block is invalid, drops this chain, and returns an error. /// - /// Note: a [`ContextuallyValidBlock`] isn't actually contextually valid until + /// Note: a [`ContextuallyVerifiedBlock`] isn't actually contextually valid until /// [`Self::update_chain_tip_with`] returns success. #[instrument(level = "debug", skip(self, block), fields(block = %block.block))] - pub fn push(mut self, block: ContextuallyValidBlock) -> Result { + pub fn push(mut self, block: ContextuallyVerifiedBlock) -> Result { // update cumulative data members self.update_chain_tip_with(&block)?; @@ -334,7 +334,7 @@ impl Chain { /// Pops the lowest height block of the non-finalized portion of a chain, /// and returns it with its associated treestate. 
#[instrument(level = "debug", skip(self))] - pub(crate) fn pop_root(&mut self) -> (ContextuallyValidBlock, Treestate) { + pub(crate) fn pop_root(&mut self) -> (ContextuallyVerifiedBlock, Treestate) { // Obtain the lowest height. let block_height = self.non_finalized_root_height(); @@ -388,9 +388,9 @@ impl Chain { self.network } - /// Returns the [`ContextuallyValidBlock`] with [`block::Hash`] or + /// Returns the [`ContextuallyVerifiedBlock`] with [`block::Hash`] or /// [`Height`](zebra_chain::block::Height), if it exists in this chain. - pub fn block(&self, hash_or_height: HashOrHeight) -> Option<&ContextuallyValidBlock> { + pub fn block(&self, hash_or_height: HashOrHeight) -> Option<&ContextuallyVerifiedBlock> { let height = hash_or_height.height_or_else(|hash| self.height_by_hash.get(&hash).cloned())?; @@ -969,7 +969,7 @@ impl Chain { /// Return the non-finalized tip block for this chain, /// or `None` if `self.blocks` is empty. - pub fn tip_block(&self) -> Option<&ContextuallyValidBlock> { + pub fn tip_block(&self) -> Option<&ContextuallyVerifiedBlock> { self.blocks.values().next_back() } @@ -1123,12 +1123,12 @@ impl Chain { /// Update the chain tip with the `contextually_valid` block, /// running note commitment tree updates in parallel with other updates. /// - /// Used to implement `update_chain_tip_with::`. + /// Used to implement `update_chain_tip_with::`. #[instrument(skip(self, contextually_valid), fields(block = %contextually_valid.block))] #[allow(clippy::unwrap_in_result)] fn update_chain_tip_with_block_parallel( &mut self, - contextually_valid: &ContextuallyValidBlock, + contextually_valid: &ContextuallyVerifiedBlock, ) -> Result<(), ValidateContextError> { let height = contextually_valid.height; @@ -1186,12 +1186,12 @@ impl Chain { /// Update the chain tip with the `contextually_valid` block, /// except for the note commitment and history tree updates. /// - /// Used to implement `update_chain_tip_with::`. 
+ /// Used to implement `update_chain_tip_with::`. #[instrument(skip(self, contextually_valid), fields(block = %contextually_valid.block))] #[allow(clippy::unwrap_in_result)] fn update_chain_tip_with_block_except_trees( &mut self, - contextually_valid: &ContextuallyValidBlock, + contextually_valid: &ContextuallyVerifiedBlock, ) -> Result<(), ValidateContextError> { let ( block, @@ -1327,12 +1327,12 @@ trait UpdateWith { fn revert_chain_with(&mut self, _: &T, position: RevertPosition); } -impl UpdateWith for Chain { +impl UpdateWith for Chain { #[instrument(skip(self, contextually_valid), fields(block = %contextually_valid.block))] #[allow(clippy::unwrap_in_result)] fn update_chain_tip_with( &mut self, - contextually_valid: &ContextuallyValidBlock, + contextually_valid: &ContextuallyVerifiedBlock, ) -> Result<(), ValidateContextError> { self.update_chain_tip_with_block_parallel(contextually_valid) } @@ -1340,7 +1340,7 @@ impl UpdateWith for Chain { #[instrument(skip(self, contextually_valid), fields(block = %contextually_valid.block))] fn revert_chain_with( &mut self, - contextually_valid: &ContextuallyValidBlock, + contextually_valid: &ContextuallyVerifiedBlock, position: RevertPosition, ) { let ( diff --git a/zebra-state/src/service/non_finalized_state/tests/prop.rs b/zebra-state/src/service/non_finalized_state/tests/prop.rs index 32bd8e300cf..fcf49f49b3c 100644 --- a/zebra-state/src/service/non_finalized_state/tests/prop.rs +++ b/zebra-state/src/service/non_finalized_state/tests/prop.rs @@ -17,7 +17,7 @@ use zebra_chain::{ use crate::{ arbitrary::Prepare, - request::ContextuallyValidBlock, + request::ContextuallyVerifiedBlock, service::{ arbitrary::PreparedChain, finalized_state::FinalizedState, @@ -55,7 +55,7 @@ fn push_genesis_chain() -> Result<()> { for block in chain.iter().take(count).cloned() { let block = - ContextuallyValidBlock::with_block_and_spent_utxos( + ContextuallyVerifiedBlock::with_block_and_spent_utxos( block, only_chain.unspent_utxos(), ) @@ 
-104,7 +104,7 @@ fn push_history_tree_chain() -> Result<()> { for block in chain .iter() .take(count) - .map(ContextuallyValidBlock::test_with_zero_chain_pool_change) { + .map(ContextuallyVerifiedBlock::test_with_zero_chain_pool_change) { only_chain = only_chain.push(block)?; } @@ -151,7 +151,7 @@ fn forked_equals_pushed_genesis() -> Result<()> { ValueBalance::zero(), ); for block in chain.iter().take(fork_at_count).cloned() { - let block = ContextuallyValidBlock::with_block_and_spent_utxos( + let block = ContextuallyVerifiedBlock::with_block_and_spent_utxos( block, partial_chain.unspent_utxos(), )?; @@ -172,7 +172,7 @@ fn forked_equals_pushed_genesis() -> Result<()> { ); for block in chain.iter().cloned() { let block = - ContextuallyValidBlock::with_block_and_spent_utxos(block, full_chain.unspent_utxos())?; + ContextuallyVerifiedBlock::with_block_and_spent_utxos(block, full_chain.unspent_utxos())?; full_chain = full_chain .push(block.clone()) .expect("full chain push is valid"); @@ -216,7 +216,7 @@ fn forked_equals_pushed_genesis() -> Result<()> { // same original full chain. 
for block in chain.iter().skip(fork_at_count).cloned() { let block = - ContextuallyValidBlock::with_block_and_spent_utxos(block, forked.unspent_utxos())?; + ContextuallyVerifiedBlock::with_block_and_spent_utxos(block, forked.unspent_utxos())?; forked = forked.push(block).expect("forked chain push is valid"); } @@ -256,13 +256,13 @@ fn forked_equals_pushed_history_tree() -> Result<()> { for block in chain .iter() .take(fork_at_count) - .map(ContextuallyValidBlock::test_with_zero_chain_pool_change) { + .map(ContextuallyVerifiedBlock::test_with_zero_chain_pool_change) { partial_chain = partial_chain.push(block)?; } for block in chain .iter() - .map(ContextuallyValidBlock::test_with_zero_chain_pool_change) { + .map(ContextuallyVerifiedBlock::test_with_zero_chain_pool_change) { full_chain = full_chain.push(block.clone())?; } @@ -279,7 +279,7 @@ fn forked_equals_pushed_history_tree() -> Result<()> { for block in chain .iter() .skip(fork_at_count) - .map(ContextuallyValidBlock::test_with_zero_chain_pool_change) { + .map(ContextuallyVerifiedBlock::test_with_zero_chain_pool_change) { forked = forked.push(block)?; } @@ -310,7 +310,7 @@ fn finalized_equals_pushed_genesis() -> Result<()> { // TODO: fix this test or the code so the full_chain temporary trees aren't overwritten let chain = chain.iter() .filter(|block| block.height != Height(0)) - .map(ContextuallyValidBlock::test_with_zero_spent_utxos); + .map(ContextuallyVerifiedBlock::test_with_zero_spent_utxos); // use `end_count` as the number of non-finalized blocks at the end of the chain, // make sure this test pushes at least 1 block in the partial chain. 
@@ -399,7 +399,7 @@ fn finalized_equals_pushed_history_tree() -> Result<()> { for block in chain .iter() .take(finalized_count) - .map(ContextuallyValidBlock::test_with_zero_spent_utxos) { + .map(ContextuallyVerifiedBlock::test_with_zero_spent_utxos) { full_chain = full_chain.push(block)?; } @@ -416,14 +416,14 @@ fn finalized_equals_pushed_history_tree() -> Result<()> { for block in chain .iter() .skip(finalized_count) - .map(ContextuallyValidBlock::test_with_zero_spent_utxos) { + .map(ContextuallyVerifiedBlock::test_with_zero_spent_utxos) { partial_chain = partial_chain.push(block.clone())?; } for block in chain .iter() .skip(finalized_count) - .map(ContextuallyValidBlock::test_with_zero_spent_utxos) { + .map(ContextuallyVerifiedBlock::test_with_zero_spent_utxos) { full_chain= full_chain.push(block.clone())?; } diff --git a/zebra-state/src/service/non_finalized_state/tests/vectors.rs b/zebra-state/src/service/non_finalized_state/tests/vectors.rs index 23242fabfc3..a8e61e7c060 100644 --- a/zebra-state/src/service/non_finalized_state/tests/vectors.rs +++ b/zebra-state/src/service/non_finalized_state/tests/vectors.rs @@ -214,11 +214,11 @@ fn finalize_pops_from_best_chain_for_network(network: Network) -> Result<()> { state.commit_block(child.prepare(), &finalized_state)?; let finalized_with_trees = state.finalize(); - let finalized = finalized_with_trees.finalized; + let finalized = finalized_with_trees.checkpoint_verified; assert_eq!(block1, finalized.block); let finalized_with_trees = state.finalize(); - let finalized = finalized_with_trees.finalized; + let finalized = finalized_with_trees.checkpoint_verified; assert_eq!(block2, finalized.block); assert!(state.best_chain().is_none()); diff --git a/zebra-state/src/service/queued_blocks.rs b/zebra-state/src/service/queued_blocks.rs index 7343208570c..9f350ea2c05 100644 --- a/zebra-state/src/service/queued_blocks.rs +++ b/zebra-state/src/service/queued_blocks.rs @@ -10,20 +10,20 @@ use tracing::instrument; use 
zebra_chain::{block, transparent}; -use crate::{BoxError, FinalizedBlock, PreparedBlock}; +use crate::{BoxError, CheckpointVerifiedBlock, SemanticallyVerifiedBlock}; #[cfg(test)] mod tests; /// A queued finalized block, and its corresponding [`Result`] channel. pub type QueuedFinalized = ( - FinalizedBlock, + CheckpointVerifiedBlock, oneshot::Sender>, ); /// A queued non-finalized block, and its corresponding [`Result`] channel. pub type QueuedNonFinalized = ( - PreparedBlock, + SemanticallyVerifiedBlock, oneshot::Sender>, ); @@ -242,7 +242,7 @@ impl SentHashes { /// /// Assumes that blocks are added in the order of their height between `finish_batch` calls /// for efficient pruning. - pub fn add(&mut self, block: &PreparedBlock) { + pub fn add(&mut self, block: &SemanticallyVerifiedBlock) { // Track known UTXOs in sent blocks. let outpoints = block .new_outputs @@ -271,7 +271,7 @@ impl SentHashes { /// for efficient pruning. /// /// For more details see `add()`. - pub fn add_finalized(&mut self, block: &FinalizedBlock) { + pub fn add_finalized(&mut self, block: &CheckpointVerifiedBlock) { // Track known UTXOs in sent blocks. 
let outpoints = block .new_outputs diff --git a/zebra-state/src/service/tests.rs b/zebra-state/src/service/tests.rs index 4723e9b6856..aed292313f4 100644 --- a/zebra-state/src/service/tests.rs +++ b/zebra-state/src/service/tests.rs @@ -23,7 +23,7 @@ use crate::{ init_test, service::{arbitrary::populated_state, chain_tip::TipAction, StateService}, tests::setup::{partial_nu5_chain_strategy, transaction_v4_from_coinbase}, - BoxError, Config, FinalizedBlock, PreparedBlock, Request, Response, + BoxError, CheckpointVerifiedBlock, Config, Request, Response, SemanticallyVerifiedBlock, }; const LAST_BLOCK_HEIGHT: u32 = 10; @@ -216,7 +216,7 @@ async fn empty_state_still_responds_to_requests() -> Result<()> { zebra_test::vectors::BLOCK_MAINNET_419200_BYTES.zcash_deserialize_into::>()?; let iter = vec![ - // No checks for CommitBlock or CommitFinalizedBlock because empty state + // No checks for SemanticallyVerifiedBlock or CommitCheckpointVerifiedBlock because empty state // precondition doesn't matter to them (Request::Depth(block.hash()), Ok(Response::Depth(None))), (Request::Tip, Ok(Response::Tip(None))), @@ -555,8 +555,8 @@ proptest! { fn continuous_empty_blocks_from_test_vectors() -> impl Strategy< Value = ( Network, - SummaryDebug>, - SummaryDebug>, + SummaryDebug>, + SummaryDebug>, ), > { any::() @@ -567,7 +567,7 @@ fn continuous_empty_blocks_from_test_vectors() -> impl Strategy< Network::Testnet => &*zebra_test::vectors::CONTINUOUS_TESTNET_BLOCKS, }; - // Transform the test vector's block bytes into a vector of `PreparedBlock`s. + // Transform the test vector's block bytes into a vector of `SemanticallyVerifiedBlock`s. 
let blocks: Vec<_> = raw_blocks .iter() .map(|(_height, &block_bytes)| { @@ -591,7 +591,7 @@ fn continuous_empty_blocks_from_test_vectors() -> impl Strategy< let non_finalized_blocks = blocks.split_off(finalized_blocks_count); let finalized_blocks: Vec<_> = blocks .into_iter() - .map(|prepared_block| FinalizedBlock::from(prepared_block.block)) + .map(|prepared_block| CheckpointVerifiedBlock::from(prepared_block.block)) .collect(); ( diff --git a/zebra-state/src/service/write.rs b/zebra-state/src/service/write.rs index ab7b466ce8a..74d6de14400 100644 --- a/zebra-state/src/service/write.rs +++ b/zebra-state/src/service/write.rs @@ -20,7 +20,7 @@ use crate::{ queued_blocks::{QueuedFinalized, QueuedNonFinalized}, BoxError, ChainTipBlock, ChainTipSender, CloneError, }, - CommitBlockError, PreparedBlock, + CommitSemanticallyVerifiedError, SemanticallyVerifiedBlock, }; // These types are used in doc links @@ -49,8 +49,8 @@ const PARENT_ERROR_MAP_LIMIT: usize = MAX_BLOCK_REORG_HEIGHT as usize * 2; pub(crate) fn validate_and_commit_non_finalized( finalized_state: &ZebraDb, non_finalized_state: &mut NonFinalizedState, - prepared: PreparedBlock, -) -> Result<(), CommitBlockError> { + prepared: SemanticallyVerifiedBlock, +) -> Result<(), CommitSemanticallyVerifiedError> { check::initial_contextual_validity(finalized_state, non_finalized_state, &prepared)?; let parent_hash = prepared.block.header.previous_block_hash; @@ -288,9 +288,9 @@ pub fn write_blocks_from_channels( while non_finalized_state.best_chain_len() > MAX_BLOCK_REORG_HEIGHT { tracing::trace!("finalizing block past the reorg limit"); - let finalized_with_trees = non_finalized_state.finalize(); + let contextually_verified_with_trees = non_finalized_state.finalize(); finalized_state - .commit_finalized_direct(finalized_with_trees, "best non-finalized chain root") + .commit_finalized_direct(contextually_verified_with_trees, "commit contextually-verified request") .expect( "unexpected finalized block commit error: note 
commitment and history trees were already checked by the non-finalized state", ); diff --git a/zebra-state/src/tests/setup.rs b/zebra-state/src/tests/setup.rs index e84ef6b975d..7316b12a284 100644 --- a/zebra-state/src/tests/setup.rs +++ b/zebra-state/src/tests/setup.rs @@ -18,7 +18,7 @@ use crate::{ service::{ check, finalized_state::FinalizedState, non_finalized_state::NonFinalizedState, read, }, - Config, FinalizedBlock, + CheckpointVerifiedBlock, Config, }; /// Generate a chain that allows us to make tests for the legacy chain rules. @@ -83,8 +83,8 @@ pub(crate) fn partial_nu5_chain_strategy( /// Return a new `StateService` containing the mainnet genesis block. /// Also returns the finalized genesis block itself. -pub(crate) fn new_state_with_mainnet_genesis() -> (FinalizedState, NonFinalizedState, FinalizedBlock) -{ +pub(crate) fn new_state_with_mainnet_genesis( +) -> (FinalizedState, NonFinalizedState, CheckpointVerifiedBlock) { let genesis = zebra_test::vectors::BLOCK_MAINNET_GENESIS_BYTES .zcash_deserialize_into::>() .expect("block should deserialize"); @@ -105,7 +105,7 @@ pub(crate) fn new_state_with_mainnet_genesis() -> (FinalizedState, NonFinalizedS read::best_tip(&non_finalized_state, &finalized_state.db) ); - let genesis = FinalizedBlock::from(genesis); + let genesis = CheckpointVerifiedBlock::from(genesis); finalized_state .commit_finalized_direct(genesis.clone().into(), "test") .expect("unexpected invalid genesis block test vector"); diff --git a/zebra-state/tests/basic.rs b/zebra-state/tests/basic.rs index 9aebfeb775e..638ab0f1a41 100644 --- a/zebra-state/tests/basic.rs +++ b/zebra-state/tests/basic.rs @@ -25,7 +25,7 @@ static COMMIT_FINALIZED_BLOCK_MAINNET: Lazy< let hash = block.hash(); vec![ ( - Request::CommitFinalizedBlock(block.into()), + Request::CommitCheckpointVerifiedBlock(block.into()), Ok(Response::Committed(hash)), ), ( @@ -46,7 +46,7 @@ static COMMIT_FINALIZED_BLOCK_TESTNET: Lazy< let hash = block.hash(); vec![ ( - 
Request::CommitFinalizedBlock(block.into()), + Request::CommitCheckpointVerifiedBlock(block.into()), Ok(Response::Committed(hash)), ), ( diff --git a/zebrad/src/commands/copy_state.rs b/zebrad/src/commands/copy_state.rs index 2466ed7b2e4..11d024d2c73 100644 --- a/zebrad/src/commands/copy_state.rs +++ b/zebrad/src/commands/copy_state.rs @@ -230,7 +230,7 @@ impl CopyStateCmd { let target_block_commit_hash = target_state .ready() .await? - .call(new_zs::Request::CommitFinalizedBlock( + .call(new_zs::Request::CommitCheckpointVerifiedBlock( source_block.clone().into(), )) .await?; @@ -240,7 +240,7 @@ impl CopyStateCmd { target_block_commit_hash } response => Err(format!( - "unexpected response to CommitFinalizedBlock request, height: {height}\n \ + "unexpected response to CommitCheckpointVerifiedBlock request, height: {height}\n \ response: {response:?}", ))?, }; diff --git a/zebrad/src/commands/start.rs b/zebrad/src/commands/start.rs index ab57713a3cc..07a8b5b19d8 100644 --- a/zebrad/src/commands/start.rs +++ b/zebrad/src/commands/start.rs @@ -75,7 +75,7 @@ use tokio::{pin, select, sync::oneshot}; use tower::{builder::ServiceBuilder, util::BoxService}; use tracing_futures::Instrument; -use zebra_consensus::chain::BackgroundTaskHandles; +use zebra_consensus::router::BackgroundTaskHandles; use zebra_rpc::server::RpcServer; use crate::{ @@ -104,7 +104,7 @@ impl StartCmd { let config = app_config().clone(); info!("initializing node state"); - let (_, max_checkpoint_height) = zebra_consensus::chain::init_checkpoint_list( + let (_, max_checkpoint_height) = zebra_consensus::router::init_checkpoint_list( config.consensus.clone(), config.network.network, ); @@ -147,8 +147,8 @@ impl StartCmd { .await; info!("initializing verifiers"); - let (chain_verifier, tx_verifier, consensus_task_handles, max_checkpoint_height) = - zebra_consensus::chain::init( + let (router_verifier, tx_verifier, consensus_task_handles, max_checkpoint_height) = + zebra_consensus::router::init( 
config.consensus.clone(), config.network.network, state.clone(), @@ -161,7 +161,7 @@ impl StartCmd { &config, max_checkpoint_height, peer_set.clone(), - chain_verifier.clone(), + router_verifier.clone(), state.clone(), latest_chain_tip.clone(), ); @@ -186,7 +186,7 @@ impl StartCmd { let setup_data = InboundSetupData { address_book: address_book.clone(), block_download_peer_set: peer_set.clone(), - block_verifier: chain_verifier.clone(), + block_verifier: router_verifier.clone(), mempool: mempool.clone(), state, latest_chain_tip: latest_chain_tip.clone(), @@ -207,7 +207,7 @@ impl StartCmd { app_version(), mempool.clone(), read_only_state_service, - chain_verifier, + router_verifier, sync_status.clone(), address_book, latest_chain_tip.clone(), diff --git a/zebrad/src/components/inbound.rs b/zebrad/src/components/inbound.rs index f3029ec5d2e..d7c9ca08485 100644 --- a/zebrad/src/components/inbound.rs +++ b/zebrad/src/components/inbound.rs @@ -29,7 +29,7 @@ use zebra_chain::{ serialization::ZcashSerialize, transaction::UnminedTxId, }; -use zebra_consensus::chain::VerifyChainError; +use zebra_consensus::router::RouterError; use zebra_network::{ constants::{ADDR_RESPONSE_LIMIT_DENOMINATOR, MAX_ADDRS_IN_MESSAGE}, AddressBook, InventoryResponse, @@ -73,12 +73,12 @@ type BlockDownloadPeerSet = Buffer, zn::Request>; type State = Buffer, zs::Request>; type Mempool = Buffer, mempool::Request>; -type BlockVerifier = Buffer< - BoxService, +type SemanticBlockVerifier = Buffer< + BoxService, zebra_consensus::Request, >; type GossipedBlockDownloads = - BlockDownloads, Timeout, State>; + BlockDownloads, Timeout, State>; /// The services used by the [`Inbound`] service. pub struct InboundSetupData { @@ -91,7 +91,7 @@ pub struct InboundSetupData { /// A service that verifies downloaded blocks. /// /// Given to `Inbound.block_downloads` after the required services are set up. 
- pub block_verifier: BlockVerifier, + pub block_verifier: SemanticBlockVerifier, /// A service that manages transactions in the memory pool. pub mempool: Mempool, diff --git a/zebrad/src/components/inbound/tests/fake_peer_set.rs b/zebrad/src/components/inbound/tests/fake_peer_set.rs index be7154198bc..1c0bdd34973 100644 --- a/zebrad/src/components/inbound/tests/fake_peer_set.rs +++ b/zebrad/src/components/inbound/tests/fake_peer_set.rs @@ -410,7 +410,7 @@ async fn mempool_transaction_expiration() -> Result<(), crate::BoxError> { .unwrap(); state_service .clone() - .oneshot(zebra_state::Request::CommitFinalizedBlock( + .oneshot(zebra_state::Request::CommitCheckpointVerifiedBlock( block_two.clone().into(), )) .await @@ -483,7 +483,7 @@ async fn mempool_transaction_expiration() -> Result<(), crate::BoxError> { .unwrap(); state_service .clone() - .oneshot(zebra_state::Request::CommitFinalizedBlock( + .oneshot(zebra_state::Request::CommitCheckpointVerifiedBlock( block_three.clone().into(), )) .await @@ -591,7 +591,7 @@ async fn mempool_transaction_expiration() -> Result<(), crate::BoxError> { for block in more_blocks { state_service .clone() - .oneshot(zebra_state::Request::CommitFinalizedBlock( + .oneshot(zebra_state::Request::CommitCheckpointVerifiedBlock( block.clone().into(), )) .await @@ -784,7 +784,7 @@ async fn setup( // Download task panics and timeouts are propagated to the tests that use Groth16 verifiers. 
let (block_verifier, _transaction_verifier, _groth16_download_handle, _max_checkpoint_height) = - zebra_consensus::chain::init( + zebra_consensus::router::init( consensus_config.clone(), network, state_service.clone(), @@ -812,7 +812,7 @@ async fn setup( .ready() .await .unwrap() - .call(zebra_state::Request::CommitFinalizedBlock( + .call(zebra_state::Request::CommitCheckpointVerifiedBlock( genesis_block.clone().into(), )) .await @@ -842,7 +842,7 @@ async fn setup( .unwrap(); state_service .clone() - .oneshot(zebra_state::Request::CommitFinalizedBlock( + .oneshot(zebra_state::Request::CommitCheckpointVerifiedBlock( block_one.clone().into(), )) .await diff --git a/zebrad/src/components/inbound/tests/real_peer_set.rs b/zebrad/src/components/inbound/tests/real_peer_set.rs index 6f9df6aa4e3..20bedc665da 100644 --- a/zebrad/src/components/inbound/tests/real_peer_set.rs +++ b/zebrad/src/components/inbound/tests/real_peer_set.rs @@ -18,7 +18,7 @@ use zebra_chain::{ serialization::ZcashDeserializeInto, transaction::{AuthDigest, Hash as TxHash, Transaction, UnminedTx, UnminedTxId, WtxId}, }; -use zebra_consensus::{chain::VerifyChainError, error::TransactionError, transaction}; +use zebra_consensus::{error::TransactionError, router::RouterError, transaction}; use zebra_network::{ canonical_peer_addr, connect_isolated_tcp_direct_with_inbound, types::InventoryHash, Config as NetworkConfig, InventoryResponse, PeerError, Request, Response, SharedPeerError, @@ -609,7 +609,7 @@ async fn setup( Buffer, mempool::Request>, Buffer, zebra_state::Request>, // mocked services - MockService, + MockService, MockService, // real tasks JoinHandle>, diff --git a/zebrad/src/components/mempool/tests/prop.rs b/zebrad/src/components/mempool/tests/prop.rs index 41523013a6b..46e85b35e14 100644 --- a/zebrad/src/components/mempool/tests/prop.rs +++ b/zebrad/src/components/mempool/tests/prop.rs @@ -20,7 +20,7 @@ use zebra_consensus::{error::TransactionError, transaction as tx}; use zebra_network as 
zn; use zebra_state::{self as zs, ChainTipBlock, ChainTipSender}; use zebra_test::mock_service::{MockService, PropTestAssertion}; -use zs::FinalizedBlock; +use zs::CheckpointVerifiedBlock; use crate::components::{ mempool::{config::Config, Mempool}, @@ -239,7 +239,7 @@ proptest! { fn genesis_chain_tip() -> Option { zebra_test::vectors::BLOCK_MAINNET_GENESIS_BYTES .zcash_deserialize_into::>() - .map(FinalizedBlock::from) + .map(CheckpointVerifiedBlock::from) .map(ChainTipBlock::from) .ok() } @@ -247,7 +247,7 @@ fn genesis_chain_tip() -> Option { fn block1_chain_tip() -> Option { zebra_test::vectors::BLOCK_MAINNET_1_BYTES .zcash_deserialize_into::>() - .map(FinalizedBlock::from) + .map(CheckpointVerifiedBlock::from) .map(ChainTipBlock::from) .ok() } diff --git a/zebrad/src/components/mempool/tests/vector.rs b/zebrad/src/components/mempool/tests/vector.rs index 4dbed9426bb..60e70674309 100644 --- a/zebrad/src/components/mempool/tests/vector.rs +++ b/zebrad/src/components/mempool/tests/vector.rs @@ -412,7 +412,7 @@ async fn mempool_cancel_mined() -> Result<(), Report> { .ready() .await .unwrap() - .call(zebra_state::Request::CommitFinalizedBlock( + .call(zebra_state::Request::CommitCheckpointVerifiedBlock( block1.clone().into(), )) .await @@ -457,7 +457,7 @@ async fn mempool_cancel_mined() -> Result<(), Report> { // Push block 2 to the state state_service - .oneshot(zebra_state::Request::CommitFinalizedBlock( + .oneshot(zebra_state::Request::CommitCheckpointVerifiedBlock( block2.clone().into(), )) .await @@ -545,7 +545,7 @@ async fn mempool_cancel_downloads_after_network_upgrade() -> Result<(), Report> .ready() .await .unwrap() - .call(zebra_state::Request::CommitFinalizedBlock( + .call(zebra_state::Request::CommitCheckpointVerifiedBlock( block1.clone().into(), )) .await @@ -822,7 +822,7 @@ async fn mempool_reverifies_after_tip_change() -> Result<(), Report> { .ready() .await .unwrap() - .call(zebra_state::Request::CommitFinalizedBlock( + 
.call(zebra_state::Request::CommitCheckpointVerifiedBlock( block1.clone().into(), )) .await @@ -882,7 +882,7 @@ async fn mempool_reverifies_after_tip_change() -> Result<(), Report> { .ready() .await .unwrap() - .call(zebra_state::Request::CommitFinalizedBlock( + .call(zebra_state::Request::CommitCheckpointVerifiedBlock( block2.clone().into(), )) .await @@ -955,7 +955,7 @@ async fn setup( .ready() .await .unwrap() - .call(zebra_state::Request::CommitFinalizedBlock( + .call(zebra_state::Request::CommitCheckpointVerifiedBlock( genesis_block.clone().into(), )) .await diff --git a/zebrad/src/components/sync/downloads.rs b/zebrad/src/components/sync/downloads.rs index fde200f7fc8..f43d8005280 100644 --- a/zebrad/src/components/sync/downloads.rs +++ b/zebrad/src/components/sync/downloads.rs @@ -122,7 +122,7 @@ pub enum BlockDownloadVerifyError { #[error("block failed consensus validation: {error:?} {height:?} {hash:?}")] Invalid { #[source] - error: zebra_consensus::chain::VerifyChainError, + error: zebra_consensus::router::RouterError, height: block::Height, hash: block::Hash, }, @@ -543,7 +543,7 @@ where verification .map(|hash| (block_height, hash)) .map_err(|err| { - match err.downcast::() { + match err.downcast::() { Ok(error) => BlockDownloadVerifyError::Invalid { error: *error, height: block_height, hash }, Err(error) => BlockDownloadVerifyError::ValidationRequestError { error, height: block_height, hash }, } diff --git a/zebrad/src/components/sync/tests/vectors.rs b/zebrad/src/components/sync/tests/vectors.rs index 940f4c27f4d..f406d3dbfe7 100644 --- a/zebrad/src/components/sync/tests/vectors.rs +++ b/zebrad/src/components/sync/tests/vectors.rs @@ -45,7 +45,7 @@ async fn sync_blocks_ok() -> Result<(), crate::BoxError> { let ( chain_sync_future, _sync_status, - mut chain_verifier, + mut router_verifier, mut peer_set, mut state_service, _mock_chain_tip_sender, @@ -88,7 +88,7 @@ async fn sync_blocks_ok() -> Result<(), crate::BoxError> { .await 
.respond(zn::Response::Blocks(vec![Available(block0.clone())])); - chain_verifier + router_verifier .expect_request(zebra_consensus::Request::Commit(block0)) .await .respond(block0_hash); @@ -96,7 +96,7 @@ async fn sync_blocks_ok() -> Result<(), crate::BoxError> { // Check that nothing unexpected happened. // We expect more requests to the state service, because the syncer keeps on running. peer_set.expect_no_requests().await; - chain_verifier.expect_no_requests().await; + router_verifier.expect_no_requests().await; // State is checked for genesis again state_service @@ -144,7 +144,7 @@ async fn sync_blocks_ok() -> Result<(), crate::BoxError> { // Check that nothing unexpected happened. peer_set.expect_no_requests().await; - chain_verifier.expect_no_requests().await; + router_verifier.expect_no_requests().await; // State is checked for all non-tip blocks (blocks 1 & 2) in response order state_service @@ -174,7 +174,7 @@ async fn sync_blocks_ok() -> Result<(), crate::BoxError> { .collect(); for _ in 1..=2 { - chain_verifier + router_verifier .expect_request_that(|req| remaining_blocks.remove(&req.block().hash()).is_some()) .await .respond_with(|req| req.block().hash()); @@ -186,7 +186,7 @@ async fn sync_blocks_ok() -> Result<(), crate::BoxError> { ); // Check that nothing unexpected happened. - chain_verifier.expect_no_requests().await; + router_verifier.expect_no_requests().await; state_service.expect_no_requests().await; // ChainSync::extend_tips @@ -217,7 +217,7 @@ async fn sync_blocks_ok() -> Result<(), crate::BoxError> { } // Check that nothing unexpected happened. 
- chain_verifier.expect_no_requests().await; + router_verifier.expect_no_requests().await; state_service.expect_no_requests().await; // Blocks 3 & 4 are fetched in order, then verified concurrently @@ -238,7 +238,7 @@ async fn sync_blocks_ok() -> Result<(), crate::BoxError> { .collect(); for _ in 3..=4 { - chain_verifier + router_verifier .expect_request_that(|req| remaining_blocks.remove(&req.block().hash()).is_some()) .await .respond_with(|req| req.block().hash()); @@ -250,7 +250,7 @@ async fn sync_blocks_ok() -> Result<(), crate::BoxError> { ); // Check that nothing unexpected happened. - chain_verifier.expect_no_requests().await; + router_verifier.expect_no_requests().await; state_service.expect_no_requests().await; let chain_sync_result = chain_sync_task_handle.now_or_never(); @@ -272,7 +272,7 @@ async fn sync_blocks_duplicate_hashes_ok() -> Result<(), crate::BoxError> { let ( chain_sync_future, _sync_status, - mut chain_verifier, + mut router_verifier, mut peer_set, mut state_service, _mock_chain_tip_sender, @@ -315,7 +315,7 @@ async fn sync_blocks_duplicate_hashes_ok() -> Result<(), crate::BoxError> { .await .respond(zn::Response::Blocks(vec![Available(block0.clone())])); - chain_verifier + router_verifier .expect_request(zebra_consensus::Request::Commit(block0)) .await .respond(block0_hash); @@ -323,7 +323,7 @@ async fn sync_blocks_duplicate_hashes_ok() -> Result<(), crate::BoxError> { // Check that nothing unexpected happened. // We expect more requests to the state service, because the syncer keeps on running. peer_set.expect_no_requests().await; - chain_verifier.expect_no_requests().await; + router_verifier.expect_no_requests().await; // State is checked for genesis again state_service @@ -373,7 +373,7 @@ async fn sync_blocks_duplicate_hashes_ok() -> Result<(), crate::BoxError> { // Check that nothing unexpected happened. 
peer_set.expect_no_requests().await; - chain_verifier.expect_no_requests().await; + router_verifier.expect_no_requests().await; // State is checked for all non-tip blocks (blocks 1 & 2) in response order state_service @@ -403,7 +403,7 @@ async fn sync_blocks_duplicate_hashes_ok() -> Result<(), crate::BoxError> { .collect(); for _ in 1..=2 { - chain_verifier + router_verifier .expect_request_that(|req| remaining_blocks.remove(&req.block().hash()).is_some()) .await .respond_with(|req| req.block().hash()); @@ -415,7 +415,7 @@ async fn sync_blocks_duplicate_hashes_ok() -> Result<(), crate::BoxError> { ); // Check that nothing unexpected happened. - chain_verifier.expect_no_requests().await; + router_verifier.expect_no_requests().await; state_service.expect_no_requests().await; // ChainSync::extend_tips @@ -448,7 +448,7 @@ async fn sync_blocks_duplicate_hashes_ok() -> Result<(), crate::BoxError> { } // Check that nothing unexpected happened. - chain_verifier.expect_no_requests().await; + router_verifier.expect_no_requests().await; state_service.expect_no_requests().await; // Blocks 3 & 4 are fetched in order, then verified concurrently @@ -469,7 +469,7 @@ async fn sync_blocks_duplicate_hashes_ok() -> Result<(), crate::BoxError> { .collect(); for _ in 3..=4 { - chain_verifier + router_verifier .expect_request_that(|req| remaining_blocks.remove(&req.block().hash()).is_some()) .await .respond_with(|req| req.block().hash()); @@ -481,7 +481,7 @@ async fn sync_blocks_duplicate_hashes_ok() -> Result<(), crate::BoxError> { ); // Check that nothing unexpected happened. 
- chain_verifier.expect_no_requests().await; + router_verifier.expect_no_requests().await; state_service.expect_no_requests().await; let chain_sync_result = chain_sync_task_handle.now_or_never(); @@ -500,7 +500,7 @@ async fn sync_block_lookahead_drop() -> Result<(), crate::BoxError> { let ( chain_sync_future, _sync_status, - mut chain_verifier, + mut router_verifier, mut peer_set, mut state_service, _mock_chain_tip_sender, @@ -535,7 +535,7 @@ async fn sync_block_lookahead_drop() -> Result<(), crate::BoxError> { // Block is dropped because it is too far ahead of the tip. // We expect more requests to the state service, because the syncer keeps on running. peer_set.expect_no_requests().await; - chain_verifier.expect_no_requests().await; + router_verifier.expect_no_requests().await; let chain_sync_result = chain_sync_task_handle.now_or_never(); assert!( @@ -555,7 +555,7 @@ async fn sync_block_too_high_obtain_tips() -> Result<(), crate::BoxError> { let ( chain_sync_future, _sync_status, - mut chain_verifier, + mut router_verifier, mut peer_set, mut state_service, _mock_chain_tip_sender, @@ -597,7 +597,7 @@ async fn sync_block_too_high_obtain_tips() -> Result<(), crate::BoxError> { .await .respond(zn::Response::Blocks(vec![Available(block0.clone())])); - chain_verifier + router_verifier .expect_request(zebra_consensus::Request::Commit(block0)) .await .respond(block0_hash); @@ -605,7 +605,7 @@ async fn sync_block_too_high_obtain_tips() -> Result<(), crate::BoxError> { // Check that nothing unexpected happened. // We expect more requests to the state service, because the syncer keeps on running. peer_set.expect_no_requests().await; - chain_verifier.expect_no_requests().await; + router_verifier.expect_no_requests().await; // State is checked for genesis again state_service @@ -654,7 +654,7 @@ async fn sync_block_too_high_obtain_tips() -> Result<(), crate::BoxError> { // Check that nothing unexpected happened. 
peer_set.expect_no_requests().await; - chain_verifier.expect_no_requests().await; + router_verifier.expect_no_requests().await; // State is checked for all non-tip blocks (blocks 982k, 1, 2) in response order state_service @@ -710,7 +710,7 @@ async fn sync_block_too_high_extend_tips() -> Result<(), crate::BoxError> { let ( chain_sync_future, _sync_status, - mut chain_verifier, + mut router_verifier, mut peer_set, mut state_service, _mock_chain_tip_sender, @@ -758,7 +758,7 @@ async fn sync_block_too_high_extend_tips() -> Result<(), crate::BoxError> { .await .respond(zn::Response::Blocks(vec![Available(block0.clone())])); - chain_verifier + router_verifier .expect_request(zebra_consensus::Request::Commit(block0)) .await .respond(block0_hash); @@ -766,7 +766,7 @@ async fn sync_block_too_high_extend_tips() -> Result<(), crate::BoxError> { // Check that nothing unexpected happened. // We expect more requests to the state service, because the syncer keeps on running. peer_set.expect_no_requests().await; - chain_verifier.expect_no_requests().await; + router_verifier.expect_no_requests().await; // State is checked for genesis again state_service @@ -814,7 +814,7 @@ async fn sync_block_too_high_extend_tips() -> Result<(), crate::BoxError> { // Check that nothing unexpected happened. peer_set.expect_no_requests().await; - chain_verifier.expect_no_requests().await; + router_verifier.expect_no_requests().await; // State is checked for all non-tip blocks (blocks 1 & 2) in response order state_service @@ -844,7 +844,7 @@ async fn sync_block_too_high_extend_tips() -> Result<(), crate::BoxError> { .collect(); for _ in 1..=2 { - chain_verifier + router_verifier .expect_request_that(|req| remaining_blocks.remove(&req.block().hash()).is_some()) .await .respond_with(|req| req.block().hash()); @@ -856,7 +856,7 @@ async fn sync_block_too_high_extend_tips() -> Result<(), crate::BoxError> { ); // Check that nothing unexpected happened. 
- chain_verifier.expect_no_requests().await; + router_verifier.expect_no_requests().await; state_service.expect_no_requests().await; // ChainSync::extend_tips @@ -888,7 +888,7 @@ async fn sync_block_too_high_extend_tips() -> Result<(), crate::BoxError> { } // Check that nothing unexpected happened. - chain_verifier.expect_no_requests().await; + router_verifier.expect_no_requests().await; state_service.expect_no_requests().await; // Blocks 3, 4, 982k are fetched in order, then verified concurrently, @@ -926,7 +926,7 @@ fn setup() -> ( // ChainSync impl Future> + Send, SyncStatus, - // ChainVerifier + // BlockVerifierRouter MockService, // PeerSet MockService, @@ -951,7 +951,7 @@ fn setup() -> ( .with_max_request_delay(MAX_SERVICE_REQUEST_DELAY) .for_unit_tests(); - let chain_verifier = MockService::build() + let router_verifier = MockService::build() .with_max_request_delay(MAX_SERVICE_REQUEST_DELAY) .for_unit_tests(); @@ -965,7 +965,7 @@ fn setup() -> ( &config, Height(0), peer_set.clone(), - chain_verifier.clone(), + router_verifier.clone(), state_service.clone(), mock_chain_tip, ); @@ -975,7 +975,7 @@ fn setup() -> ( ( chain_sync_future, sync_status, - chain_verifier, + router_verifier, peer_set, state_service, mock_chain_tip_sender, diff --git a/zebrad/tests/acceptance.rs b/zebrad/tests/acceptance.rs index 81952672cf7..c8fac004d45 100644 --- a/zebrad/tests/acceptance.rs +++ b/zebrad/tests/acceptance.rs @@ -1038,7 +1038,8 @@ fn sync_large_checkpoints_mempool_mainnet() -> Result<()> { #[tracing::instrument] fn create_cached_database(network: Network) -> Result<()> { let height = network.mandatory_checkpoint_height(); - let checkpoint_stop_regex = format!("{STOP_AT_HEIGHT_REGEX}.*CommitFinalized request"); + let checkpoint_stop_regex = + format!("{STOP_AT_HEIGHT_REGEX}.*commit checkpoint-verified request"); create_cached_database_height( network, @@ -1056,7 +1057,7 @@ fn create_cached_database(network: Network) -> Result<()> { fn 
sync_past_mandatory_checkpoint(network: Network) -> Result<()> { let height = network.mandatory_checkpoint_height() + 1200; let full_validation_stop_regex = - format!("{STOP_AT_HEIGHT_REGEX}.*best non-finalized chain root"); + format!("{STOP_AT_HEIGHT_REGEX}.*commit contextually-verified request"); create_cached_database_height( network, diff --git a/zebrad/tests/common/sync.rs b/zebrad/tests/common/sync.rs index 2da33067f2c..ff483827a7a 100644 --- a/zebrad/tests/common/sync.rs +++ b/zebrad/tests/common/sync.rs @@ -52,7 +52,7 @@ pub const SYNC_PROGRESS_REGEX: &str = r"sync_percent"; /// The text that should be logged when Zebra loads its compiled-in checkpoints. #[cfg(feature = "zebra-checkpoints")] pub const CHECKPOINT_VERIFIER_REGEX: &str = - r"initializing chain verifier.*max_checkpoint_height.*=.*Height"; + r"initializing block verifier router.*max_checkpoint_height.*=.*Height"; /// The maximum amount of time Zebra should take to reload after shutting down. /// From a7b03228bc4e66fdbd4db2e82ea67f7cef35f259 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 5 Jun 2023 21:20:04 +0000 Subject: [PATCH 039/265] build(deps): bump tj-actions/changed-files from 36.0.9 to 36.0.17 (#6815) Bumps [tj-actions/changed-files](https://github.com/tj-actions/changed-files) from 36.0.9 to 36.0.17. - [Release notes](https://github.com/tj-actions/changed-files/releases) - [Changelog](https://github.com/tj-actions/changed-files/blob/main/HISTORY.md) - [Commits](https://github.com/tj-actions/changed-files/compare/v36.0.9...v36.0.17) --- updated-dependencies: - dependency-name: tj-actions/changed-files dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/lint.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index b5a60b25e06..8ff00d25ca8 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -37,7 +37,7 @@ jobs: - name: Rust files id: changed-files-rust - uses: tj-actions/changed-files@v36.0.9 + uses: tj-actions/changed-files@v36.0.17 with: files: | **/*.rs @@ -49,7 +49,7 @@ jobs: - name: Workflow files id: changed-files-workflows - uses: tj-actions/changed-files@v36.0.9 + uses: tj-actions/changed-files@v36.0.17 with: files: | .github/workflows/*.yml From 628ddd64eaa28aa2119fca7c9f69bab2332705cb Mon Sep 17 00:00:00 2001 From: teor Date: Tue, 6 Jun 2023 09:56:08 +1000 Subject: [PATCH 040/265] fix(test): Skip editor files and other hidden files in test configs directory (#6796) * Skip editor files and other hidden files in test configs directory * Also ignore files starting with `#` --- zebrad/tests/acceptance.rs | 52 ++++++++++++++++++++++++++++++++------ 1 file changed, 44 insertions(+), 8 deletions(-) diff --git a/zebrad/tests/acceptance.rs b/zebrad/tests/acceptance.rs index c8fac004d45..c53f5c4b22b 100644 --- a/zebrad/tests/acceptance.rs +++ b/zebrad/tests/acceptance.rs @@ -572,8 +572,8 @@ fn config_tests() -> Result<()> { // Check that we have a current version of the config stored last_config_is_stored()?; - // Check that Zebra stored configuration works - stored_configs_works()?; + // Check that Zebra's previous configurations still work + stored_configs_work()?; // Runs `zebrad` serially to avoid potential port conflicts app_no_args()?; @@ -702,13 +702,31 @@ fn last_config_is_stored() -> Result<()> { .to_string(); // Loop all the stored configs + // + // TODO: use the same filename list code in last_config_is_stored() and stored_configs_work() for config_file in 
configs_dir() .read_dir() .expect("read_dir call failed") .flatten() { + let config_file_path = config_file.path(); + let config_file_name = config_file_path + .file_name() + .expect("config files must have a file name") + .to_string_lossy(); + + if config_file_name.as_ref().starts_with('.') || config_file_name.as_ref().starts_with('#') + { + // Skip editor files and other invalid config paths + tracing::info!( + ?config_file_path, + "skipping hidden/temporary config file path" + ); + continue; + } + // Read stored config - let stored_content = fs::read_to_string(config_file_full_path(config_file.path())) + let stored_content = fs::read_to_string(config_file_full_path(config_file_path)) .expect("Should have been able to read the file") .trim() .to_string(); @@ -832,7 +850,7 @@ fn invalid_generated_config() -> Result<()> { /// Test all versions of `zebrad.toml` we have stored can be parsed by the latest `zebrad`. #[tracing::instrument] -fn stored_configs_works() -> Result<()> { +fn stored_configs_work() -> Result<()> { let old_configs_dir = configs_dir(); for config_file in old_configs_dir @@ -840,15 +858,33 @@ fn stored_configs_works() -> Result<()> { .expect("read_dir call failed") .flatten() { + let config_file_path = config_file.path(); + let config_file_name = config_file_path + .file_name() + .expect("config files must have a file name") + .to_string_lossy(); + + if config_file_name.as_ref().starts_with('.') || config_file_name.as_ref().starts_with('#') + { + // Skip editor files and other invalid config paths + tracing::info!( + ?config_file_path, + "skipping hidden/temporary config file path" + ); + continue; + } + // ignore files starting with getblocktemplate prefix // if we were not built with the getblocktemplate-rpcs feature. 
#[cfg(not(feature = "getblocktemplate-rpcs"))] - if config_file - .file_name() - .into_string() - .expect("all files names should be string convertible") + if config_file_name + .as_ref() .starts_with(GET_BLOCK_TEMPLATE_CONFIG_PREFIX) { + tracing::info!( + ?config_file_path, + "skipping getblocktemplate-rpcs config file path" + ); continue; } From 3cf3798927d07642c47335e7380e2bfc5dea78cb Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 6 Jun 2023 02:04:52 +0000 Subject: [PATCH 041/265] build(deps): bump chrono from 0.4.25 to 0.4.26 (#6799) Bumps [chrono](https://github.com/chronotope/chrono) from 0.4.25 to 0.4.26. - [Release notes](https://github.com/chronotope/chrono/releases) - [Changelog](https://github.com/chronotope/chrono/blob/main/CHANGELOG.md) - [Commits](https://github.com/chronotope/chrono/compare/v0.4.25...v0.4.26) --- updated-dependencies: - dependency-name: chrono dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- zebra-chain/Cargo.toml | 2 +- zebra-consensus/Cargo.toml | 2 +- zebra-network/Cargo.toml | 2 +- zebra-rpc/Cargo.toml | 2 +- zebra-state/Cargo.toml | 2 +- zebrad/Cargo.toml | 2 +- 7 files changed, 8 insertions(+), 8 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index a13d7785c45..1f1b472f4e1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -653,9 +653,9 @@ dependencies = [ [[package]] name = "chrono" -version = "0.4.25" +version = "0.4.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fdbc37d37da9e5bce8173f3a41b71d9bf3c674deebbaceacd0ebdabde76efb03" +checksum = "ec837a71355b28f6556dbd569b37b3f363091c0bd4b2e735674521b4c5fd9bc5" dependencies = [ "android-tzdata", "iana-time-zone", diff --git a/zebra-chain/Cargo.toml b/zebra-chain/Cargo.toml index b20c49a0786..4a004651e80 100644 --- a/zebra-chain/Cargo.toml +++ b/zebra-chain/Cargo.toml @@ -69,7 +69,7 @@ zcash_note_encryption = "0.3.0" zcash_primitives = { version = "0.11.0", features = ["transparent-inputs"] } # Time -chrono = { version = "0.4.25", default-features = false, features = ["clock", "std", "serde"] } +chrono = { version = "0.4.26", default-features = false, features = ["clock", "std", "serde"] } humantime = "2.1.0" # Error Handling & Formatting diff --git a/zebra-consensus/Cargo.toml b/zebra-consensus/Cargo.toml index 5eb6f37cdd8..deab9ce8b1a 100644 --- a/zebra-consensus/Cargo.toml +++ b/zebra-consensus/Cargo.toml @@ -34,7 +34,7 @@ jubjub = "0.10.0" rand = { version = "0.8.5", package = "rand" } rayon = "1.7.0" -chrono = { version = "0.4.25", default-features = false, features = ["clock", "std"] } +chrono = { version = "0.4.26", default-features = false, features = ["clock", "std"] } displaydoc = "0.2.4" lazy_static = "1.4.0" once_cell = "1.17.2" diff --git a/zebra-network/Cargo.toml b/zebra-network/Cargo.toml index 6a24808d612..2b3e5b2c88c 
100644 --- a/zebra-network/Cargo.toml +++ b/zebra-network/Cargo.toml @@ -26,7 +26,7 @@ proptest-impl = ["proptest", "proptest-derive", "zebra-chain/proptest-impl"] bitflags = "2.2.1" byteorder = "1.4.3" bytes = "1.4.0" -chrono = { version = "0.4.25", default-features = false, features = ["clock", "std"] } +chrono = { version = "0.4.26", default-features = false, features = ["clock", "std"] } hex = "0.4.3" humantime-serde = "1.1.1" indexmap = { version = "1.9.3", features = ["serde"] } diff --git a/zebra-rpc/Cargo.toml b/zebra-rpc/Cargo.toml index c2c7181e860..ddcbd0da7bc 100644 --- a/zebra-rpc/Cargo.toml +++ b/zebra-rpc/Cargo.toml @@ -32,7 +32,7 @@ proptest-impl = [ ] [dependencies] -chrono = { version = "0.4.25", default-features = false, features = ["clock", "std"] } +chrono = { version = "0.4.26", default-features = false, features = ["clock", "std"] } futures = "0.3.28" # lightwalletd sends JSON-RPC requests over HTTP 1.1 diff --git a/zebra-state/Cargo.toml b/zebra-state/Cargo.toml index 03a2fb257b8..95cf4827cde 100644 --- a/zebra-state/Cargo.toml +++ b/zebra-state/Cargo.toml @@ -34,7 +34,7 @@ elasticsearch = [ [dependencies] bincode = "1.3.3" -chrono = { version = "0.4.25", default-features = false, features = ["clock", "std"] } +chrono = { version = "0.4.26", default-features = false, features = ["clock", "std"] } dirs = "5.0.1" futures = "0.3.28" hex = "0.4.3" diff --git a/zebrad/Cargo.toml b/zebrad/Cargo.toml index fa0ea3f5a69..2c0d3f66ef2 100644 --- a/zebrad/Cargo.toml +++ b/zebrad/Cargo.toml @@ -117,7 +117,7 @@ zebra-state = { path = "../zebra-state" } abscissa_core = "0.5" gumdrop = { version = "0.7", features = ["default_expr"]} -chrono = { version = "0.4.25", default-features = false, features = ["clock", "std"] } +chrono = { version = "0.4.26", default-features = false, features = ["clock", "std"] } humantime-serde = "1.1.1" indexmap = "1.9.3" lazy_static = "1.4.0" From 1866262ee7d94db169e4a4493b918f87f1735974 Mon Sep 17 00:00:00 2001 From: 
"dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 6 Jun 2023 02:05:15 +0000 Subject: [PATCH 042/265] build(deps): bump indicatif from 0.17.4 to 0.17.5 (#6818) Bumps [indicatif](https://github.com/console-rs/indicatif) from 0.17.4 to 0.17.5. - [Release notes](https://github.com/console-rs/indicatif/releases) - [Commits](https://github.com/console-rs/indicatif/compare/0.17.4...0.17.5) --- updated-dependencies: - dependency-name: indicatif dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- zebrad/Cargo.toml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 1f1b472f4e1..75229d55e70 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2078,9 +2078,9 @@ dependencies = [ [[package]] name = "indicatif" -version = "0.17.4" +version = "0.17.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db45317f37ef454e6519b6c3ed7d377e5f23346f0823f86e65ca36912d1d0ef8" +checksum = "8ff8cc23a7393a397ed1d7f56e6365cba772aba9f9912ab968b03043c395d057" dependencies = [ "console", "instant", diff --git a/zebrad/Cargo.toml b/zebrad/Cargo.toml index 2c0d3f66ef2..45f27275eec 100644 --- a/zebrad/Cargo.toml +++ b/zebrad/Cargo.toml @@ -175,7 +175,7 @@ log = "0.4.18" # prod feature progress-bar howudoin = { version = "0.1.2", features = ["term-line"], optional = true } -indicatif = { version = "0.17.4", optional = true } +indicatif = { version = "0.17.5", optional = true } # test feature proptest-impl proptest = { version = "1.2.0", optional = true } From 427e2b03ed4f6f005d9801c69f7722edd2974203 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 6 Jun 2023 02:05:31 +0000 Subject: [PATCH 043/265] build(deps): bump arduino/setup-protoc from 1.3.0 to 2.0.0 (#6798) * build(deps): 
bump arduino/setup-protoc from 1.3.0 to 2.0.0 Bumps [arduino/setup-protoc](https://github.com/arduino/setup-protoc) from 1.3.0 to 2.0.0. - [Release notes](https://github.com/arduino/setup-protoc/releases) - [Commits](https://github.com/arduino/setup-protoc/compare/v1.3.0...v2.0.0) --- updated-dependencies: - dependency-name: arduino/setup-protoc dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] * Apply suggestions from code review --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Arya --- .github/workflows/build-crates-individually.yml | 4 ++-- .github/workflows/continous-integration-os.yml | 8 ++++---- .github/workflows/docs.yml | 4 ++-- .github/workflows/lint.yml | 12 ++++++------ 4 files changed, 14 insertions(+), 14 deletions(-) diff --git a/.github/workflows/build-crates-individually.yml b/.github/workflows/build-crates-individually.yml index 756a37b088a..1546a361c65 100644 --- a/.github/workflows/build-crates-individually.yml +++ b/.github/workflows/build-crates-individually.yml @@ -112,10 +112,10 @@ jobs: - uses: r7kamura/rust-problem-matchers@v1.3.0 - name: Install last version of Protoc - uses: arduino/setup-protoc@v1.3.0 + uses: arduino/setup-protoc@v2.0.0 with: # TODO: increase to latest version after https://github.com/arduino/setup-protoc/issues/33 is fixed - version: '3.20.1' + version: '23.x' repo-token: ${{ secrets.GITHUB_TOKEN }} # Setup Rust with stable toolchain and minimal profile diff --git a/.github/workflows/continous-integration-os.yml b/.github/workflows/continous-integration-os.yml index c6749d23e86..08083e79e9d 100644 --- a/.github/workflows/continous-integration-os.yml +++ b/.github/workflows/continous-integration-os.yml @@ -94,10 +94,10 @@ jobs: - uses: r7kamura/rust-problem-matchers@v1.3.0 - name: Install last version of Protoc - uses: arduino/setup-protoc@v1.3.0 + uses: 
arduino/setup-protoc@v2.0.0 with: # TODO: increase to latest version after https://github.com/arduino/setup-protoc/issues/33 is fixed - version: '3.20.1' + version: '23.x' repo-token: ${{ secrets.GITHUB_TOKEN }} # Setup Rust with ${{ matrix.rust }} toolchain and minimal profile @@ -230,10 +230,10 @@ jobs: - uses: r7kamura/rust-problem-matchers@v1.3.0 - name: Install last version of Protoc - uses: arduino/setup-protoc@v1.3.0 + uses: arduino/setup-protoc@v2.0.0 with: # TODO: increase to latest version after https://github.com/arduino/setup-protoc/issues/33 is fixed - version: '3.20.1' + version: '23.x' repo-token: ${{ secrets.GITHUB_TOKEN }} # Setup Rust with stable toolchain and minimal profile diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml index e889e7711cd..acf07c8a2ba 100644 --- a/.github/workflows/docs.yml +++ b/.github/workflows/docs.yml @@ -45,10 +45,10 @@ jobs: persist-credentials: false - name: Install last version of Protoc - uses: arduino/setup-protoc@v1.3.0 + uses: arduino/setup-protoc@v2.0.0 with: # TODO: increase to latest version after https://github.com/arduino/setup-protoc/issues/33 is fixed - version: '3.20.1' + version: '23.x' repo-token: ${{ secrets.GITHUB_TOKEN }} # Setup Rust with beta toolchain and default profile (to include rust-docs) diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 8ff00d25ca8..c2c04a157a6 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -67,10 +67,10 @@ jobs: persist-credentials: false - name: Install last version of Protoc - uses: arduino/setup-protoc@v1.3.0 + uses: arduino/setup-protoc@v2.0.0 with: # TODO: increase to latest version after https://github.com/arduino/setup-protoc/issues/33 is fixed - version: '3.20.1' + version: '23.x' repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Check workflow permissions @@ -118,10 +118,10 @@ jobs: - uses: r7kamura/rust-problem-matchers@v1.3.0 - name: Install last version of Protoc - uses: 
arduino/setup-protoc@v1.3.0 + uses: arduino/setup-protoc@v2.0.0 with: # TODO: increase to latest version after https://github.com/arduino/setup-protoc/issues/33 is fixed - version: '3.20.1' + version: '23.x' repo-token: ${{ secrets.GITHUB_TOKEN }} # Setup Rust with stable toolchain and default profile @@ -157,10 +157,10 @@ jobs: - uses: r7kamura/rust-problem-matchers@v1.3.0 - name: Install last version of Protoc - uses: arduino/setup-protoc@v1.3.0 + uses: arduino/setup-protoc@v2.0.0 with: # TODO: increase to latest version after https://github.com/arduino/setup-protoc/issues/33 is fixed - version: '3.20.1' + version: '23.x' repo-token: ${{ secrets.GITHUB_TOKEN }} # Setup Rust with stable toolchain and default profile From 0c2107af7c49dbd41ff3c324759f5e1dea833a4b Mon Sep 17 00:00:00 2001 From: Alfredo Garcia Date: Tue, 6 Jun 2023 00:35:39 -0300 Subject: [PATCH 044/265] disable macOS tests (#6825) --- .github/workflows/continous-integration-os.patch.yml | 3 ++- .github/workflows/continous-integration-os.yml | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/.github/workflows/continous-integration-os.patch.yml b/.github/workflows/continous-integration-os.patch.yml index 2f5eea44f98..b79ab949975 100644 --- a/.github/workflows/continous-integration-os.patch.yml +++ b/.github/workflows/continous-integration-os.patch.yml @@ -22,7 +22,8 @@ jobs: strategy: matrix: # TODO: Windows was removed for now, see https://github.com/ZcashFoundation/zebra/issues/3801 - os: [ubuntu-latest, macos-latest] + # TODO: macOS tests were removed for now, see https://github.com/ZcashFoundation/zebra/issues/6824 + os: [ubuntu-latest] rust: [stable, beta] features: ["", " --features getblocktemplate-rpcs"] exclude: diff --git a/.github/workflows/continous-integration-os.yml b/.github/workflows/continous-integration-os.yml index 08083e79e9d..8b0c04c45f3 100644 --- a/.github/workflows/continous-integration-os.yml +++ b/.github/workflows/continous-integration-os.yml @@ -69,7 +69,8 
@@ jobs: fail-fast: false matrix: # TODO: Windows was removed for now, see https://github.com/ZcashFoundation/zebra/issues/3801 - os: [ubuntu-latest, macos-latest] + # TODO: macOS tests were removed for now, see https://github.com/ZcashFoundation/zebra/issues/6824 + os: [ubuntu-latest] rust: [stable, beta] features: ["", " --features getblocktemplate-rpcs"] exclude: From f197dfb75e0c4d4558b4b9ecb38b5195e34e52aa Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 6 Jun 2023 05:41:25 +0000 Subject: [PATCH 045/265] build(deps): bump regex from 1.8.3 to 1.8.4 (#6817) Bumps [regex](https://github.com/rust-lang/regex) from 1.8.3 to 1.8.4. - [Release notes](https://github.com/rust-lang/regex/releases) - [Changelog](https://github.com/rust-lang/regex/blob/master/CHANGELOG.md) - [Commits](https://github.com/rust-lang/regex/compare/1.8.3...1.8.4) --- updated-dependencies: - dependency-name: regex dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- zebra-network/Cargo.toml | 2 +- zebra-state/Cargo.toml | 2 +- zebra-test/Cargo.toml | 2 +- zebra-utils/Cargo.toml | 2 +- zebrad/Cargo.toml | 2 +- 6 files changed, 7 insertions(+), 7 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 75229d55e70..d67f22b8e15 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3624,9 +3624,9 @@ dependencies = [ [[package]] name = "regex" -version = "1.8.3" +version = "1.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81ca098a9821bd52d6b24fd8b10bd081f47d39c22778cafaa75a2857a62c6390" +checksum = "d0ab3ca65655bb1e41f2a8c8cd662eb4fb035e67c3f78da1d61dffe89d07300f" dependencies = [ "aho-corasick 1.0.1", "memchr", diff --git a/zebra-network/Cargo.toml b/zebra-network/Cargo.toml index 2b3e5b2c88c..ea591c47ad7 100644 --- a/zebra-network/Cargo.toml +++ b/zebra-network/Cargo.toml @@ -35,7 +35,7 @@ ordered-map = "0.4.2" pin-project = "1.1.0" rand = { version = "0.8.5", package = "rand" } rayon = "1.7.0" -regex = "1.8.3" +regex = "1.8.4" serde = { version = "1.0.163", features = ["serde_derive"] } thiserror = "1.0.40" diff --git a/zebra-state/Cargo.toml b/zebra-state/Cargo.toml index 95cf4827cde..8e2100d83e4 100644 --- a/zebra-state/Cargo.toml +++ b/zebra-state/Cargo.toml @@ -43,7 +43,7 @@ itertools = "0.10.5" lazy_static = "1.4.0" metrics = "0.21.0" mset = "0.1.1" -regex = "1.8.3" +regex = "1.8.4" rlimit = "0.9.1" rocksdb = { version = "0.21.0", default_features = false, features = ["lz4"] } serde = { version = "1.0.163", features = ["serde_derive"] } diff --git a/zebra-test/Cargo.toml b/zebra-test/Cargo.toml index 2d9e973583c..fd034a74efe 100644 --- a/zebra-test/Cargo.toml +++ b/zebra-test/Cargo.toml @@ -15,7 +15,7 @@ insta = "1.29.0" proptest = "1.2.0" once_cell = "1.17.2" rand = { version = "0.8.5", package = "rand" } -regex = "1.8.3" +regex = "1.8.4" tokio = { version = 
"1.28.2", features = ["full", "tracing", "test-util"] } tower = { version = "0.4.13", features = ["util"] } diff --git a/zebra-utils/Cargo.toml b/zebra-utils/Cargo.toml index 962c37b03d3..6d2d3c3b074 100644 --- a/zebra-utils/Cargo.toml +++ b/zebra-utils/Cargo.toml @@ -67,7 +67,7 @@ zebra-chain = { path = "../zebra-chain" } itertools = { version = "0.10.5", optional = true } # These crates are needed for the search-issue-refs binary -regex = { version = "1.8.3", optional = true } +regex = { version = "1.8.4", optional = true } reqwest = { version = "0.11.18", optional = true } # These crates are needed for the zebra-checkpoints and search-issue-refs binaries diff --git a/zebrad/Cargo.toml b/zebrad/Cargo.toml index 45f27275eec..b69396dbc63 100644 --- a/zebrad/Cargo.toml +++ b/zebrad/Cargo.toml @@ -195,7 +195,7 @@ abscissa_core = { version = "0.5", features = ["testing"] } hex = "0.4.3" jsonrpc-core = "18.0.0" once_cell = "1.17.2" -regex = "1.8.3" +regex = "1.8.4" semver = "1.0.17" # zebra-rpc needs the preserve_order feature, it also makes test results more stable From 04e96c25260fe0135627ff6c13ef5c4d13527201 Mon Sep 17 00:00:00 2001 From: teor Date: Tue, 6 Jun 2023 18:28:14 +1000 Subject: [PATCH 046/265] feat(net): Cache a list of useful peers on disk (#6739) * Rewrite some state cache docs to clarify * Add a zebra_network::Config.cache_dir for peer address caches * Add new config test files and fix config test failure message * Create some zebra-chain and zebra-network convenience functions * Add methods for reading and writing the peer address cache * Add cached disk peers to the initial peers list * Add metrics and logging for loading and storing the peer cache * Replace log of useless redacted peer IP addresses * Limit the peer cache minimum and maximum size, don't write empty caches * Add a cacheable_peers() method to the address book * Add a peer disk cache updater task to the peer set tasks * Document that the peer cache is shared by multiple instances unless 
configured otherwise * Disable peer cache read/write in disconnected tests * Make initial peer cache updater sleep shorter for tests * Add unit tests for reading and writing the peer cache * Update the task list in the start command docs * Modify the existing persistent acceptance test to check for peer caches * Update the peer cache directory when writing test configs * Add a CacheDir type so the default config can be enabled, but tests can disable it * Update tests to use the CacheDir config type * Rename some CacheDir internals * Add config file test cases for each kind of CacheDir config * Panic if the config contains invalid socket addresses, rather than continuing * Add a network directory to state cache directory contents tests * Add new network.cache_dir config to the config parsing tests --- Cargo.lock | 2 + zebra-chain/src/parameters/network.rs | 5 + zebra-network/Cargo.toml | 4 +- zebra-network/src/address_book.rs | 36 ++- zebra-network/src/config.rs | 290 +++++++++++++++++- zebra-network/src/config/cache_dir.rs | 74 +++++ zebra-network/src/constants.rs | 16 + zebra-network/src/lib.rs | 13 +- zebra-network/src/meta_addr/peer_addr.rs | 8 + zebra-network/src/peer_cache_updater.rs | 63 ++++ zebra-network/src/peer_set/initialize.rs | 34 +- .../src/peer_set/initialize/tests/vectors.rs | 93 ++++++ zebra-state/src/config.rs | 38 ++- zebrad/src/commands/start.rs | 5 +- .../components/inbound/tests/real_peer_set.rs | 3 +- zebrad/tests/acceptance.rs | 26 +- .../configs/getblocktemplate-v1.0.0-rc.9.toml | 74 +++++ .../configs/net-cache-custom-v1.0.0-rc.9.toml | 16 + .../net-cache-disabled-v1.0.0-rc.9.toml | 9 + .../net-cache-enabled-v1.0.0-rc.9.toml | 18 ++ zebrad/tests/common/configs/v1.0.0-rc.9.toml | 71 +++++ zebrad/tests/common/launch.rs | 18 +- 22 files changed, 868 insertions(+), 48 deletions(-) create mode 100644 zebra-network/src/config/cache_dir.rs create mode 100644 zebra-network/src/peer_cache_updater.rs create mode 100644 
zebrad/tests/common/configs/getblocktemplate-v1.0.0-rc.9.toml create mode 100644 zebrad/tests/common/configs/net-cache-custom-v1.0.0-rc.9.toml create mode 100644 zebrad/tests/common/configs/net-cache-disabled-v1.0.0-rc.9.toml create mode 100644 zebrad/tests/common/configs/net-cache-enabled-v1.0.0-rc.9.toml create mode 100644 zebrad/tests/common/configs/v1.0.0-rc.9.toml diff --git a/Cargo.lock b/Cargo.lock index d67f22b8e15..a3d81a998e2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5847,6 +5847,7 @@ dependencies = [ "byteorder", "bytes", "chrono", + "dirs", "futures", "hex", "howudoin", @@ -5863,6 +5864,7 @@ dependencies = [ "regex", "serde", "static_assertions", + "tempfile", "thiserror", "tokio", "tokio-stream", diff --git a/zebra-chain/src/parameters/network.rs b/zebra-chain/src/parameters/network.rs index 6c6319ce765..6ec34384e10 100644 --- a/zebra-chain/src/parameters/network.rs +++ b/zebra-chain/src/parameters/network.rs @@ -119,6 +119,11 @@ impl Network { Network::Testnet => "test".to_string(), } } + + /// Return the lowercase network name. 
+ pub fn lowercase_name(&self) -> String { + self.to_string().to_ascii_lowercase() + } } impl FromStr for Network { diff --git a/zebra-network/Cargo.toml b/zebra-network/Cargo.toml index ea591c47ad7..cb001281a02 100644 --- a/zebra-network/Cargo.toml +++ b/zebra-network/Cargo.toml @@ -27,6 +27,7 @@ bitflags = "2.2.1" byteorder = "1.4.3" bytes = "1.4.0" chrono = { version = "0.4.26", default-features = false, features = ["clock", "std"] } +dirs = "5.0.1" hex = "0.4.3" humantime-serde = "1.1.1" indexmap = { version = "1.9.3", features = ["serde"] } @@ -37,10 +38,11 @@ rand = { version = "0.8.5", package = "rand" } rayon = "1.7.0" regex = "1.8.4" serde = { version = "1.0.163", features = ["serde_derive"] } +tempfile = "3.5.0" thiserror = "1.0.40" futures = "0.3.28" -tokio = { version = "1.28.2", features = ["net", "time", "tracing", "macros", "rt-multi-thread"] } +tokio = { version = "1.28.2", features = ["fs", "net", "time", "tracing", "macros", "rt-multi-thread"] } tokio-stream = { version = "0.1.14", features = ["sync", "time"] } tokio-util = { version = "0.7.8", features = ["codec"] } tower = { version = "0.4.13", features = ["retry", "discover", "load", "load-shed", "timeout", "util", "buffer"] } diff --git a/zebra-network/src/address_book.rs b/zebra-network/src/address_book.rs index fc0bda70dcd..8e8b45114cc 100644 --- a/zebra-network/src/address_book.rs +++ b/zebra-network/src/address_book.rs @@ -240,7 +240,8 @@ impl AddressBook { self.local_listener } - /// Get the contents of `self` in random order with sanitized timestamps. + /// Get the active addresses in `self` in random order with sanitized timestamps, + /// including our local listener address. 
pub fn sanitized(&self, now: chrono::DateTime) -> Vec { use rand::seq::SliceRandom; let _guard = self.span.enter(); @@ -254,10 +255,12 @@ impl AddressBook { peers.insert(local_listener.addr, local_listener); // Then sanitize and shuffle - let mut peers = peers + let mut peers: Vec = peers .descending_values() .filter_map(|meta_addr| meta_addr.sanitize(self.network)) - // Security: remove peers that: + // # Security + // + // Remove peers that: // - last responded more than three hours ago, or // - haven't responded yet but were reported last seen more than three hours ago // @@ -265,9 +268,34 @@ impl AddressBook { // nodes impacts the network health, because connection attempts end up being wasted on // peers that are less likely to respond. .filter(|addr| addr.is_active_for_gossip(now)) - .collect::>(); + .collect(); + peers.shuffle(&mut rand::thread_rng()); + + peers + } + + /// Get the active addresses in `self`, in preferred caching order, + /// excluding our local listener address. + pub fn cacheable(&self, now: chrono::DateTime) -> Vec { + let _guard = self.span.enter(); + + let peers = self.by_addr.clone(); + + // Get peers in preferred order, then keep the recently active ones peers + .descending_values() + // # Security + // + // Remove peers that: + // - last responded more than three hours ago, or + // - haven't responded yet but were reported last seen more than three hours ago + // + // This prevents Zebra from caching nodes that are likely unreachable, + // which improves startup time and reliability. + .filter(|addr| addr.is_active_for_gossip(now)) + .cloned() + .collect() } /// Look up `addr` in the address book, and return its [`MetaAddr`]. 
diff --git a/zebra-network/src/config.rs b/zebra-network/src/config.rs index c1d49ab7905..78662a712b3 100644 --- a/zebra-network/src/config.rs +++ b/zebra-network/src/config.rs @@ -2,6 +2,8 @@ use std::{ collections::HashSet, + ffi::OsString, + io::{self, ErrorKind}, net::{IpAddr, SocketAddr}, string::String, time::Duration, @@ -9,21 +11,27 @@ use std::{ use indexmap::IndexSet; use serde::{de, Deserialize, Deserializer}; +use tempfile::NamedTempFile; +use tokio::{fs, io::AsyncWriteExt}; use zebra_chain::parameters::Network; use crate::{ constants::{ DEFAULT_CRAWL_NEW_PEER_INTERVAL, DNS_LOOKUP_TIMEOUT, INBOUND_PEER_LIMIT_MULTIPLIER, - OUTBOUND_PEER_LIMIT_MULTIPLIER, + MAX_PEER_DISK_CACHE_SIZE, OUTBOUND_PEER_LIMIT_MULTIPLIER, }, protocol::external::{canonical_peer_addr, canonical_socket_addr}, BoxError, PeerSocketAddr, }; +mod cache_dir; + #[cfg(test)] mod tests; +pub use cache_dir::CacheDir; + /// The number of times Zebra will retry each initial peer's DNS resolution, /// before checking if any other initial peers have returned addresses. /// @@ -71,9 +79,64 @@ pub struct Config { /// testnet. pub initial_testnet_peers: IndexSet, + /// An optional root directory for storing cached peer address data. + /// + /// # Configuration + /// + /// Set to: + /// - `true` to read and write peer addresses to disk using the default cache path, + /// - `false` to disable reading and writing peer addresses to disk, + /// - `'/custom/cache/directory'` to read and write peer addresses to a custom directory. + /// + /// By default, all Zebra instances run by the same user will share a single peer cache. + /// If you use a custom cache path, you might also want to change `state.cache_dir`. + /// + /// # Functionality + /// + /// The peer cache is a list of the addresses of some recently useful peers. + /// + /// For privacy reasons, the cache does *not* include any other information about peers, + /// such as when they were connected to the node. 
+ /// + /// Deleting or modifying the peer cache can impact your node's: + /// - reliability: if DNS or the Zcash DNS seeders are unavailable or broken + /// - security: if DNS is compromised with malicious peers + /// + /// If you delete it, Zebra will replace it with a fresh set of peers from the DNS seeders. + /// + /// # Defaults + /// + /// The default directory is platform dependent, based on + /// [`dirs::cache_dir()`](https://docs.rs/dirs/3.0.1/dirs/fn.cache_dir.html): + /// + /// |Platform | Value | Example | + /// | ------- | ----------------------------------------------- | ------------------------------------ | + /// | Linux | `$XDG_CACHE_HOME/zebra` or `$HOME/.cache/zebra` | `/home/alice/.cache/zebra` | + /// | macOS | `$HOME/Library/Caches/zebra` | `/Users/Alice/Library/Caches/zebra` | + /// | Windows | `{FOLDERID_LocalAppData}\zebra` | `C:\Users\Alice\AppData\Local\zebra` | + /// | Other | `std::env::current_dir()/cache/zebra` | `/cache/zebra` | + /// + /// # Security + /// + /// If you are running Zebra with elevated permissions ("root"), create the + /// directory for this file before running Zebra, and make sure the Zebra user + /// account has exclusive access to that directory, and other users can't modify + /// its parent directories. + /// + /// # Implementation Details + /// + /// Each network has a separate peer list, which is updated regularly from the current + /// address book. These lists are stored in `network/mainnet.peers` and + /// `network/testnet.peers` files, underneath the `cache_dir` path. + /// + /// Previous peer lists are automatically loaded at startup, and used to populate the + /// initial peer set and address book. + pub cache_dir: CacheDir, + /// The initial target size for the peer set. /// - /// Also used to limit the number of inbound and outbound connections made by Zebra. + /// Also used to limit the number of inbound and outbound connections made by Zebra, + /// and the size of the cached peer list. 
/// /// If you have a slow network connection, and Zebra is having trouble /// syncing, try reducing the peer set size. You can also reduce the peer @@ -144,9 +207,24 @@ impl Config { } } - /// Resolve initial seed peer IP addresses, based on the configured network. + /// Resolve initial seed peer IP addresses, based on the configured network, + /// and load cached peers from disk, if available. + /// + /// # Panics + /// + /// If a configured address is an invalid [`SocketAddr`] or DNS name. pub async fn initial_peers(&self) -> HashSet { - Config::resolve_peers(&self.initial_peer_hostnames().iter().cloned().collect()).await + // TODO: do DNS and disk in parallel if startup speed becomes important + let dns_peers = + Config::resolve_peers(&self.initial_peer_hostnames().iter().cloned().collect()).await; + + // Ignore disk errors because the cache is optional and the method already logs them. + let disk_peers = self.load_peer_cache().await.unwrap_or_default(); + + dns_peers + .into_iter() + .chain(disk_peers.into_iter()) + .collect() } /// Concurrently resolves `peers` into zero or more IP addresses, with a @@ -161,6 +239,7 @@ impl Config { warn!( "no initial peers in the network config. \ Hint: you must configure at least one peer IP or DNS seeder to run Zebra, \ + give it some previously cached peer IP addresses on disk, \ or make sure Zebra's listener port gets inbound connections." ); return HashSet::new(); @@ -196,6 +275,10 @@ impl Config { /// `max_retries` times. /// /// If DNS continues to fail, returns an empty list of addresses. + /// + /// # Panics + /// + /// If a configured address is an invalid [`SocketAddr`] or DNS name. async fn resolve_host(host: &str, max_retries: usize) -> HashSet { for retries in 0..=max_retries { if let Ok(addresses) = Config::resolve_host_once(host).await { @@ -225,6 +308,10 @@ impl Config { /// /// If `host` is a DNS name, performs DNS resolution with a timeout of a few seconds. 
/// If DNS resolution fails or times out, returns an error. + /// + /// # Panics + /// + /// If a configured address is an invalid [`SocketAddr`] or DNS name. async fn resolve_host_once(host: &str) -> Result, BoxError> { let fut = tokio::net::lookup_host(host); let fut = tokio::time::timeout(DNS_LOOKUP_TIMEOUT, fut); @@ -260,6 +347,13 @@ impl Config { Ok(ip_addrs.into_iter().collect()) } + Ok(Err(e)) if e.kind() == ErrorKind::InvalidInput => { + // TODO: add testnet/mainnet ports, like we do with the listener address + panic!( + "Invalid peer IP address in Zebra config: addresses must have ports:\n\ + resolving {host:?} returned {e:?}" + ); + } Ok(Err(e)) => { tracing::info!(?host, ?e, "DNS error resolving peer IP addresses"); Err(e.into()) @@ -270,6 +364,190 @@ impl Config { } } } + + /// Returns the addresses in the peer list cache file, if available. + pub async fn load_peer_cache(&self) -> io::Result> { + let Some(peer_cache_file) = self.cache_dir.peer_cache_file_path(self.network) else { + return Ok(HashSet::new()); + }; + + let peer_list = match fs::read_to_string(&peer_cache_file).await { + Ok(peer_list) => peer_list, + Err(peer_list_error) => { + // We expect that the cache will be missing for new Zebra installs + if peer_list_error.kind() == ErrorKind::NotFound { + return Ok(HashSet::new()); + } else { + info!( + ?peer_list_error, + "could not load cached peer list, using default seed peers" + ); + return Err(peer_list_error); + } + } + }; + + // Skip and log addresses that don't parse, and automatically deduplicate using the HashSet. + // (These issues shouldn't happen unless users modify the file.) + let peer_list: HashSet = peer_list + .lines() + .filter_map(|peer| { + peer.parse() + .map_err(|peer_parse_error| { + info!( + ?peer_parse_error, + "invalid peer address in cached peer list, skipping" + ); + peer_parse_error + }) + .ok() + }) + .collect(); + + // This log is needed for user debugging, but it's annoying during tests. 
+ #[cfg(not(test))] + info!( + cached_ip_count = ?peer_list.len(), + ?peer_cache_file, + "loaded cached peer IP addresses" + ); + #[cfg(test)] + debug!( + cached_ip_count = ?peer_list.len(), + ?peer_cache_file, + "loaded cached peer IP addresses" + ); + + for ip in &peer_list { + // Count each initial peer, recording the cache file and loaded IP address. + // + // If an IP is returned by DNS seeders and the cache, + // each duplicate adds 1 to the initial peer count. + // (But we only make one initial connection attempt to each IP.) + metrics::counter!( + "zcash.net.peers.initial", + 1, + "cache" => peer_cache_file.display().to_string(), + "remote_ip" => ip.to_string() + ); + } + + Ok(peer_list) + } + + /// Atomically writes a new `peer_list` to the peer list cache file, if configured. + /// If the list is empty, keeps the previous cache file. + /// + /// Also creates the peer cache directory, if it doesn't already exist. + /// + /// Atomic writes avoid corrupting the cache if Zebra panics or crashes, or if multiple Zebra + /// instances try to read and write the same cache file. + pub async fn update_peer_cache(&self, peer_list: HashSet) -> io::Result<()> { + let Some(peer_cache_file) = self.cache_dir.peer_cache_file_path(self.network) else { + return Ok(()); + }; + + if peer_list.is_empty() { + info!( + ?peer_cache_file, + "cacheable peer list was empty, keeping previous cache" + ); + return Ok(()); + } + + // Turn IP addresses into strings + let mut peer_list: Vec = peer_list + .iter() + .take(MAX_PEER_DISK_CACHE_SIZE) + .map(|redacted_peer| redacted_peer.remove_socket_addr_privacy().to_string()) + .collect(); + // # Privacy + // + // Sort to destroy any peer order, which could leak peer connection times. + // (Currently the HashSet argument does this as well.) 
+        peer_list.sort();
+
+        // Make a newline-separated list
+        let peer_data = peer_list.join("\n");
+
+        // Write to a temporary file, so the cache is not corrupted if Zebra shuts down or crashes
+        // at the same time.
+        //
+        // # Concurrency
+        //
+        // We want to use async code to avoid blocking the tokio executor on filesystem operations,
+        // but `tempfile` is implemented using non-async methods. So we wrap its filesystem
+        // operations in `tokio::task::spawn_blocking()`.
+        //
+        // TODO: split this out into an atomic_write_to_tmp_file() method if we need to re-use it
+
+        // Create the peer cache directory if needed
+        let peer_cache_dir = peer_cache_file
+            .parent()
+            .expect("cache path always has a network directory")
+            .to_owned();
+        tokio::fs::create_dir_all(&peer_cache_dir).await?;
+
+        // Give the temporary file a similar name to the permanent cache file,
+        // but hide it in directory listings.
+        let mut tmp_peer_cache_prefix: OsString = ".tmp.".into();
+        tmp_peer_cache_prefix.push(
+            peer_cache_file
+                .file_name()
+                .expect("cache file always has a file name"),
+        );
+
+        // Create the temporary file.
+        // Do blocking filesystem operations on a dedicated thread.
+        let tmp_peer_cache_file = tokio::task::spawn_blocking(move || {
+            // Put the temporary file in the same directory as the permanent file,
+            // so atomic filesystem operations are possible.
+            tempfile::Builder::new()
+                .prefix(&tmp_peer_cache_prefix)
+                .tempfile_in(peer_cache_dir)
+        })
+        .await
+        .expect("unexpected panic creating temporary peer cache file")?;
+
+        // Write the list to the file asynchronously, by extracting the inner file, using it,
+        // then combining it back into a type that will correctly drop the file on error.
+ let (tmp_peer_cache_file, tmp_peer_cache_path) = tmp_peer_cache_file.into_parts(); + let mut tmp_peer_cache_file = tokio::fs::File::from_std(tmp_peer_cache_file); + tmp_peer_cache_file.write_all(peer_data.as_bytes()).await?; + + let tmp_peer_cache_file = + NamedTempFile::from_parts(tmp_peer_cache_file, tmp_peer_cache_path); + + // Atomically replace the current cache with the temporary cache. + // Do blocking filesystem operations on a dedicated thread. + tokio::task::spawn_blocking(move || { + let result = tmp_peer_cache_file.persist(&peer_cache_file); + + // Drops the temp file if needed + match result { + Ok(_temp_file) => { + info!( + cached_ip_count = ?peer_list.len(), + ?peer_cache_file, + "updated cached peer IP addresses" + ); + + for ip in &peer_list { + metrics::counter!( + "zcash.net.peers.cache", + 1, + "cache" => peer_cache_file.display().to_string(), + "remote_ip" => ip.to_string() + ); + } + + Ok(()) + } + Err(error) => Err(error.error), + } + }) + .await + .expect("unexpected panic making temporary peer cache file permanent") + } } impl Default for Config { @@ -300,6 +578,7 @@ impl Default for Config { network: Network::Mainnet, initial_mainnet_peers: mainnet_peers, initial_testnet_peers: testnet_peers, + cache_dir: CacheDir::default(), crawl_new_peer_interval: DEFAULT_CRAWL_NEW_PEER_INTERVAL, // # Security @@ -326,6 +605,7 @@ impl<'de> Deserialize<'de> for Config { network: Network, initial_mainnet_peers: IndexSet, initial_testnet_peers: IndexSet, + cache_dir: CacheDir, peerset_initial_target_size: usize, #[serde(alias = "new_peer_interval", with = "humantime_serde")] crawl_new_peer_interval: Duration, @@ -339,6 +619,7 @@ impl<'de> Deserialize<'de> for Config { network: config.network, initial_mainnet_peers: config.initial_mainnet_peers, initial_testnet_peers: config.initial_testnet_peers, + cache_dir: config.cache_dir, peerset_initial_target_size: config.peerset_initial_target_size, crawl_new_peer_interval: config.crawl_new_peer_interval, } @@ 
-362,6 +643,7 @@ impl<'de> Deserialize<'de> for Config { network: config.network, initial_mainnet_peers: config.initial_mainnet_peers, initial_testnet_peers: config.initial_testnet_peers, + cache_dir: config.cache_dir, peerset_initial_target_size: config.peerset_initial_target_size, crawl_new_peer_interval: config.crawl_new_peer_interval, }) diff --git a/zebra-network/src/config/cache_dir.rs b/zebra-network/src/config/cache_dir.rs new file mode 100644 index 00000000000..112ebe704ec --- /dev/null +++ b/zebra-network/src/config/cache_dir.rs @@ -0,0 +1,74 @@ +//! Cache directory configuration for zebra-network. + +use std::path::{Path, PathBuf}; + +use zebra_chain::parameters::Network; + +/// A cache directory config field. +/// +/// This cache directory configuration field is optional. +/// It defaults to being enabled with the default config path, +/// but also allows a custom path to be set. +#[derive(Clone, Debug, PartialEq, Eq, serde::Serialize, serde::Deserialize)] +#[serde(untagged)] +pub enum CacheDir { + /// Whether the cache directory is enabled with the default path (`true`), + /// or disabled (`false`). + IsEnabled(bool), + + /// Enable the cache directory and use a custom path. + CustomPath(PathBuf), +} + +impl CacheDir { + /// Returns a `CacheDir` enabled with the default path. + pub fn default_path() -> Self { + Self::IsEnabled(true) + } + + /// Returns a disabled `CacheDir`. + pub fn disabled() -> Self { + Self::IsEnabled(false) + } + + /// Returns a custom `CacheDir` enabled with `path`. + pub fn custom_path(path: impl AsRef) -> Self { + Self::CustomPath(path.as_ref().to_owned()) + } + + /// Returns `true` if this `CacheDir` is enabled with the default or a custom path. + pub fn is_enabled(&self) -> bool { + match self { + CacheDir::IsEnabled(is_enabled) => *is_enabled, + CacheDir::CustomPath(_) => true, + } + } + + /// Returns the peer cache file path for `network`, if enabled. 
+ pub fn peer_cache_file_path(&self, network: Network) -> Option { + Some( + self.cache_dir()? + .join("network") + .join(format!("{}.peers", network.lowercase_name())), + ) + } + + /// Returns the `zebra-network` base cache directory, if enabled. + pub fn cache_dir(&self) -> Option { + match self { + Self::IsEnabled(is_enabled) => is_enabled.then(|| { + dirs::cache_dir() + .unwrap_or_else(|| std::env::current_dir().unwrap().join("cache")) + .join("zebra") + }), + + Self::CustomPath(cache_dir) => Some(cache_dir.to_owned()), + } + } +} + +impl Default for CacheDir { + fn default() -> Self { + Self::default_path() + } +} diff --git a/zebra-network/src/constants.rs b/zebra-network/src/constants.rs index 7b7f51b5fa7..e9082bfcd5b 100644 --- a/zebra-network/src/constants.rs +++ b/zebra-network/src/constants.rs @@ -140,6 +140,22 @@ pub const INVENTORY_ROTATION_INTERVAL: Duration = Duration::from_secs(53); /// don't synchronise with other crawls. pub const DEFAULT_CRAWL_NEW_PEER_INTERVAL: Duration = Duration::from_secs(61); +/// The peer address disk cache update interval. +/// +/// This should be longer than [`DEFAULT_CRAWL_NEW_PEER_INTERVAL`], +/// but shorter than [`MAX_PEER_ACTIVE_FOR_GOSSIP`]. +/// +/// We use a short interval so Zebra instances which are restarted frequently +/// still have useful caches. +pub const PEER_DISK_CACHE_UPDATE_INTERVAL: Duration = Duration::from_secs(5 * 60); + +/// The maximum number of addresses in the peer disk cache. +/// +/// This is chosen to be less than the number of active peers, +/// and approximately the same as the number of seed peers returned by DNS. +/// It is a tradeoff between fingerprinting attacks, DNS pollution risk, and cache pollution risk. +pub const MAX_PEER_DISK_CACHE_SIZE: usize = 75; + /// The maximum duration since a peer was last seen to consider it reachable. /// /// This is used to prevent Zebra from gossiping addresses that are likely unreachable. 
Peers that diff --git a/zebra-network/src/lib.rs b/zebra-network/src/lib.rs index c867c135d8c..97eafef656e 100644 --- a/zebra-network/src/lib.rs +++ b/zebra-network/src/lib.rs @@ -103,6 +103,7 @@ //! //! Peer Inventory Service: //! * tracks gossiped `inv` advertisements for each peer +//! * updated before each `PeerSet` request is processed //! * tracks missing inventory for each peer //! * used by the `PeerSet` to route block and transaction requests //! to peers that have the requested data @@ -112,10 +113,15 @@ //! [`AddressBook`] Service: //! * maintains a list of peer addresses and associated connection attempt metadata //! * address book metadata is used to prioritise peer connection attempts +//! * updated by an independent thread based on peer connection status changes +//! * caches peer addresses to disk regularly using an independent task //! //! Initial Seed Peer Task: -//! * initiates new outbound peer connections to seed peers, resolving them via DNS if required -//! * adds seed peer addresses to the [`AddressBook`] +//! On startup: +//! * loads seed peers from the config, resolving them via DNS if required +//! * loads cached peer addresses from disk +//! * initiates new outbound peer connections to seed and cached peers +//! * adds seed and cached peer addresses to the [`AddressBook`] //! //! Peer Crawler Task: //! 
* discovers new peer addresses by sending `Addr` requests to connected peers @@ -151,6 +157,7 @@ pub mod constants; mod isolated; mod meta_addr; mod peer; +mod peer_cache_updater; mod peer_set; mod policies; mod protocol; @@ -174,7 +181,7 @@ pub use crate::{ pub use crate::{ address_book::AddressBook, address_book_peers::AddressBookPeers, - config::Config, + config::{CacheDir, Config}, isolated::{connect_isolated, connect_isolated_tcp_direct}, meta_addr::{PeerAddrState, PeerSocketAddr}, peer::{Client, ConnectedAddr, ConnectionInfo, HandshakeError, PeerError, SharedPeerError}, diff --git a/zebra-network/src/meta_addr/peer_addr.rs b/zebra-network/src/meta_addr/peer_addr.rs index 09876a080a7..e7aa6859318 100644 --- a/zebra-network/src/meta_addr/peer_addr.rs +++ b/zebra-network/src/meta_addr/peer_addr.rs @@ -64,3 +64,11 @@ impl DerefMut for PeerSocketAddr { &mut self.0 } } + +impl PeerSocketAddr { + /// Return the underlying [`SocketAddr`], which allows sensitive peer address information to + /// be printed and logged. + pub fn remove_socket_addr_privacy(&self) -> SocketAddr { + **self + } +} diff --git a/zebra-network/src/peer_cache_updater.rs b/zebra-network/src/peer_cache_updater.rs new file mode 100644 index 00000000000..3d23f4d27a5 --- /dev/null +++ b/zebra-network/src/peer_cache_updater.rs @@ -0,0 +1,63 @@ +//! An async task that regularly updates the peer cache on disk from the current address book. + +use std::{ + io, + sync::{Arc, Mutex}, +}; + +use chrono::Utc; +use tokio::time::sleep; + +use crate::{ + constants::{DNS_LOOKUP_TIMEOUT, PEER_DISK_CACHE_UPDATE_INTERVAL}, + meta_addr::MetaAddr, + AddressBook, BoxError, Config, +}; + +/// An ongoing task that regularly caches the current `address_book` to disk, based on `config`. +pub async fn peer_cache_updater( + config: Config, + address_book: Arc>, +) -> Result<(), BoxError> { + // Wait until we've queried DNS and (hopefully) sent peers to the address book. 
+ // Ideally we'd wait for at least one peer crawl, but that makes tests very slow. + // + // TODO: turn the initial sleep time into a parameter of this function, + // and allow it to be set in tests + sleep(DNS_LOOKUP_TIMEOUT * 2).await; + + loop { + // Ignore errors because updating the cache is optional. + // Errors are already logged by the functions we're calling. + let _ = update_peer_cache_once(&config, &address_book).await; + + sleep(PEER_DISK_CACHE_UPDATE_INTERVAL).await; + } +} + +/// Caches peers from the current `address_book` to disk, based on `config`. +pub async fn update_peer_cache_once( + config: &Config, + address_book: &Arc>, +) -> io::Result<()> { + let peer_list = cacheable_peers(address_book) + .iter() + .map(|meta_addr| meta_addr.addr) + .collect(); + + config.update_peer_cache(peer_list).await +} + +/// Returns a list of cacheable peers, blocking for as short a time as possible. +fn cacheable_peers(address_book: &Arc>) -> Vec { + // TODO: use spawn_blocking() here, if needed to handle address book mutex load + let now = Utc::now(); + + // # Concurrency + // + // We return from this function immediately to make sure the address book is unlocked. 
+ address_book + .lock() + .expect("unexpected panic in previous thread while accessing the address book") + .cacheable(now) +} diff --git a/zebra-network/src/peer_set/initialize.rs b/zebra-network/src/peer_set/initialize.rs index 31c5a662586..72e1b8878b3 100644 --- a/zebra-network/src/peer_set/initialize.rs +++ b/zebra-network/src/peer_set/initialize.rs @@ -38,6 +38,7 @@ use crate::{ self, address_is_valid_for_inbound_listeners, HandshakeRequest, MinimumPeerVersion, OutboundConnectorRequest, PeerPreference, }, + peer_cache_updater::peer_cache_updater, peer_set::{set::MorePeers, ActiveConnectionCounter, CandidateSet, ConnectionTracker, PeerSet}, AddressBook, BoxError, Config, PeerSocketAddr, Request, Response, }; @@ -186,7 +187,7 @@ where ); let listen_guard = tokio::spawn(listen_fut.in_current_span()); - // 2. Initial peers, specified in the config. + // 2. Initial peers, specified in the config and cached on disk. let initial_peers_fut = add_initial_peers( config.clone(), outbound_connector.clone(), @@ -224,8 +225,9 @@ where let _ = demand_tx.try_send(MorePeers); } + // Start the peer crawler let crawl_fut = crawl_and_dial( - config, + config.clone(), demand_tx, demand_rx, candidates, @@ -235,15 +237,24 @@ where ); let crawl_guard = tokio::spawn(crawl_fut.in_current_span()); + // Start the peer disk cache updater + let peer_cache_updater_fut = peer_cache_updater(config, address_book.clone()); + let peer_cache_updater_guard = tokio::spawn(peer_cache_updater_fut.in_current_span()); + handle_tx - .send(vec![listen_guard, crawl_guard, address_book_updater_guard]) + .send(vec![ + listen_guard, + crawl_guard, + address_book_updater_guard, + peer_cache_updater_guard, + ]) .unwrap(); (peer_set, address_book) } -/// Use the provided `outbound_connector` to connect to the configured initial peers, -/// then send the resulting peer connections over `peerset_tx`. 
+/// Use the provided `outbound_connector` to connect to the configured DNS seeder and +/// disk cache initial peers, then send the resulting peer connections over `peerset_tx`. /// /// Also sends every initial peer address to the `address_book_updater`. #[instrument(skip(config, outbound_connector, peerset_tx, address_book_updater))] @@ -273,9 +284,12 @@ where "Outbound Connections", ); + // TODO: update when we add Tor peers or other kinds of addresses. + let ipv4_peer_count = initial_peers.iter().filter(|ip| ip.is_ipv4()).count(); + let ipv6_peer_count = initial_peers.iter().filter(|ip| ip.is_ipv6()).count(); info!( - initial_peer_count = ?initial_peers.len(), - ?initial_peers, + ?ipv4_peer_count, + ?ipv6_peer_count, "connecting to initial peer set" ); @@ -385,7 +399,7 @@ where ?handshake_success_total, ?handshake_error_total, ?outbound_connections, - "finished connecting to initial seed peers" + "finished connecting to initial seed and disk cache peers" ); Ok(active_outbound_connections) @@ -423,10 +437,10 @@ async fn limit_initial_peers( .entry(preference) .or_default() .push(peer_addr), - Err(error) => warn!( + Err(error) => info!( ?peer_addr, ?error, - "invalid initial peer from DNS seeder or configured IP address", + "invalid initial peer from DNS seeder, configured IP address, or disk cache", ), } } diff --git a/zebra-network/src/peer_set/initialize/tests/vectors.rs b/zebra-network/src/peer_set/initialize/tests/vectors.rs index 76110e8e9d1..9df6b29ec0b 100644 --- a/zebra-network/src/peer_set/initialize/tests/vectors.rs +++ b/zebra-network/src/peer_set/initialize/tests/vectors.rs @@ -31,6 +31,7 @@ use zebra_test::net::random_known_port; use crate::{ address_book_updater::AddressBookUpdater, + config::CacheDir, constants, init, meta_addr::{MetaAddr, PeerAddrState}, peer::{self, ClientTestHarness, HandshakeRequest, OutboundConnectorRequest}, @@ -53,6 +54,11 @@ use Network::*; /// Using a very short time can make the crawler not run at all. 
const CRAWLER_TEST_DURATION: Duration = Duration::from_secs(10); +/// The amount of time to run the peer cache updater task, before testing what it has done. +/// +/// Using a very short time can make the peer cache updater not run at all. +const PEER_CACHE_UPDATER_TEST_DURATION: Duration = Duration::from_secs(25); + /// The amount of time to run the listener, before testing what it has done. /// /// Using a very short time can make the listener not run at all. @@ -288,6 +294,89 @@ async fn peer_limit_two_testnet() { // Any number of address book peers is valid here, because some peers might have failed. } +/// Test zebra-network writes a peer cache file, and can read it back manually. +#[tokio::test] +async fn written_peer_cache_can_be_read_manually() { + let _init_guard = zebra_test::init(); + + if zebra_test::net::zebra_skip_network_tests() { + return; + } + + let nil_inbound_service = service_fn(|_| async { Ok(Response::Nil) }); + + // The default config should have an active peer cache + let config = Config::default(); + let address_book = + init_with_peer_limit(25, nil_inbound_service, Mainnet, None, config.clone()).await; + + // Let the peer cache updater run for a while. + tokio::time::sleep(PEER_CACHE_UPDATER_TEST_DURATION).await; + + let approximate_peer_count = address_book + .lock() + .expect("previous thread panicked while holding address book lock") + .len(); + if approximate_peer_count > 0 { + let cached_peers = config + .load_peer_cache() + .await + .expect("unexpected error reading peer cache"); + + assert!( + !cached_peers.is_empty(), + "unexpected empty peer cache from manual load: {:?}", + config.cache_dir.peer_cache_file_path(config.network) + ); + } +} + +/// Test zebra-network writes a peer cache file, and reads it back automatically. 
+#[tokio::test] +async fn written_peer_cache_is_automatically_read_on_startup() { + let _init_guard = zebra_test::init(); + + if zebra_test::net::zebra_skip_network_tests() { + return; + } + + let nil_inbound_service = service_fn(|_| async { Ok(Response::Nil) }); + + // The default config should have an active peer cache + let mut config = Config::default(); + let address_book = + init_with_peer_limit(25, nil_inbound_service, Mainnet, None, config.clone()).await; + + // Let the peer cache updater run for a while. + tokio::time::sleep(PEER_CACHE_UPDATER_TEST_DURATION).await; + + let approximate_peer_count = address_book + .lock() + .expect("previous thread panicked while holding address book lock") + .len(); + if approximate_peer_count > 0 { + // Make sure our only peers are coming from the disk cache + config.initial_mainnet_peers = Default::default(); + + let address_book = + init_with_peer_limit(25, nil_inbound_service, Mainnet, None, config.clone()).await; + + // Let the peer cache reader run and fill the address book. + tokio::time::sleep(CRAWLER_TEST_DURATION).await; + + // We should have loaded at least one peer from the cache + let approximate_cached_peer_count = address_book + .lock() + .expect("previous thread panicked while holding address book lock") + .len(); + assert!( + approximate_cached_peer_count > 0, + "unexpected empty address book using cache from previous instance: {:?}", + config.cache_dir.peer_cache_file_path(config.network) + ); + } +} + /// Test the crawler with an outbound peer limit of zero peers, and a connector that panics. 
#[tokio::test] async fn crawler_peer_limit_zero_connect_panic() { @@ -1126,6 +1215,7 @@ async fn self_connections_should_fail() { initial_mainnet_peers: IndexSet::new(), initial_testnet_peers: IndexSet::new(), + cache_dir: CacheDir::disabled(), ..Config::default() }; @@ -1371,6 +1461,7 @@ async fn local_listener_port_with(listen_addr: SocketAddr, network: Network) { // Stop Zebra making outbound connections initial_mainnet_peers: IndexSet::new(), initial_testnet_peers: IndexSet::new(), + cache_dir: CacheDir::disabled(), ..Config::default() }; @@ -1706,6 +1797,8 @@ where let config = Config { initial_mainnet_peers: peers, + // We want exactly the above list of peers, without any cached peers. + cache_dir: CacheDir::disabled(), network: Network::Mainnet, listen_addr: unused_v4, diff --git a/zebra-state/src/config.rs b/zebra-state/src/config.rs index ba62f834c9a..0475aae6bcc 100644 --- a/zebra-state/src/config.rs +++ b/zebra-state/src/config.rs @@ -15,22 +15,19 @@ use zebra_chain::parameters::Network; #[derive(Clone, Debug, Deserialize, Serialize)] #[serde(deny_unknown_fields, default)] pub struct Config { - /// The root directory for storing cached data. + /// The root directory for storing cached block data. /// - /// Cached data includes any state that can be replicated from the network - /// (e.g., the chain state, the blocks, the UTXO set, etc.). It does *not* - /// include private data that cannot be replicated from the network, such as - /// wallet data. That data is not handled by `zebra-state`. + /// If you change this directory, you might also want to change `network.cache_dir`. /// - /// Each state format version and network has a separate state. - /// These states are stored in `state/vN/mainnet` and `state/vN/testnet` subdirectories, - /// underneath the `cache_dir` path, where `N` is the state format version. 
+ /// This cache stores permanent blockchain state that can be replicated from + /// the network, including the best chain, blocks, the UTXO set, and other indexes. + /// Any state that can be rolled back is only stored in memory. /// - /// When Zebra's state format changes, it creates a new state subdirectory for that version, - /// and re-syncs from genesis. + /// The `zebra-state` cache does *not* include any private data, such as wallet data. /// - /// It is ok to delete the entire cached state directory. - /// If you do, Zebra will re-sync from genesis next time it is launched. + /// You can delete the entire cached state directory, but it will impact your node's + /// readiness and network usage. If you do, Zebra will re-sync from genesis the next + /// time it is launched. /// /// The default directory is platform dependent, based on /// [`dirs::cache_dir()`](https://docs.rs/dirs/3.0.1/dirs/fn.cache_dir.html): @@ -48,6 +45,18 @@ pub struct Config { /// directory for this file before running Zebra, and make sure the Zebra user /// account has exclusive access to that directory, and other users can't modify /// its parent directories. + /// + /// # Implementation Details + /// + /// Each state format version and network has a separate state. + /// These states are stored in `state/vN/mainnet` and `state/vN/testnet` subdirectories, + /// underneath the `cache_dir` path, where `N` is the state format version. + /// + /// When Zebra's state format changes, it creates a new state subdirectory for that version, + /// and re-syncs from genesis. + /// + /// Old state versions are automatically deleted at startup. You can also manually delete old + /// state versions. pub cache_dir: PathBuf, /// Whether to use an ephemeral database. 
@@ -100,10 +109,7 @@ fn gen_temp_path(prefix: &str) -> PathBuf { impl Config { /// Returns the path for the finalized state database pub fn db_path(&self, network: Network) -> PathBuf { - let net_dir = match network { - Network::Mainnet => "mainnet", - Network::Testnet => "testnet", - }; + let net_dir = network.lowercase_name(); if self.ephemeral { gen_temp_path(&format!( diff --git a/zebrad/src/commands/start.rs b/zebrad/src/commands/start.rs index 07a8b5b19d8..3de502113a3 100644 --- a/zebrad/src/commands/start.rs +++ b/zebrad/src/commands/start.rs @@ -2,7 +2,7 @@ //! //! ## Application Structure //! -//! A zebra node consists of the following services and tasks: +//! A zebra node consists of the following major services and tasks: //! //! Peers: //! * Peer Connection Pool Service @@ -12,6 +12,9 @@ //! * maintains a list of peer addresses, and connection priority metadata //! * discovers new peer addresses from existing peer connections //! * initiates new outbound peer connections in response to demand from tasks within this node +//! * Peer Cache Service +//! * Reads previous peer cache on startup, and adds it to the configured DNS seed peers +//! * Periodically updates the peer cache on disk from the latest address book state //! //! Blocks & Mempool Transactions: //! 
* Consensus Service diff --git a/zebrad/src/components/inbound/tests/real_peer_set.rs b/zebrad/src/components/inbound/tests/real_peer_set.rs index 20bedc665da..35cfe7345d7 100644 --- a/zebrad/src/components/inbound/tests/real_peer_set.rs +++ b/zebrad/src/components/inbound/tests/real_peer_set.rs @@ -20,7 +20,7 @@ use zebra_chain::{ }; use zebra_consensus::{error::TransactionError, router::RouterError, transaction}; use zebra_network::{ - canonical_peer_addr, connect_isolated_tcp_direct_with_inbound, types::InventoryHash, + canonical_peer_addr, connect_isolated_tcp_direct_with_inbound, types::InventoryHash, CacheDir, Config as NetworkConfig, InventoryResponse, PeerError, Request, Response, SharedPeerError, }; use zebra_node_services::mempool; @@ -647,6 +647,7 @@ async fn setup( // Stop Zebra making outbound connections initial_mainnet_peers: IndexSet::new(), initial_testnet_peers: IndexSet::new(), + cache_dir: CacheDir::disabled(), ..NetworkConfig::default() }; diff --git a/zebrad/tests/acceptance.rs b/zebrad/tests/acceptance.rs index c53f5c4b22b..22fb1eeac79 100644 --- a/zebrad/tests/acceptance.rs +++ b/zebrad/tests/acceptance.rs @@ -165,7 +165,10 @@ use common::{ config::{ config_file_full_path, configs_dir, default_test_config, persistent_test_config, testdir, }, - launch::{spawn_zebrad_for_rpc, ZebradTestDirExt, BETWEEN_NODES_DELAY, LAUNCH_DELAY}, + launch::{ + spawn_zebrad_for_rpc, ZebradTestDirExt, BETWEEN_NODES_DELAY, EXTENDED_LAUNCH_DELAY, + LAUNCH_DELAY, + }, lightwalletd::{can_spawn_lightwalletd_for_rpc, spawn_lightwalletd_for_rpc}, sync::{ create_cached_database_height, sync_until, MempoolBehavior, LARGE_CHECKPOINT_TEST_HEIGHT, @@ -371,6 +374,7 @@ async fn db_init_outside_future_executor() -> Result<()> { Ok(()) } +/// Check that the block state and peer list caches are written to disk. 
#[test] fn persistent_mode() -> Result<()> { let _init_guard = zebra_test::init(); @@ -381,7 +385,7 @@ fn persistent_mode() -> Result<()> { let mut child = testdir.spawn_child(args!["-v", "start"])?; // Run the program and kill it after a few seconds - std::thread::sleep(LAUNCH_DELAY); + std::thread::sleep(EXTENDED_LAUNCH_DELAY); child.kill(false)?; let output = child.wait_with_output()?; @@ -395,6 +399,13 @@ fn persistent_mode() -> Result<()> { "state directory empty despite persistent state config" ); + let cache_dir = testdir.path().join("network"); + assert_with_context!( + cache_dir.read_dir()?.count() > 0, + &output, + "network directory empty despite persistent network config" + ); + Ok(()) } @@ -424,6 +435,9 @@ fn misconfigured_ephemeral_missing_directory() -> Result<()> { ) } +/// Check that the state directory created on disk matches the state config. +/// +/// TODO: do a similar test for `network.cache_dir` #[tracing::instrument] fn ephemeral(cache_dir_config: EphemeralConfig, cache_dir_check: EphemeralCheck) -> Result<()> { use std::io::ErrorKind; @@ -449,7 +463,7 @@ fn ephemeral(cache_dir_config: EphemeralConfig, cache_dir_check: EphemeralCheck) .with_config(&mut config)? 
.spawn_child(args!["start"])?; // Run the program and kill it after a few seconds - std::thread::sleep(LAUNCH_DELAY); + std::thread::sleep(EXTENDED_LAUNCH_DELAY); child.kill(false)?; let output = child.wait_with_output()?; @@ -472,7 +486,7 @@ fn ephemeral(cache_dir_config: EphemeralConfig, cache_dir_check: EphemeralCheck) ignored_cache_dir.read_dir().unwrap().collect::>() ); - ["state", "zebrad.toml"].iter() + ["state", "network", "zebrad.toml"].iter() } // we didn't create the state directory, so it should not exist @@ -490,7 +504,7 @@ fn ephemeral(cache_dir_config: EphemeralConfig, cache_dir_check: EphemeralCheck) ignored_cache_dir.read_dir().unwrap().collect::>() ); - ["zebrad.toml"].iter() + ["network", "zebrad.toml"].iter() } }; @@ -754,7 +768,7 @@ fn last_config_is_stored() -> Result<()> { Or run: \n\ cargo build {}--bin zebrad && \n\ zebrad generate | \n\ - sed \"s/cache_dir = '.*'/cache_dir = 'cache_dir'/\" > \n\ + sed 's/cache_dir = \".*\"/cache_dir = \"cache_dir\"/' > \n\ zebrad/tests/common/configs/{}.toml", if cfg!(feature = "getblocktemplate-rpcs") { GET_BLOCK_TEMPLATE_CONFIG_PREFIX diff --git a/zebrad/tests/common/configs/getblocktemplate-v1.0.0-rc.9.toml b/zebrad/tests/common/configs/getblocktemplate-v1.0.0-rc.9.toml new file mode 100644 index 00000000000..c6629087b72 --- /dev/null +++ b/zebrad/tests/common/configs/getblocktemplate-v1.0.0-rc.9.toml @@ -0,0 +1,74 @@ +# Default configuration for zebrad. +# +# This file can be used as a skeleton for custom configs. +# +# Unspecified fields use default values. Optional fields are Some(field) if the +# field is present and None if it is absent. +# +# This file is generated as an example using zebrad's current defaults. +# You should set only the config options you want to keep, and delete the rest. +# Only a subset of fields are present in the skeleton, since optional values +# whose default is None are omitted. 
+# +# The config format (including a complete list of sections and fields) is +# documented here: +# https://doc.zebra.zfnd.org/zebrad/config/struct.ZebradConfig.html +# +# zebrad attempts to load configs in the following order: +# +# 1. The -c flag on the command line, e.g., `zebrad -c myconfig.toml start`; +# 2. The file `zebrad.toml` in the users's preference directory (platform-dependent); +# 3. The default config. + +[consensus] +checkpoint_sync = true +debug_skip_parameter_preload = false + +[mempool] +eviction_memory_time = "1h" +tx_cost_limit = 80000000 + +[metrics] + +[mining] +debug_like_zcashd = true + +[network] +cache_dir = true +crawl_new_peer_interval = "1m 1s" +initial_mainnet_peers = [ + "dnsseed.z.cash:8233", + "dnsseed.str4d.xyz:8233", + "mainnet.seeder.zfnd.org:8233", + "mainnet.is.yolo.money:8233", +] +initial_testnet_peers = [ + "dnsseed.testnet.z.cash:18233", + "testnet.seeder.zfnd.org:18233", + "testnet.is.yolo.money:18233", +] +listen_addr = "0.0.0.0:8233" +network = "Mainnet" +peerset_initial_target_size = 25 + +[rpc] +debug_force_finished_sync = false +parallel_cpu_threads = 0 + +[state] +cache_dir = "cache_dir" +delete_old_database = true +ephemeral = false + +[sync] +checkpoint_verify_concurrency_limit = 1000 +download_concurrency_limit = 50 +full_verify_concurrency_limit = 20 +parallel_cpu_threads = 0 + +[tracing] +buffer_limit = 128000 +force_use_color = false +use_color = true +use_journald = false + diff --git a/zebrad/tests/common/configs/net-cache-custom-v1.0.0-rc.9.toml b/zebrad/tests/common/configs/net-cache-custom-v1.0.0-rc.9.toml new file mode 100644 index 00000000000..30ac9d46c1f --- /dev/null +++ b/zebrad/tests/common/configs/net-cache-custom-v1.0.0-rc.9.toml @@ -0,0 +1,16 @@ +# Custom network.cache_dir config parsing test + +[network] +# Enable the peer address cache with a custom path +cache_dir = "/tmp" + +# Use a custom seed peer config +# https://en.wikipedia.org/wiki/IPv6_address#Documentation +initial_mainnet_peers = 
[ + "192.0.2.0:8233", + "2001:db8::0:8233", +] +initial_testnet_peers = [ + "192.0.2.1:18233", + "2001:db8::1:18233", +] diff --git a/zebrad/tests/common/configs/net-cache-disabled-v1.0.0-rc.9.toml b/zebrad/tests/common/configs/net-cache-disabled-v1.0.0-rc.9.toml new file mode 100644 index 00000000000..c2d33eecc0e --- /dev/null +++ b/zebrad/tests/common/configs/net-cache-disabled-v1.0.0-rc.9.toml @@ -0,0 +1,9 @@ +# Custom network.cache_dir config parsing test + +[network] +# Disable the peer address cache +cache_dir = false + +# Disable seed peers as well, to create an isolated node +initial_mainnet_peers = [] +initial_testnet_peers = [] diff --git a/zebrad/tests/common/configs/net-cache-enabled-v1.0.0-rc.9.toml b/zebrad/tests/common/configs/net-cache-enabled-v1.0.0-rc.9.toml new file mode 100644 index 00000000000..40e119be9b3 --- /dev/null +++ b/zebrad/tests/common/configs/net-cache-enabled-v1.0.0-rc.9.toml @@ -0,0 +1,18 @@ +# Custom network.cache_dir config parsing test + +[network] +# Enable the peer address cache with the default path +cache_dir = true + +# Use the default seed peer config +initial_mainnet_peers = [ + "dnsseed.z.cash:8233", + "dnsseed.str4d.xyz:8233", + "mainnet.seeder.zfnd.org:8233", + "mainnet.is.yolo.money:8233", +] +initial_testnet_peers = [ + "dnsseed.testnet.z.cash:18233", + "testnet.seeder.zfnd.org:18233", + "testnet.is.yolo.money:18233", +] diff --git a/zebrad/tests/common/configs/v1.0.0-rc.9.toml b/zebrad/tests/common/configs/v1.0.0-rc.9.toml new file mode 100644 index 00000000000..52cd503be0b --- /dev/null +++ b/zebrad/tests/common/configs/v1.0.0-rc.9.toml @@ -0,0 +1,71 @@ +# Default configuration for zebrad. +# +# This file can be used as a skeleton for custom configs. +# +# Unspecified fields use default values. Optional fields are Some(field) if the +# field is present and None if it is absent. +# +# This file is generated as an example using zebrad's current defaults. 
+# You should set only the config options you want to keep, and delete the rest. +# Only a subset of fields are present in the skeleton, since optional values +# whose default is None are omitted. +# +# The config format (including a complete list of sections and fields) is +# documented here: +# https://doc.zebra.zfnd.org/zebrad/config/struct.ZebradConfig.html +# +# zebrad attempts to load configs in the following order: +# +# 1. The -c flag on the command line, e.g., `zebrad -c myconfig.toml start`; +# 2. The file `zebrad.toml` in the users's preference directory (platform-dependent); +# 3. The default config. + +[consensus] +checkpoint_sync = true +debug_skip_parameter_preload = false + +[mempool] +eviction_memory_time = "1h" +tx_cost_limit = 80000000 + +[metrics] + +[network] +cache_dir = true +crawl_new_peer_interval = "1m 1s" +initial_mainnet_peers = [ + "dnsseed.z.cash:8233", + "dnsseed.str4d.xyz:8233", + "mainnet.seeder.zfnd.org:8233", + "mainnet.is.yolo.money:8233", +] +initial_testnet_peers = [ + "dnsseed.testnet.z.cash:18233", + "testnet.seeder.zfnd.org:18233", + "testnet.is.yolo.money:18233", +] +listen_addr = "0.0.0.0:8233" +network = "Mainnet" +peerset_initial_target_size = 25 + +[rpc] +debug_force_finished_sync = false +parallel_cpu_threads = 1 + +[state] +cache_dir = "cache_dir" +delete_old_database = true +ephemeral = false + +[sync] +checkpoint_verify_concurrency_limit = 1000 +download_concurrency_limit = 50 +full_verify_concurrency_limit = 20 +parallel_cpu_threads = 0 + +[tracing] +buffer_limit = 128000 +force_use_color = false +use_color = true +use_journald = false + diff --git a/zebrad/tests/common/launch.rs b/zebrad/tests/common/launch.rs index ac8590ccba1..8b8738b41d4 100644 --- a/zebrad/tests/common/launch.rs +++ b/zebrad/tests/common/launch.rs @@ -17,6 +17,7 @@ use indexmap::IndexSet; use tempfile::TempDir; use zebra_chain::parameters::Network; +use zebra_network::CacheDir; use zebra_test::{ args, command::{Arguments, TestDirExt}, @@ -36,6 
+37,10 @@ use crate::common::{ /// metrics or tracing test failures in Windows CI. pub const LAUNCH_DELAY: Duration = Duration::from_secs(15); +/// After we launch `zebrad`, wait this long in extended tests. +/// See [`LAUNCH_DELAY`] for details. +pub const EXTENDED_LAUNCH_DELAY: Duration = Duration::from_secs(25); + /// After we launch `lightwalletd`, wait this long for the command to start up, /// take the actions expected by the quick tests, and log the expected logs. /// @@ -167,9 +172,16 @@ where } fn cache_config_update_helper(self, config: &mut ZebradConfig) -> Result { + let dir = self.as_ref(); + let cache_dir = PathBuf::from(dir); + + // If the peer cache has already been disabled, don't re-enable it + if config.network.cache_dir.is_enabled() { + config.network.cache_dir = CacheDir::custom_path(&cache_dir); + } + + // Only replace the state cache directory if it's going to be used if !config.state.ephemeral { - let dir = self.as_ref(); - let cache_dir = PathBuf::from(dir); config.state.cache_dir = cache_dir; } @@ -232,6 +244,8 @@ pub fn spawn_zebrad_for_rpc + std::fmt::Debug>( if !use_internet_connection { config.network.initial_mainnet_peers = IndexSet::new(); config.network.initial_testnet_peers = IndexSet::new(); + // Avoid re-using cached peers from disk when we're supposed to be a disconnected instance + config.network.cache_dir = CacheDir::disabled(); config.mempool.debug_enable_at_height = Some(0); } From 815c77870d3d9b0839f954ca1b075603691dd3be Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 6 Jun 2023 11:14:53 +0000 Subject: [PATCH 047/265] build(deps): bump once_cell from 1.17.2 to 1.18.0 (#6816) Bumps [once_cell](https://github.com/matklad/once_cell) from 1.17.2 to 1.18.0. 
- [Changelog](https://github.com/matklad/once_cell/blob/master/CHANGELOG.md) - [Commits](https://github.com/matklad/once_cell/compare/v1.17.2...v1.18.0) --- updated-dependencies: - dependency-name: once_cell dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- zebra-consensus/Cargo.toml | 2 +- zebra-state/Cargo.toml | 2 +- zebra-test/Cargo.toml | 2 +- zebrad/Cargo.toml | 2 +- 5 files changed, 6 insertions(+), 6 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index a3d81a998e2..3bc9c5fee6e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2761,9 +2761,9 @@ dependencies = [ [[package]] name = "once_cell" -version = "1.17.2" +version = "1.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9670a07f94779e00908f3e686eab508878ebb390ba6e604d3a284c00e8d0487b" +checksum = "dd8b5dd2ae5ed71462c540258bedcb51965123ad7e7ccf4b9a8cafaa4a63576d" [[package]] name = "oorandom" diff --git a/zebra-consensus/Cargo.toml b/zebra-consensus/Cargo.toml index deab9ce8b1a..084cb357f36 100644 --- a/zebra-consensus/Cargo.toml +++ b/zebra-consensus/Cargo.toml @@ -37,7 +37,7 @@ rayon = "1.7.0" chrono = { version = "0.4.26", default-features = false, features = ["clock", "std"] } displaydoc = "0.2.4" lazy_static = "1.4.0" -once_cell = "1.17.2" +once_cell = "1.18.0" serde = { version = "1.0.163", features = ["serde_derive"] } futures = "0.3.28" diff --git a/zebra-state/Cargo.toml b/zebra-state/Cargo.toml index 8e2100d83e4..fe4e18addf9 100644 --- a/zebra-state/Cargo.toml +++ b/zebra-state/Cargo.toml @@ -75,7 +75,7 @@ color-eyre = "0.6.2" # Enable a feature that makes tinyvec compile much faster. 
tinyvec = { version = "1.6.0", features = ["rustc_1_55"] } -once_cell = "1.17.2" +once_cell = "1.18.0" spandoc = "0.2.2" hex = { version = "0.4.3", features = ["serde"] } diff --git a/zebra-test/Cargo.toml b/zebra-test/Cargo.toml index fd034a74efe..695e9b4848e 100644 --- a/zebra-test/Cargo.toml +++ b/zebra-test/Cargo.toml @@ -13,7 +13,7 @@ indexmap = "1.9.3" lazy_static = "1.4.0" insta = "1.29.0" proptest = "1.2.0" -once_cell = "1.17.2" +once_cell = "1.18.0" rand = { version = "0.8.5", package = "rand" } regex = "1.8.4" diff --git a/zebrad/Cargo.toml b/zebrad/Cargo.toml index b69396dbc63..12a9bcc83ac 100644 --- a/zebrad/Cargo.toml +++ b/zebrad/Cargo.toml @@ -194,7 +194,7 @@ tonic-build = { version = "0.9.2", optional = true } abscissa_core = { version = "0.5", features = ["testing"] } hex = "0.4.3" jsonrpc-core = "18.0.0" -once_cell = "1.17.2" +once_cell = "1.18.0" regex = "1.8.4" semver = "1.0.17" From 355f1233f5421ae860ffbffaa48129d6274ffdc4 Mon Sep 17 00:00:00 2001 From: teor Date: Wed, 7 Jun 2023 07:18:57 +1000 Subject: [PATCH 048/265] change(db): Make the first stable release forward-compatible with planned state changes (#6813) * Implement minor and patch database format versions * Log and update database format versions when opening database * Refactor the current list of column families into a constant * Open all available column families, including from future Zebra versions * Refactor note commitment tree lookups to go through the height methods * Make Sapling/Orchard note commitment tree lookup forwards compatible * Ignore errors reading column family lists from disk * Update format version comments and TODOs * Correctly log newly created database formats --------- Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com> --- Cargo.lock | 1 + zebra-state/Cargo.toml | 1 + zebra-state/src/config.rs | 98 ++++++++++- zebra-state/src/constants.rs | 47 +++++- .../src/service/finalized_state/disk_db.rs | 152 ++++++++++++------ 
.../finalized_state/zebra_db/shielded.rs | 63 ++++++-- 6 files changed, 290 insertions(+), 72 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 3bc9c5fee6e..072c69cff45 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5962,6 +5962,7 @@ dependencies = [ "regex", "rlimit", "rocksdb", + "semver 1.0.17", "serde", "serde_json", "spandoc", diff --git a/zebra-state/Cargo.toml b/zebra-state/Cargo.toml index fe4e18addf9..8caba9ecbc3 100644 --- a/zebra-state/Cargo.toml +++ b/zebra-state/Cargo.toml @@ -46,6 +46,7 @@ mset = "0.1.1" regex = "1.8.4" rlimit = "0.9.1" rocksdb = { version = "0.21.0", default_features = false, features = ["lz4"] } +semver = "1.0.17" serde = { version = "1.0.163", features = ["serde_derive"] } tempfile = "3.5.0" thiserror = "1.0.40" diff --git a/zebra-state/src/config.rs b/zebra-state/src/config.rs index 0475aae6bcc..3d09aecdc8c 100644 --- a/zebra-state/src/config.rs +++ b/zebra-state/src/config.rs @@ -1,16 +1,26 @@ //! Cached state configuration for Zebra. use std::{ - fs::{canonicalize, remove_dir_all, DirEntry, ReadDir}, + fs::{self, canonicalize, remove_dir_all, DirEntry, ReadDir}, + io::ErrorKind, path::{Path, PathBuf}, }; +use semver::Version; use serde::{Deserialize, Serialize}; use tokio::task::{spawn_blocking, JoinHandle}; use tracing::Span; use zebra_chain::parameters::Network; +use crate::{ + constants::{ + DATABASE_FORMAT_MINOR_VERSION, DATABASE_FORMAT_PATCH_VERSION, DATABASE_FORMAT_VERSION, + DATABASE_FORMAT_VERSION_FILE_NAME, + }, + BoxError, +}; + /// Configuration for the state service. #[derive(Clone, Debug, Deserialize, Serialize)] #[serde(deny_unknown_fields, default)] @@ -125,6 +135,15 @@ impl Config { } } + /// Returns the path of the database format version file. 
+ pub fn version_file_path(&self, network: Network) -> PathBuf { + let mut version_path = self.db_path(network); + + version_path.push(DATABASE_FORMAT_VERSION_FILE_NAME); + + version_path + } + /// Construct a config for an ephemeral database pub fn ephemeral() -> Config { Config { @@ -267,8 +286,83 @@ fn parse_dir_name(entry: &DirEntry) -> Option { /// Parse the state version number from `dir_name`. /// /// Returns `None` if parsing fails, or the directory name is not in the expected format. -fn parse_version_number(dir_name: &str) -> Option { +fn parse_version_number(dir_name: &str) -> Option { dir_name .strip_prefix('v') .and_then(|version| version.parse().ok()) } + +/// Returns the full semantic version of the currently running database format code. +/// +/// This is the version implemented by the Zebra code that's currently running, +/// the minor and patch versions on disk can be different. +pub fn database_format_version_in_code() -> Version { + Version::new( + DATABASE_FORMAT_VERSION, + DATABASE_FORMAT_MINOR_VERSION, + DATABASE_FORMAT_PATCH_VERSION, + ) +} + +/// Returns the full semantic version of the on-disk database. +/// If there is no existing on-disk database, returns `Ok(None)`. +/// +/// This is the format of the data on disk, the minor and patch versions +/// implemented by the running Zebra code can be different. +pub fn database_format_version_on_disk( + config: &Config, + network: Network, +) -> Result, BoxError> { + let version_path = config.version_file_path(network); + + let version = match fs::read_to_string(version_path) { + Ok(version) => version, + Err(e) if e.kind() == ErrorKind::NotFound => { + // If the version file doesn't exist, don't guess the version. + // (It will end up being the version in code, once the database is created.) 
+ return Ok(None); + } + Err(e) => Err(e)?, + }; + + let (minor, patch) = version + .split_once('.') + .ok_or("invalid database format version file")?; + + Ok(Some(Version::new( + DATABASE_FORMAT_VERSION, + minor.parse()?, + patch.parse()?, + ))) +} + +/// Writes the currently running semantic database version to the on-disk database. +/// +/// # Correctness +/// +/// This should only be called after all running format upgrades are complete. +/// +/// # Concurrency +/// +/// This must only be called while RocksDB has an open database for `config`. +/// Otherwise, multiple Zebra processes could write the version at the same time, +/// corrupting the file. +pub fn write_database_format_version_to_disk( + config: &Config, + network: Network, +) -> Result<(), BoxError> { + let version_path = config.version_file_path(network); + + // The major version is already in the directory path. + let version = format!( + "{}.{}", + DATABASE_FORMAT_MINOR_VERSION, DATABASE_FORMAT_PATCH_VERSION + ); + + // # Concurrency + // + // The caller handles locking for this file write. + fs::write(version_path, version.as_bytes())?; + + Ok(()) +} diff --git a/zebra-state/src/constants.rs b/zebra-state/src/constants.rs index 85ae1e77df1..011b4115eda 100644 --- a/zebra-state/src/constants.rs +++ b/zebra-state/src/constants.rs @@ -1,4 +1,11 @@ -//! Definitions of constants. +//! Constants that impact state behaviour. + +use lazy_static::lazy_static; +use regex::Regex; + +// For doc comment links +#[allow(unused_imports)] +use crate::config::{database_format_version_in_code, database_format_version_on_disk}; pub use zebra_chain::transparent::MIN_TRANSPARENT_COINBASE_MATURITY; @@ -19,13 +26,42 @@ pub use zebra_chain::transparent::MIN_TRANSPARENT_COINBASE_MATURITY; // TODO: change to HeightDiff pub const MAX_BLOCK_REORG_HEIGHT: u32 = MIN_TRANSPARENT_COINBASE_MATURITY - 1; -/// The database format version, incremented each time the database format changes. 
-pub const DATABASE_FORMAT_VERSION: u32 = 25; +/// The database format major version, incremented each time the on-disk database format has a +/// breaking data format change. +/// +/// Breaking changes include: +/// - deleting a column family, or +/// - changing a column family's data format in an incompatible way. +/// +/// Breaking changes become minor version changes if: +/// - we previously added compatibility code, and +/// - it's available in all supported Zebra versions. +/// +/// Use [`database_format_version_in_code()`] or [`database_format_version_on_disk()`] +/// to get the full semantic format version. +pub const DATABASE_FORMAT_VERSION: u64 = 25; + +/// The database format minor version, incremented each time the on-disk database format has a +/// significant data format change. +/// +/// Significant changes include: +/// - adding new column families, +/// - changing the format of a column family in a compatible way, or +/// - breaking changes with compatibility code in all supported Zebra versions. +pub const DATABASE_FORMAT_MINOR_VERSION: u64 = 0; + +/// The database format patch version, incremented each time the on-disk database format has a +/// significant format compatibility fix. +pub const DATABASE_FORMAT_PATCH_VERSION: u64 = 1; + +/// The name of the file containing the minor and patch database versions. +pub const DATABASE_FORMAT_VERSION_FILE_NAME: &str = "version"; /// The maximum number of blocks to check for NU5 transactions, /// before we assume we are on a pre-NU5 legacy chain. /// -/// Zebra usually only has to check back a few blocks, but on testnet it can be a long time between v5 transactions. +/// Zebra usually only has to check back a few blocks on mainnet, but on testnet it can be a long +/// time between v5 transactions. pub const MAX_LEGACY_CHAIN_BLOCKS: usize = 100_000; /// The maximum number of non-finalized chain forks Zebra will track. 
@@ -58,9 +94,6 @@ const MAX_FIND_BLOCK_HEADERS_RESULTS_FOR_PROTOCOL: u32 = 160; pub const MAX_FIND_BLOCK_HEADERS_RESULTS_FOR_ZEBRA: u32 = MAX_FIND_BLOCK_HEADERS_RESULTS_FOR_PROTOCOL - 2; -use lazy_static::lazy_static; -use regex::Regex; - lazy_static! { /// Regex that matches the RocksDB error when its lock file is already open. pub static ref LOCK_FILE_ERROR: Regex = Regex::new("(lock file).*(temporarily unavailable)|(in use)|(being used by another process)").expect("regex is valid"); diff --git a/zebra-state/src/service/finalized_state/disk_db.rs b/zebra-state/src/service/finalized_state/disk_db.rs index 3c732acc464..0432167d183 100644 --- a/zebra-state/src/service/finalized_state/disk_db.rs +++ b/zebra-state/src/service/finalized_state/disk_db.rs @@ -10,13 +10,18 @@ //! The [`crate::constants::DATABASE_FORMAT_VERSION`] constant must //! be incremented each time the database format (column, serialization, etc) changes. -use std::{fmt::Debug, path::Path, sync::Arc}; +use std::{cmp::Ordering, fmt::Debug, path::Path, sync::Arc}; +use itertools::Itertools; use rlimit::increase_nofile_limit; use zebra_chain::parameters::Network; use crate::{ + config::{ + database_format_version_in_code, database_format_version_on_disk, + write_database_format_version_to_disk, + }, service::finalized_state::disk_format::{FromDisk, IntoDisk}, Config, }; @@ -386,61 +391,93 @@ impl DiskDb { /// const MEMTABLE_RAM_CACHE_MEGABYTES: usize = 128; + /// The column families supported by the running database code. 
+ const COLUMN_FAMILIES_IN_CODE: &[&'static str] = &[ + // Blocks + "hash_by_height", + "height_by_hash", + "block_header_by_height", + // Transactions + "tx_by_loc", + "hash_by_tx_loc", + "tx_loc_by_hash", + // Transparent + "balance_by_transparent_addr", + "tx_loc_by_transparent_addr_loc", + "utxo_by_out_loc", + "utxo_loc_by_transparent_addr_loc", + // Sprout + "sprout_nullifiers", + "sprout_anchors", + "sprout_note_commitment_tree", + // Sapling + "sapling_nullifiers", + "sapling_anchors", + "sapling_note_commitment_tree", + // Orchard + "orchard_nullifiers", + "orchard_anchors", + "orchard_note_commitment_tree", + // Chain + "history_tree", + "tip_chain_value_pool", + ]; + /// Opens or creates the database at `config.path` for `network`, /// and returns a shared low-level database wrapper. pub fn new(config: &Config, network: Network) -> DiskDb { let path = config.db_path(network); - let db_options = DiskDb::options(); - let column_families = vec![ - // Blocks - rocksdb::ColumnFamilyDescriptor::new("hash_by_height", db_options.clone()), - rocksdb::ColumnFamilyDescriptor::new("height_by_hash", db_options.clone()), - rocksdb::ColumnFamilyDescriptor::new("block_header_by_height", db_options.clone()), - // Transactions - rocksdb::ColumnFamilyDescriptor::new("tx_by_loc", db_options.clone()), - rocksdb::ColumnFamilyDescriptor::new("hash_by_tx_loc", db_options.clone()), - rocksdb::ColumnFamilyDescriptor::new("tx_loc_by_hash", db_options.clone()), - // Transparent - rocksdb::ColumnFamilyDescriptor::new("balance_by_transparent_addr", db_options.clone()), - rocksdb::ColumnFamilyDescriptor::new( - "tx_loc_by_transparent_addr_loc", - db_options.clone(), + let running_version = database_format_version_in_code(); + let disk_version = database_format_version_on_disk(config, network) + .expect("unable to read database format version file"); + + match disk_version.as_ref().map(|disk| disk.cmp(&running_version)) { + // TODO: if the on-disk format is older, actually run the 
upgrade task after the + // database has been opened (#6642) + Some(Ordering::Less) => info!( + ?running_version, + ?disk_version, + "trying to open older database format: launching upgrade task" ), - rocksdb::ColumnFamilyDescriptor::new("utxo_by_out_loc", db_options.clone()), - rocksdb::ColumnFamilyDescriptor::new( - "utxo_loc_by_transparent_addr_loc", - db_options.clone(), + // TODO: if the on-disk format is newer, downgrade the version after the + // database has been opened (#6642) + Some(Ordering::Greater) => info!( + ?running_version, + ?disk_version, + "trying to open newer database format: data should be compatible" ), - // Sprout - rocksdb::ColumnFamilyDescriptor::new("sprout_nullifiers", db_options.clone()), - rocksdb::ColumnFamilyDescriptor::new("sprout_anchors", db_options.clone()), - rocksdb::ColumnFamilyDescriptor::new("sprout_note_commitment_tree", db_options.clone()), - // Sapling - rocksdb::ColumnFamilyDescriptor::new("sapling_nullifiers", db_options.clone()), - rocksdb::ColumnFamilyDescriptor::new("sapling_anchors", db_options.clone()), - rocksdb::ColumnFamilyDescriptor::new( - "sapling_note_commitment_tree", - db_options.clone(), + Some(Ordering::Equal) => info!( + ?running_version, + "trying to open compatible database format" ), - // Orchard - rocksdb::ColumnFamilyDescriptor::new("orchard_nullifiers", db_options.clone()), - rocksdb::ColumnFamilyDescriptor::new("orchard_anchors", db_options.clone()), - rocksdb::ColumnFamilyDescriptor::new( - "orchard_note_commitment_tree", - db_options.clone(), + None => info!( + ?running_version, + "creating new database with the current format" ), - // Chain - rocksdb::ColumnFamilyDescriptor::new("history_tree", db_options.clone()), - rocksdb::ColumnFamilyDescriptor::new("tip_chain_value_pool", db_options.clone()), - ]; - - // TODO: move opening the database to a blocking thread (#2188) - let db_result = rocksdb::DBWithThreadMode::::open_cf_descriptors( - &db_options, - &path, - column_families, - ); + } + + 
let db_options = DiskDb::options(); + + // When opening the database in read/write mode, all column families must be opened. + // + // To make Zebra forward-compatible with databases updated by later versions, + // we read any existing column families off the disk, then add any new column families + // from the current implementation. + // + // + let column_families_on_disk = DB::list_cf(&db_options, &path).unwrap_or_default(); + let column_families_in_code = Self::COLUMN_FAMILIES_IN_CODE + .iter() + .map(ToString::to_string); + + let column_families = column_families_on_disk + .into_iter() + .chain(column_families_in_code) + .unique() + .map(|cf_name| rocksdb::ColumnFamilyDescriptor::new(cf_name, db_options.clone())); + + let db_result = DB::open_cf_descriptors(&db_options, &path, column_families); match db_result { Ok(db) => { @@ -453,6 +490,27 @@ impl DiskDb { db.assert_default_cf_is_empty(); + // Now we've checked that the database format is up-to-date, + // mark it as updated on disk. + // + // # Concurrency + // + // The version must only be updated while RocksDB is holding the database + // directory lock. This prevents multiple Zebra instances corrupting the version + // file. + // + // # TODO + // + // - only update the version at the end of the format upgrade task (#6642) + // - add a note to the format upgrade task code to update the version constants + // whenever the format changes + // - add a test that the format upgrade runs exactly once when: + // 1. if an older cached state format is opened, the format is upgraded, + // then if Zebra is launched again the format is not upgraded + // 2. 
if the current cached state format is opened, the format is not upgraded + write_database_format_version_to_disk(config, network) + .expect("unable to write database format version file to disk"); + db } diff --git a/zebra-state/src/service/finalized_state/zebra_db/shielded.rs b/zebra-state/src/service/finalized_state/zebra_db/shielded.rs index 1e6b2fdf6c5..83a4d36f67f 100644 --- a/zebra-state/src/service/finalized_state/zebra_db/shielded.rs +++ b/zebra-state/src/service/finalized_state/zebra_db/shielded.rs @@ -107,24 +107,41 @@ impl ZebraDb { None => return Default::default(), }; - let sapling_nct_handle = self.db.cf_handle("sapling_note_commitment_tree").unwrap(); - - self.db - .zs_get(&sapling_nct_handle, &height) - .map(Arc::new) + self.sapling_note_commitment_tree_by_height(&height) .expect("Sapling note commitment tree must exist if there is a finalized tip") } - /// Returns the Sapling note commitment tree matching the given block height. - #[allow(dead_code)] + /// Returns the Sapling note commitment tree matching the given block height, + /// or `None` if the height is above the finalized tip. #[allow(clippy::unwrap_in_result)] pub fn sapling_note_commitment_tree_by_height( &self, height: &Height, ) -> Option> { + let tip_height = self.finalized_tip_height()?; + + // If we're above the tip, searching backwards would always return the tip tree. + // But the correct answer is "we don't know that tree yet". + if *height > tip_height { + return None; + } + let sapling_trees = self.db.cf_handle("sapling_note_commitment_tree").unwrap(); - self.db.zs_get(&sapling_trees, height).map(Arc::new) + // If we know there must be a tree, search backwards for it. + // + // # Compatibility + // + // Allow older Zebra versions to read future database formats, after note commitment trees + // have been deduplicated. See ticket #6642 for details. 
+ let (_first_duplicate_height, tree) = self + .db + .zs_prev_key_value_back_from(&sapling_trees, height) + .expect( + "Sapling note commitment trees must exist for all heights below the finalized tip", + ); + + Some(Arc::new(tree)) } /// Returns the Orchard note commitment tree of the finalized tip @@ -135,24 +152,38 @@ impl ZebraDb { None => return Default::default(), }; - let orchard_nct_handle = self.db.cf_handle("orchard_note_commitment_tree").unwrap(); - - self.db - .zs_get(&orchard_nct_handle, &height) - .map(Arc::new) + self.orchard_note_commitment_tree_by_height(&height) .expect("Orchard note commitment tree must exist if there is a finalized tip") } - /// Returns the Orchard note commitment tree matching the given block height. - #[allow(dead_code)] + /// Returns the Orchard note commitment tree matching the given block height, + /// or `None` if the height is above the finalized tip. #[allow(clippy::unwrap_in_result)] pub fn orchard_note_commitment_tree_by_height( &self, height: &Height, ) -> Option> { + let tip_height = self.finalized_tip_height()?; + + // If we're above the tip, searching backwards would always return the tip tree. + // But the correct answer is "we don't know that tree yet". + if *height > tip_height { + return None; + } + let orchard_trees = self.db.cf_handle("orchard_note_commitment_tree").unwrap(); - self.db.zs_get(&orchard_trees, height).map(Arc::new) + // # Compatibility + // + // Allow older Zebra versions to read future database formats. See ticket #6642 for details. 
+ let (_first_duplicate_height, tree) = self + .db + .zs_prev_key_value_back_from(&orchard_trees, height) + .expect( + "Orchard note commitment trees must exist for all heights below the finalized tip", + ); + + Some(Arc::new(tree)) } /// Returns the shielded note commitment trees of the finalized tip From d7b90552f3c8b9b3264e23755ce14a7a1eebe6fc Mon Sep 17 00:00:00 2001 From: teor Date: Wed, 7 Jun 2023 09:38:48 +1000 Subject: [PATCH 049/265] fix(state): Avoid panicking on state errors during shutdown (#6828) * Enable cancel_all_background_work() only on macOS * Ignore expected "during shutdown" errors, and log other errors * Disable cancel_all_background_work() but keep the updated docs and error handling * Add the macOS shutdown crash to the README known issues --------- Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com> --- README.md | 2 + .../src/service/finalized_state/disk_db.rs | 90 +++++++++++++++---- 2 files changed, 77 insertions(+), 15 deletions(-) diff --git a/README.md b/README.md index 9e3a253a928..98bd9a8af04 100644 --- a/README.md +++ b/README.md @@ -155,6 +155,8 @@ There are a few bugs in Zebra that we're still working on fixing: - Block download and verification sometimes times out during Zebra's initial sync [#5709](https://github.com/ZcashFoundation/zebra/issues/5709). The full sync still finishes reasonably quickly. +- Rust 1.70 [causes crashes during shutdown on macOS x86_64 (#6812)](https://github.com/ZcashFoundation/zebra/issues/6812). The state cache should stay valid despite the crash. + - No Windows support [#3801](https://github.com/ZcashFoundation/zebra/issues/3801). We used to test with Windows Server 2019, but not any more; see the issue for details. - Experimental Tor support is disabled until [Zebra upgrades to the latest `arti-client`](https://github.com/ZcashFoundation/zebra/issues/5492). 
This happened due to a Rust dependency conflict, which could only be resolved by `arti` upgrading to a version of `x25519-dalek` with the dependency fix. diff --git a/zebra-state/src/service/finalized_state/disk_db.rs b/zebra-state/src/service/finalized_state/disk_db.rs index 0432167d183..f896b3aac25 100644 --- a/zebra-state/src/service/finalized_state/disk_db.rs +++ b/zebra-state/src/service/finalized_state/disk_db.rs @@ -780,18 +780,72 @@ impl DiskDb { let path = self.path(); debug!(?path, "flushing database to disk"); - self.db - .flush() - .expect("unexpected failure flushing SST data to disk"); - self.db - .flush_wal(true) - .expect("unexpected failure flushing WAL data to disk"); + // These flushes can fail during forced shutdown or during Drop after a shutdown, + // particularly in tests. If they fail, there's nothing we can do about it anyway. + if let Err(error) = self.db.flush() { + let error = format!("{error:?}"); + if error.to_ascii_lowercase().contains("shutdown in progress") { + debug!( + ?error, + ?path, + "expected shutdown error flushing database SST files to disk" + ); + } else { + info!( + ?error, + ?path, + "unexpected error flushing database SST files to disk during shutdown" + ); + } + } + if let Err(error) = self.db.flush_wal(true) { + let error = format!("{error:?}"); + if error.to_ascii_lowercase().contains("shutdown in progress") { + debug!( + ?error, + ?path, + "expected shutdown error flushing database WAL buffer to disk" + ); + } else { + info!( + ?error, + ?path, + "unexpected error flushing database WAL buffer to disk during shutdown" + ); + } + } + + // # Memory Safety + // // We'd like to call `cancel_all_background_work()` before Zebra exits, // but when we call it, we get memory, thread, or C++ errors when the process exits. // (This seems to be a bug in RocksDB: cancel_all_background_work() should wait until // all the threads have cleaned up.) 
// + // # Change History + // + // We've changed this setting multiple times since 2021, in response to new RocksDB + // and Rust compiler behaviour. + // + // We enabled cancel_all_background_work() due to failures on: + // - Rust 1.57 on Linux + // + // We disabled cancel_all_background_work() due to failures on: + // - Rust 1.64 on Linux + // + // We tried enabling cancel_all_background_work() due to failures on: + // - Rust 1.70 on macOS 12.6.5 on x86_64 + // but it didn't stop the aborts happening (PR #6820). + // + // There weren't any failures with cancel_all_background_work() disabled on: + // - Rust 1.69 or earlier + // - Linux with Rust 1.70 + // And with cancel_all_background_work() enabled or disabled on: + // - macOS 13.2 on aarch64 (M1), native and emulated x86_64, with Rust 1.70 + // + // # Detailed Description + // // We see these kinds of errors: // ``` // pthread lock: Invalid argument @@ -803,13 +857,26 @@ impl DiskDb { // signal: 11, SIGSEGV: invalid memory reference // ``` // + // # Reference + // // The RocksDB wiki says: // > Q: Is it safe to close RocksDB while another thread is issuing read, write or manual compaction requests? // > // > A: No. The users of RocksDB need to make sure all functions have finished before they close RocksDB. // > You can speed up the waiting by calling CancelAllBackgroundWork(). // - // https://github.com/facebook/rocksdb/wiki/RocksDB-FAQ + // + // + // > rocksdb::DB instances need to be destroyed before your main function exits. + // > RocksDB instances usually depend on some internal static variables. + // > Users need to make sure rocksdb::DB instances are destroyed before those static variables. + // + // + // + // # TODO + // + // Try re-enabling this code and fixing the underlying concurrency bug. 
+ // //info!(?path, "stopping background database tasks"); //self.db.cancel_all_background_work(true); @@ -818,14 +885,7 @@ impl DiskDb { // But Rust's ownership rules make that difficult, // so we just flush and delete ephemeral data instead. // - // The RocksDB wiki says: - // > rocksdb::DB instances need to be destroyed before your main function exits. - // > RocksDB instances usually depend on some internal static variables. - // > Users need to make sure rocksdb::DB instances are destroyed before those static variables. - // - // https://github.com/facebook/rocksdb/wiki/Known-Issues - // - // But this implementation doesn't seem to cause any issues, + // This implementation doesn't seem to cause any issues, // and the RocksDB Drop implementation handles any cleanup. self.delete_ephemeral(); } From 428493e3de829748ad8aaba23d98bb0515e33aaf Mon Sep 17 00:00:00 2001 From: teor Date: Wed, 7 Jun 2023 13:41:49 +1000 Subject: [PATCH 050/265] fix(net): Try harder to drop connections when they shut down, Credit: Ziggurat Team (#6832) * Force Client tasks to shut down when it is dropped * Try to close the peer sender sink on drop * Reliably shut down the peer sender when the Connection is shut down * Add a TODO for closing peer_rx * Add logging for duplicate peer connections and IP addresses --- Cargo.lock | 1 + zebra-network/Cargo.toml | 1 + zebra-network/src/constants.rs | 3 + zebra-network/src/peer/client.rs | 6 +- zebra-network/src/peer/connection.rs | 113 ++++++++++++------- zebra-network/src/peer/connection/peer_tx.rs | 27 ++++- zebra-network/src/peer_set/set.rs | 68 +++++++++-- 7 files changed, 170 insertions(+), 49 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 072c69cff45..a455225f74f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5853,6 +5853,7 @@ dependencies = [ "howudoin", "humantime-serde", "indexmap", + "itertools", "lazy_static", "metrics 0.21.0", "ordered-map", diff --git a/zebra-network/Cargo.toml b/zebra-network/Cargo.toml index 
cb001281a02..93b87959e3b 100644 --- a/zebra-network/Cargo.toml +++ b/zebra-network/Cargo.toml @@ -31,6 +31,7 @@ dirs = "5.0.1" hex = "0.4.3" humantime-serde = "1.1.1" indexmap = { version = "1.9.3", features = ["serde"] } +itertools = "0.10.5" lazy_static = "1.4.0" ordered-map = "0.4.2" pin-project = "1.1.0" diff --git a/zebra-network/src/constants.rs b/zebra-network/src/constants.rs index e9082bfcd5b..f327cce4b4c 100644 --- a/zebra-network/src/constants.rs +++ b/zebra-network/src/constants.rs @@ -351,6 +351,9 @@ pub const MIN_OVERLOAD_DROP_PROBABILITY: f32 = 0.05; /// [`Overloaded`](crate::PeerError::Overloaded) error. pub const MAX_OVERLOAD_DROP_PROBABILITY: f32 = 0.95; +/// The minimum interval between logging peer set status updates. +pub const MIN_PEER_SET_LOG_INTERVAL: Duration = Duration::from_secs(60); + lazy_static! { /// The minimum network protocol version accepted by this crate for each network, /// represented as a network upgrade. diff --git a/zebra-network/src/peer/client.rs b/zebra-network/src/peer/client.rs index f264cc5ff98..69940275414 100644 --- a/zebra-network/src/peer/client.rs +++ b/zebra-network/src/peer/client.rs @@ -543,10 +543,14 @@ impl Client { // Prevent any senders from sending more messages to this peer. self.server_tx.close_channel(); - // Stop the heartbeat task + // Ask the heartbeat task to stop. if let Some(shutdown_tx) = self.shutdown_tx.take() { let _ = shutdown_tx.send(CancelHeartbeatTask); } + + // Force the connection and heartbeat tasks to stop. + self.connection_task.abort(); + self.heartbeat_task.abort(); } } diff --git a/zebra-network/src/peer/connection.rs b/zebra-network/src/peer/connection.rs index 5d8f83039e6..318357dbd6b 100644 --- a/zebra-network/src/peer/connection.rs +++ b/zebra-network/src/peer/connection.rs @@ -451,7 +451,10 @@ impl From for InboundMessage { } /// The channels, services, and associated state for a peer connection. 
-pub struct Connection { +pub struct Connection +where + Tx: Sink + Unpin, +{ /// The metadata for the connected peer `service`. /// /// This field is used for debugging. @@ -519,7 +522,10 @@ pub struct Connection { last_overload_time: Option, } -impl fmt::Debug for Connection { +impl fmt::Debug for Connection +where + Tx: Sink + Unpin, +{ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { // skip the channels, they don't tell us anything useful f.debug_struct(std::any::type_name::>()) @@ -534,7 +540,10 @@ impl fmt::Debug for Connection { } } -impl Connection { +impl Connection +where + Tx: Sink + Unpin, +{ /// Return a new connection from its channels, services, and shared state. pub(crate) fn new( inbound_service: S, @@ -645,9 +654,9 @@ where // the request completes (or times out). match future::select(peer_rx.next(), self.client_rx.next()).await { Either::Left((None, _)) => { - self.fail_with(PeerError::ConnectionClosed); + self.fail_with(PeerError::ConnectionClosed).await; } - Either::Left((Some(Err(e)), _)) => self.fail_with(e), + Either::Left((Some(Err(e)), _)) => self.fail_with(e).await, Either::Left((Some(Ok(msg)), _)) => { let unhandled_msg = self.handle_message_as_request(msg).await; @@ -663,7 +672,8 @@ where // There are no requests to be flushed, // but we need to set an error and update metrics. - self.shutdown(PeerError::ClientDropped); + // (We don't want to log this error, because it's normal behaviour.) 
+ self.shutdown_async(PeerError::ClientDropped).await; break; } Either::Right((Some(req), _)) => { @@ -753,8 +763,10 @@ where .instrument(span.clone()) .await { - Either::Right((None, _)) => self.fail_with(PeerError::ConnectionClosed), - Either::Right((Some(Err(e)), _)) => self.fail_with(e), + Either::Right((None, _)) => { + self.fail_with(PeerError::ConnectionClosed).await + } + Either::Right((Some(Err(e)), _)) => self.fail_with(e).await, Either::Right((Some(Ok(peer_msg)), _cancel)) => { self.update_state_metrics(format!("Out::Rsp::{}", peer_msg.command())); @@ -813,7 +825,7 @@ where // So we do the state request cleanup manually. let e = SharedPeerError::from(e); let _ = tx.send(Err(e.clone())); - self.fail_with(e); + self.fail_with(e).await; State::Failed } // Other request timeouts fail the request. @@ -840,6 +852,8 @@ where } } + // TODO: close peer_rx here, after changing it from a stream to a channel + let error = self.error_slot.try_get_error(); assert!( error.is_some(), @@ -849,18 +863,21 @@ where self.update_state_metrics(error.expect("checked is_some").to_string()); } - /// Fail this connection. + /// Fail this connection, log the failure, and shut it down. + /// See [`Self::shutdown_async()`] for details. /// - /// If the connection has errored already, re-use the original error. - /// Otherwise, fail the connection with `error`. - fn fail_with(&mut self, error: impl Into) { + /// Use [`Self::shutdown_async()`] to avoid logging the failure, + /// and [`Self::shutdown()`] from non-async code. 
+ async fn fail_with(&mut self, error: impl Into) { let error = error.into(); - debug!(%error, - client_receiver = ?self.client_rx, - "failing peer service with error"); + debug!( + %error, + client_receiver = ?self.client_rx, + "failing peer service with error" + ); - self.shutdown(error); + self.shutdown_async(error).await; } /// Handle an internal client request, possibly generating outgoing messages to the @@ -1052,7 +1069,7 @@ where Err(error) => { let error = SharedPeerError::from(error); let _ = tx.send(Err(error.clone())); - self.fail_with(error); + self.fail_with(error).await; } }; } @@ -1075,17 +1092,17 @@ where Message::Ping(nonce) => { trace!(?nonce, "responding to heartbeat"); if let Err(e) = self.peer_tx.send(Message::Pong(nonce)).await { - self.fail_with(e); + self.fail_with(e).await; } Consumed } // These messages shouldn't be sent outside of a handshake. Message::Version { .. } => { - self.fail_with(PeerError::DuplicateHandshake); + self.fail_with(PeerError::DuplicateHandshake).await; Consumed } Message::Verack { .. 
} => { - self.fail_with(PeerError::DuplicateHandshake); + self.fail_with(PeerError::DuplicateHandshake).await; Consumed } // These messages should already be handled as a response if they @@ -1267,7 +1284,7 @@ where tokio::task::yield_now().await; if self.svc.ready().await.is_err() { - self.fail_with(PeerError::ServiceShutdown); + self.fail_with(PeerError::ServiceShutdown).await; return; } @@ -1312,7 +1329,7 @@ where Response::Nil => { /* generic success, do nothing */ } Response::Peers(addrs) => { if let Err(e) = self.peer_tx.send(Message::Addr(addrs)).await { - self.fail_with(e); + self.fail_with(e).await; } } Response::Transactions(transactions) => { @@ -1324,7 +1341,7 @@ where match transaction { Available(transaction) => { if let Err(e) = self.peer_tx.send(Message::Tx(transaction)).await { - self.fail_with(e); + self.fail_with(e).await; return; } } @@ -1334,7 +1351,7 @@ where if !missing_ids.is_empty() { if let Err(e) = self.peer_tx.send(Message::NotFound(missing_ids)).await { - self.fail_with(e); + self.fail_with(e).await; return; } } @@ -1348,7 +1365,7 @@ where match block { Available(block) => { if let Err(e) = self.peer_tx.send(Message::Block(block)).await { - self.fail_with(e); + self.fail_with(e).await; return; } } @@ -1358,7 +1375,7 @@ where if !missing_hashes.is_empty() { if let Err(e) = self.peer_tx.send(Message::NotFound(missing_hashes)).await { - self.fail_with(e); + self.fail_with(e).await; return; } } @@ -1369,12 +1386,12 @@ where .send(Message::Inv(hashes.into_iter().map(Into::into).collect())) .await { - self.fail_with(e) + self.fail_with(e).await } } Response::BlockHeaders(headers) => { if let Err(e) = self.peer_tx.send(Message::Headers(headers)).await { - self.fail_with(e) + self.fail_with(e).await } } Response::TransactionIds(hashes) => { @@ -1402,7 +1419,7 @@ where .collect(); if let Err(e) = self.peer_tx.send(Message::Inv(hashes)).await { - self.fail_with(e) + self.fail_with(e).await } } } @@ -1454,7 +1471,7 @@ where ); 
self.update_state_metrics(format!("In::Req::{}/Rsp::Overload::Error", req.command())); - self.fail_with(PeerError::Overloaded); + self.fail_with(PeerError::Overloaded).await; } else { self.update_state_metrics(format!("In::Req::{}/Rsp::Overload::Ignored", req.command())); metrics::counter!("pool.ignored.loadshed", 1); @@ -1499,7 +1516,10 @@ fn overload_drop_connection_probability(now: Instant, prev: Option) -> raw_drop_probability.clamp(MIN_OVERLOAD_DROP_PROBABILITY, MAX_OVERLOAD_DROP_PROBABILITY) } -impl Connection { +impl Connection +where + Tx: Sink + Unpin, +{ /// Update the connection state metrics for this connection, /// using `extra_state_info` as additional state information. fn update_state_metrics(&mut self, extra_state_info: impl Into>) { @@ -1538,18 +1558,32 @@ impl Connection { } } - /// Marks the peer as having failed with `error`, and performs connection cleanup. + /// Marks the peer as having failed with `error`, and performs connection cleanup, + /// including async channel closes. /// /// If the connection has errored already, re-use the original error. /// Otherwise, fail the connection with `error`. + async fn shutdown_async(&mut self, error: impl Into) { + // Close async channels first, so other tasks can start shutting down. + // There's nothing we can do about errors while shutting down, and some errors are expected. + // + // TODO: close peer_tx and peer_rx in shutdown() and Drop, after: + // - using channels instead of streams/sinks? + // - exposing the underlying implementation rather than using generics and closures? + // - adding peer_rx to the connection struct (optional) + let _ = self.peer_tx.close().await; + + self.shutdown(error); + } + + /// Marks the peer as having failed with `error`, and performs connection cleanup. + /// See [`Self::shutdown_async()`] for details. + /// + /// Call [`Self::shutdown_async()`] in async code, because it can shut down more channels. 
fn shutdown(&mut self, error: impl Into) { let mut error = error.into(); // Close channels first, so other tasks can start shutting down. - // - // TODO: close peer_tx and peer_rx, after: - // - adapting them using a struct with a Stream impl, rather than closures - // - making the struct forward `close` to the inner channel self.client_rx.close(); // Update the shared error slot @@ -1617,7 +1651,10 @@ impl Connection { } } -impl Drop for Connection { +impl Drop for Connection +where + Tx: Sink + Unpin, +{ fn drop(&mut self) { self.shutdown(PeerError::ConnectionDropped); diff --git a/zebra-network/src/peer/connection/peer_tx.rs b/zebra-network/src/peer/connection/peer_tx.rs index 7e17196d95d..47df6504903 100644 --- a/zebra-network/src/peer/connection/peer_tx.rs +++ b/zebra-network/src/peer/connection/peer_tx.rs @@ -1,6 +1,6 @@ //! The peer message sender channel. -use futures::{Sink, SinkExt}; +use futures::{FutureExt, Sink, SinkExt}; use zebra_chain::serialization::SerializationError; @@ -10,7 +10,10 @@ use crate::{constants::REQUEST_TIMEOUT, protocol::external::Message, PeerError}; /// /// Used to apply a timeout to send messages. #[derive(Clone, Debug)] -pub struct PeerTx { +pub struct PeerTx +where + Tx: Sink + Unpin, +{ /// A channel for sending Zcash messages to the connected peer. /// /// This channel accepts [`Message`]s. @@ -28,10 +31,28 @@ where .map_err(|_| PeerError::ConnectionSendTimeout)? .map_err(Into::into) } + + /// Flush any remaining output and close this [`PeerTx`], if necessary. 
+ pub async fn close(&mut self) -> Result<(), SerializationError> { + self.inner.close().await + } } -impl From for PeerTx { +impl From for PeerTx +where + Tx: Sink + Unpin, +{ fn from(tx: Tx) -> Self { PeerTx { inner: tx } } } + +impl Drop for PeerTx +where + Tx: Sink + Unpin, +{ + fn drop(&mut self) { + // Do a last-ditch close attempt on the sink + self.close().now_or_never(); + } +} diff --git a/zebra-network/src/peer_set/set.rs b/zebra-network/src/peer_set/set.rs index abdd2a87495..8611ef7c633 100644 --- a/zebra-network/src/peer_set/set.rs +++ b/zebra-network/src/peer_set/set.rs @@ -98,6 +98,7 @@ use std::{ fmt::Debug, future::Future, marker::PhantomData, + net::IpAddr, pin::Pin, task::{Context, Poll}, time::Instant, @@ -109,6 +110,7 @@ use futures::{ prelude::*, stream::FuturesUnordered, }; +use itertools::Itertools; use tokio::{ sync::{broadcast, oneshot::error::TryRecvError, watch}, task::JoinHandle, @@ -123,6 +125,7 @@ use zebra_chain::chain_tip::ChainTip; use crate::{ address_book::AddressMetrics, + constants::MIN_PEER_SET_LOG_INTERVAL, peer::{LoadTrackedClient, MinimumPeerVersion}, peer_set::{ unready_service::{Error as UnreadyError, UnreadyService}, @@ -810,33 +813,84 @@ where (self.ready_services.len() + 1) / 2 } - /// Logs the peer set size. + /// Returns the list of addresses in the peer set. + fn peer_set_addresses(&self) -> Vec { + self.ready_services + .keys() + .chain(self.cancel_handles.keys()) + .cloned() + .collect() + } + + /// Logs the peer set size, and any potential connectivity issues. fn log_peer_set_size(&mut self) { let ready_services_len = self.ready_services.len(); let unready_services_len = self.unready_services.len(); trace!(ready_peers = ?ready_services_len, unready_peers = ?unready_services_len); - if ready_services_len > 0 { - return; - } + let now = Instant::now(); // These logs are designed to be human-readable in a terminal, at the // default Zebra log level. 
If you need to know the peer set size for // every request, use the trace-level logs, or the metrics exporter. if let Some(last_peer_log) = self.last_peer_log { // Avoid duplicate peer set logs - if Instant::now().duration_since(last_peer_log).as_secs() < 60 { + if now.duration_since(last_peer_log) < MIN_PEER_SET_LOG_INTERVAL { return; } } else { // Suppress initial logs until the peer set has started up. // There can be multiple initial requests before the first peer is // ready. - self.last_peer_log = Some(Instant::now()); + self.last_peer_log = Some(now); return; } - self.last_peer_log = Some(Instant::now()); + self.last_peer_log = Some(now); + + // Log potential duplicate connections. + let peers = self.peer_set_addresses(); + + // Check for duplicates by address and port: these are unexpected and represent a bug. + let duplicates: Vec = peers.iter().duplicates().cloned().collect(); + + let mut peer_counts = peers.iter().counts(); + peer_counts.retain(|peer, _count| duplicates.contains(peer)); + + if !peer_counts.is_empty() { + let duplicate_connections: usize = peer_counts.values().sum(); + + warn!( + ?duplicate_connections, + duplicated_peers = ?peer_counts.len(), + peers = ?peers.len(), + "duplicate peer connections in peer set" + ); + } + + // Check for duplicates by address: these can happen if there are multiple nodes + // behind a NAT or on a single server. + let peers: Vec = peers.iter().map(|addr| addr.ip()).collect(); + let duplicates: Vec = peers.iter().duplicates().cloned().collect(); + + let mut peer_counts = peers.iter().counts(); + peer_counts.retain(|peer, _count| duplicates.contains(peer)); + + if !peer_counts.is_empty() { + let duplicate_connections: usize = peer_counts.values().sum(); + + info!( + ?duplicate_connections, + duplicated_peers = ?peer_counts.len(), + peers = ?peers.len(), + "duplicate IP addresses in peer set" + ); + } + + // Only log connectivity warnings if all our peers are busy (or there are no peers). 
+ if ready_services_len > 0 { + return; + } let address_metrics = *self.address_metrics.borrow(); if unready_services_len == 0 { From 59086c7d00c88dc68b462cedede6ab8cefc62e1b Mon Sep 17 00:00:00 2001 From: Arya Date: Wed, 7 Jun 2023 02:03:42 -0400 Subject: [PATCH 051/265] fix(zebrad): accept default subcommand arguments and print consistent usage information for top-level 'help' subcommand (#6801) * updates Cargo.toml * Migrate to abscissa 0.7.0 * Avoid panic from calling color_eyre::install twice * Uses 'start' as the default subcommand * updates default cmd logic * Fixes minor cli issues * removes outdated check in acceptance test * Adds a test for process_cli_args, fixes version_args test. Adds -V to process_cli_args match case * Revert "fix(clippy): Silence future-incompat warnings until we upgrade Abscissa (#6024)" This reverts commit dd90f79b4824af6b1ae22ee69abdf0f605da84cd. * Drops the worker guard to flush logs when zebra shuts down * Adds cargo feature to clap * restores process_cli_args * updates deny.toml * Updates EntryPoint help template * Updates subcommand help msgs * removes trailing whitespace, capitalizes sentences * Apply suggestions from code review Co-authored-by: teor * revert parts of revert "Revert fix(clippy): Silence future-incompat warnings until we upgrade Abscissa" * Applies suggestions from code review * Moves EntryPoint to its own module * fixes version_args test * Updates changelog * Prunes redundant test cases * Apply suggestions from code review Co-authored-by: teor * Revert "Prunes redundant test cases" This reverts commit 3f7397918489144805c17d0594775aa699e87b9d. 
* Update zebrad/src/commands/entry_point.rs Co-authored-by: teor * Add missing import * Updates `process_cli_args` to return a result --------- Co-authored-by: teor --- .cargo/config.toml | 6 - CHANGELOG.md | 12 + Cargo.lock | 327 ++++++++------------- deny.toml | 5 +- zebrad/Cargo.toml | 6 +- zebrad/src/application.rs | 159 +++------- zebrad/src/application/entry_point.rs | 111 ------- zebrad/src/bin/zebrad/main.rs | 4 +- zebrad/src/commands.rs | 52 ++-- zebrad/src/commands/copy_state.rs | 29 +- zebrad/src/commands/download.rs | 6 +- zebrad/src/commands/entry_point.rs | 134 +++++++++ zebrad/src/commands/generate.rs | 17 +- zebrad/src/commands/start.rs | 22 +- zebrad/src/commands/tests.rs | 46 +++ zebrad/src/commands/tip_height.rs | 15 +- zebrad/src/commands/version.rs | 18 -- zebrad/src/components/tokio.rs | 2 +- zebrad/src/components/tracing/component.rs | 17 +- zebrad/src/components/tracing/endpoint.rs | 8 +- zebrad/src/prelude.rs | 2 +- zebrad/tests/acceptance.rs | 26 +- 22 files changed, 472 insertions(+), 552 deletions(-) delete mode 100644 zebrad/src/application/entry_point.rs create mode 100644 zebrad/src/commands/entry_point.rs create mode 100644 zebrad/src/commands/tests.rs delete mode 100644 zebrad/src/commands/version.rs diff --git a/.cargo/config.toml b/.cargo/config.toml index b75f6352f5b..5b0f95da173 100644 --- a/.cargo/config.toml +++ b/.cargo/config.toml @@ -1,11 +1,5 @@ # Zebra cargo configuration -# Disabled until we upgrade to abscissa 0.7 or later: -# https://github.com/ZcashFoundation/zebra/issues/5502 -# https://doc.rust-lang.org/cargo/reference/future-incompat-report.html -[future-incompat-report] -frequency = "never" - # Flags that apply to all Zebra crates and configurations [target.'cfg(all())'] rustflags = [ diff --git a/CHANGELOG.md b/CHANGELOG.md index ef19f83b3f0..960a13e4ad6 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,18 @@ All notable changes to Zebra are documented in this file. 
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org). +## [Zebra 1.0.0-rc.9](https://github.com/ZcashFoundation/zebra/releases/tag/v1.0.0-rc.9) - XXXX-XX-XX + +In this release ... + +### Breaking Changes + +- The version subcommand has been replaced with a --version/-V flag ([#6801](https://github.com/ZcashFoundation/zebra/pull/6801)) +- Zebra now accepts filters for the start command when no subcommand is provided ([#6801](https://github.com/ZcashFoundation/zebra/pull/6801)) + +... + + ## [Zebra 1.0.0-rc.8](https://github.com/ZcashFoundation/zebra/releases/tag/v1.0.0-rc.8) - 2023-05-10 Starting in this release, Zebra has implemented an "end of support" halt. Just like `zcashd`, the `zebrad` binary will stop running 16 weeks after the last release date. diff --git a/Cargo.lock b/Cargo.lock index a455225f74f..54883a32013 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4,39 +4,36 @@ version = 3 [[package]] name = "abscissa_core" -version = "0.5.2" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a07677093120a02583717b6dd1ef81d8de1e8d01bd226c83f0f9bdf3e56bb3a" +checksum = "8346a52bf3fb445d5949d144c37360ad2f1d7950cfcc6d4e9e4999b1cd1bd42a" dependencies = [ "abscissa_derive", + "arc-swap", "backtrace", "canonical-path", - "chrono", - "color-backtrace", - "generational-arena", - "gumdrop", - "libc", + "clap 4.3.0", + "color-eyre", + "fs-err", "once_cell", "regex", "secrecy", - "semver 0.9.0", + "semver 1.0.17", "serde", - "signal-hook", "termcolor", "toml 0.5.11", "tracing", "tracing-log", - "tracing-subscriber 0.1.6", + "tracing-subscriber", "wait-timeout", ] [[package]] name = "abscissa_derive" -version = "0.5.0" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "74f5722bc48763cb9d81d8427ca05b6aa2842f6632cf8e4c0a29eef9baececcc" +checksum = 
"55bfb86e57d13c06e482c570826ddcddcc8f07fab916760e8911141d4fda8b62" dependencies = [ - "darling 0.10.2", "ident_case", "proc-macro2 1.0.56", "quote 1.0.27", @@ -144,20 +141,26 @@ checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299" [[package]] name = "ansi_term" -version = "0.11.0" +version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee49baf6cb617b853aa8d93bf420db2383fab46d314482ca2803b40d5fde979b" +checksum = "d52a9bb7ec0cf484c551830a7ce27bd20d67eac647e1befb56b0be4ee39a55d2" dependencies = [ "winapi", ] [[package]] -name = "ansi_term" -version = "0.12.1" +name = "anstream" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d52a9bb7ec0cf484c551830a7ce27bd20d67eac647e1befb56b0be4ee39a55d2" +checksum = "0ca84f3628370c59db74ee214b3263d58f9aadd9b4fe7e711fd87dc452b7f163" dependencies = [ - "winapi", + "anstyle", + "anstyle-parse", + "anstyle-query", + "anstyle-wincon", + "colorchoice", + "is-terminal", + "utf8parse", ] [[package]] @@ -166,12 +169,46 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "41ed9a86bf92ae6580e0a31281f65a1b1d867c0cc68d5346e2ae128dddfa6a7d" +[[package]] +name = "anstyle-parse" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e765fd216e48e067936442276d1d57399e37bce53c264d6fefbe298080cb57ee" +dependencies = [ + "utf8parse", +] + +[[package]] +name = "anstyle-query" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5ca11d4be1bab0c8bc8734a9aa7bf4ee8316d462a08c6ac5052f888fef5b494b" +dependencies = [ + "windows-sys 0.48.0", +] + +[[package]] +name = "anstyle-wincon" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "180abfa45703aebe0093f79badacc01b8fd4ea2e35118747e5811127f926e188" +dependencies = [ + "anstyle", + "windows-sys 0.48.0", +] + [[package]] 
name = "anyhow" version = "1.0.71" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c7d0618f0e0b7e8ff11427422b64564d5fb0be1940354bfe2e0529b18a9d9b8" +[[package]] +name = "arc-swap" +version = "1.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bddcadddf5e9015d310179a59bb28c4d4b9920ad0f11e8e14dbadf654890c9a6" + [[package]] name = "arrayref" version = "0.3.7" @@ -659,11 +696,8 @@ checksum = "ec837a71355b28f6556dbd569b37b3f363091c0bd4b2e735674521b4c5fd9bc5" dependencies = [ "android-tzdata", "iana-time-zone", - "js-sys", "num-traits", "serde", - "time 0.1.45", - "wasm-bindgen", "winapi", ] @@ -722,7 +756,7 @@ version = "2.34.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a0610544180c38b88101fecf2dd634b174a62eef6946f84dfc6a7127512b381c" dependencies = [ - "ansi_term 0.12.1", + "ansi_term", "atty", "bitflags 1.3.2", "strsim 0.8.0", @@ -738,6 +772,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "93aae7a4192245f70fe75dd9157fc7b4a5bf53e88d30bd4396f7d8f9284d5acc" dependencies = [ "clap_builder", + "clap_derive", + "once_cell", ] [[package]] @@ -746,9 +782,24 @@ version = "4.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4f423e341edefb78c9caba2d9c7f7687d0e72e89df3ce3394554754393ac3990" dependencies = [ + "anstream", "anstyle", "bitflags 1.3.2", "clap_lex", + "once_cell", + "strsim 0.10.0", +] + +[[package]] +name = "clap_derive" +version = "4.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "191d9573962933b4027f932c600cd252ce27a8ad5979418fe78e43c07996f27b" +dependencies = [ + "heck 0.4.1", + "proc-macro2 1.0.56", + "quote 1.0.27", + "syn 2.0.15", ] [[package]] @@ -767,17 +818,6 @@ dependencies = [ "unicode-width", ] -[[package]] -name = "color-backtrace" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"65d13f1078cc63c791d0deba0dd43db37c9ec02b311f10bed10b577016f3a957" -dependencies = [ - "atty", - "backtrace", - "termcolor", -] - [[package]] name = "color-eyre" version = "0.6.2" @@ -806,6 +846,12 @@ dependencies = [ "tracing-error", ] +[[package]] +name = "colorchoice" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "acbf1af155f9b9ef647e42cdc158db4b64a1b61f743629225fde6f3e0be2a7c7" + [[package]] name = "console" version = "0.15.5" @@ -852,7 +898,7 @@ dependencies = [ "tonic", "tracing", "tracing-core", - "tracing-subscriber 0.3.17", + "tracing-subscriber", ] [[package]] @@ -1074,16 +1120,6 @@ dependencies = [ "syn 2.0.15", ] -[[package]] -name = "darling" -version = "0.10.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d706e75d87e35569db781a9b5e2416cff1236a47ed380831f959382ccd5f858" -dependencies = [ - "darling_core 0.10.2", - "darling_macro 0.10.2", -] - [[package]] name = "darling" version = "0.13.4" @@ -1104,20 +1140,6 @@ dependencies = [ "darling_macro 0.20.1", ] -[[package]] -name = "darling_core" -version = "0.10.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0c960ae2da4de88a91b2d920c2a7233b400bc33cb28453a2987822d8392519b" -dependencies = [ - "fnv", - "ident_case", - "proc-macro2 1.0.56", - "quote 1.0.27", - "strsim 0.9.3", - "syn 1.0.109", -] - [[package]] name = "darling_core" version = "0.13.4" @@ -1146,17 +1168,6 @@ dependencies = [ "syn 2.0.15", ] -[[package]] -name = "darling_macro" -version = "0.10.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9b5a2f4ac4969822c62224815d069952656cadc7084fdca9751e6d959189b72" -dependencies = [ - "darling_core 0.10.2", - "quote 1.0.27", - "syn 1.0.109", -] - [[package]] name = "darling_macro" version = "0.13.4" @@ -1483,6 +1494,12 @@ dependencies = [ "num-traits", ] +[[package]] +name = "fs-err" +version = "2.9.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "0845fa252299212f0389d64ba26f34fa32cfe41588355f21ed507c59a0f64541" + [[package]] name = "funty" version = "2.0.0" @@ -1578,15 +1595,6 @@ dependencies = [ "slab", ] -[[package]] -name = "generational-arena" -version = "0.2.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e1d3b771574f62d0548cee0ad9057857e9fc25d7a3335f140c84f6acd0bf601" -dependencies = [ - "cfg-if 0.1.10", -] - [[package]] name = "generic-array" version = "0.14.7" @@ -1671,26 +1679,6 @@ dependencies = [ "subtle", ] -[[package]] -name = "gumdrop" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee50908bc1beeac1f2902e0b4e0cd0d844e716f5ebdc6f0cfc1163fe5e10bcde" -dependencies = [ - "gumdrop_derive", -] - -[[package]] -name = "gumdrop_derive" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90454ce4de40b7ca6a8968b5ef367bdab48413962588d0d2b1638d60090c35d7" -dependencies = [ - "proc-macro2 1.0.56", - "quote 1.0.27", - "syn 1.0.109", -] - [[package]] name = "h2" version = "0.3.18" @@ -2422,15 +2410,6 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ffbee8634e0d45d258acb448e7eaab3fce7a0a467395d4d9f228e3c1f01fb2e4" -[[package]] -name = "matchers" -version = "0.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f099785f7595cc4b4553a174ce30dd7589ef93391ff414dbb67f62392b9e0ce1" -dependencies = [ - "regex-automata", -] - [[package]] name = "matchers" version = "0.1.0" @@ -2456,12 +2435,6 @@ dependencies = [ "rayon", ] -[[package]] -name = "maybe-uninit" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60302e4db3a61da70c0cb7991976248362f30319e88850c487b9b95bbf059e00" - [[package]] name = "memchr" version = "2.5.0" @@ -2882,15 +2855,6 @@ version = "0.1.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39" -[[package]] -name = "owning_ref" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ff55baddef9e4ad00f88b6c743a2a8062d4c6ade126c2a528644b8e444d52ce" -dependencies = [ - "stable_deref_trait", -] - [[package]] name = "owo-colors" version = "3.5.0" @@ -2973,7 +2937,7 @@ dependencies = [ "instant", "libc", "redox_syscall 0.2.16", - "smallvec 1.10.0", + "smallvec", "winapi", ] @@ -2986,7 +2950,7 @@ dependencies = [ "cfg-if 1.0.0", "libc", "redox_syscall 0.2.16", - "smallvec 1.10.0", + "smallvec", "windows-sys 0.45.0", ] @@ -3939,9 +3903,9 @@ dependencies = [ [[package]] name = "secrecy" -version = "0.6.0" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9182278ed645df3477a9c27bfee0621c621aa16f6972635f7f795dae3d81070f" +checksum = "9bd1c54ea06cfd2f6b63219704de0b9b4f72dcc2b8fdef820be6cd799780e91e" dependencies = [ "serde", "zeroize", @@ -3977,7 +3941,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1d7eb9ef2c18661902cc47e535f9bc51b78acd254da71d375c2f6720d9a40403" dependencies = [ "semver-parser", - "serde", ] [[package]] @@ -3985,6 +3948,9 @@ name = "semver" version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bebd363326d05ec3e2f532ab7660680f3b02130d780c299bca73469d521bc0ed" +dependencies = [ + "serde", +] [[package]] name = "semver-parser" @@ -4058,7 +4024,7 @@ dependencies = [ "sentry-backtrace", "sentry-core", "tracing-core", - "tracing-subscriber 0.3.17", + "tracing-subscriber", ] [[package]] @@ -4073,7 +4039,7 @@ dependencies = [ "serde", "serde_json", "thiserror", - "time 0.3.21", + "time", "url", "uuid", ] @@ -4163,7 +4129,7 @@ dependencies = [ "serde", "serde_json", "serde_with_macros 3.0.0", - "time 0.3.21", + "time", ] [[package]] @@ -4229,16 +4195,6 @@ version 
= "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "43b2853a4d09f215c24cc5489c992ce46052d359b5109343cbafbf26bc62f8a3" -[[package]] -name = "signal-hook" -version = "0.1.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e31d442c16f047a671b5a71e2161d6e68814012b7f5379d269ebd915fac2729" -dependencies = [ - "libc", - "signal-hook-registry", -] - [[package]] name = "signal-hook-registry" version = "1.4.1" @@ -4269,15 +4225,6 @@ dependencies = [ "autocfg", ] -[[package]] -name = "smallvec" -version = "0.6.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b97fcaeba89edba30f044a10c6a3cc39df9c3f17d7cd829dd1446cab35f890e0" -dependencies = [ - "maybe-uninit", -] - [[package]] name = "smallvec" version = "1.10.0" @@ -4331,12 +4278,6 @@ dependencies = [ "lock_api", ] -[[package]] -name = "stable_deref_trait" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3" - [[package]] name = "static_assertions" version = "1.1.0" @@ -4355,12 +4296,6 @@ version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8ea5119cdb4c55b55d432abb513a0429384878c15dde60cc77b1c99de1a95a6a" -[[package]] -name = "strsim" -version = "0.9.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6446ced80d6c486436db5c078dde11a9f73d42b57fb273121e160b84f63d894c" - [[package]] name = "strsim" version = "0.10.0" @@ -4515,17 +4450,6 @@ dependencies = [ "once_cell", ] -[[package]] -name = "time" -version = "0.1.45" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b797afad3f312d1c66a56d11d0316f916356d11bd158fbc6ca6389ff6bf805a" -dependencies = [ - "libc", - "wasi 0.10.0+wasi-snapshot-preview1", - "winapi", -] - [[package]] name = "time" version = "0.3.21" @@ -4879,8 +4803,8 @@ source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "09d48f71a791638519505cefafe162606f706c25592e4bde4d97600c0195312e" dependencies = [ "crossbeam-channel", - "time 0.3.21", - "tracing-subscriber 0.3.17", + "time", + "tracing-subscriber", ] [[package]] @@ -4911,7 +4835,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d686ec1c0f384b1277f097b2f279a2ecc11afe8c133c1aabf036a27cb4cd206e" dependencies = [ "tracing", - "tracing-subscriber 0.3.17", + "tracing-subscriber", ] [[package]] @@ -4922,7 +4846,7 @@ checksum = "0bae117ee14789185e129aaee5d93750abe67fdc5a9a62650452bfe4e122a3a9" dependencies = [ "lazy_static", "tracing", - "tracing-subscriber 0.3.17", + "tracing-subscriber", ] [[package]] @@ -4943,7 +4867,7 @@ checksum = "ba316a74e8fc3c3896a850dba2375928a9fa171b085ecddfc7c054d39970f3fd" dependencies = [ "libc", "tracing-core", - "tracing-subscriber 0.3.17", + "tracing-subscriber", ] [[package]] @@ -4957,35 +4881,18 @@ dependencies = [ "tracing-core", ] -[[package]] -name = "tracing-subscriber" -version = "0.1.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "192ca16595cdd0661ce319e8eede9c975f227cdaabc4faaefdc256f43d852e45" -dependencies = [ - "ansi_term 0.11.0", - "chrono", - "lazy_static", - "matchers 0.0.1", - "owning_ref", - "regex", - "smallvec 0.6.14", - "tracing-core", - "tracing-log", -] - [[package]] name = "tracing-subscriber" version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "30a651bc37f915e81f087d86e62a18eec5f79550c7faff886f7090b4ea757c77" dependencies = [ - "matchers 0.1.0", + "matchers", "nu-ansi-term", "once_cell", "regex", "sharded-slab", - "smallvec 1.10.0", + "smallvec", "thread_local", "tracing", "tracing-core", @@ -5000,7 +4907,7 @@ checksum = "3a2c0ff408fe918a94c428a3f2ad04e4afd5c95bbc08fcf868eff750c15728a4" dependencies = [ "lazy_static", "tracing-core", - "tracing-subscriber 0.3.17", + "tracing-subscriber", 
"tracing-test-macro", ] @@ -5157,6 +5064,12 @@ dependencies = [ "serde", ] +[[package]] +name = "utf8parse" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "711b9620af191e0cdc7468a8d14e709c3dcdb115b36f838e601583af800a370a" + [[package]] name = "uuid" version = "1.3.2" @@ -5195,7 +5108,7 @@ dependencies = [ "git2", "rustc_version 0.4.0", "rustversion", - "time 0.3.21", + "time", ] [[package]] @@ -5245,12 +5158,6 @@ version = "0.9.0+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" -[[package]] -name = "wasi" -version = "0.10.0+wasi-snapshot-preview1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a143597ca7c7793eff794def352d41792a93c481eb1042423ff7ff72ba2c31f" - [[package]] name = "wasi" version = "0.11.0+wasi-snapshot-preview1" @@ -5830,7 +5737,7 @@ dependencies = [ "tracing", "tracing-error", "tracing-futures", - "tracing-subscriber 0.3.17", + "tracing-subscriber", "zcash_proofs", "zebra-chain", "zebra-node-services", @@ -6001,7 +5908,7 @@ dependencies = [ "tower", "tracing", "tracing-error", - "tracing-subscriber 0.3.17", + "tracing-subscriber", ] [[package]] @@ -6019,7 +5926,7 @@ dependencies = [ "tinyvec", "tokio", "tracing-error", - "tracing-subscriber 0.3.17", + "tracing-subscriber", "zebra-chain", "zebra-node-services", "zebra-rpc", @@ -6032,11 +5939,11 @@ dependencies = [ "abscissa_core", "atty", "chrono", + "clap 4.3.0", "color-eyre", "console-subscriber", "dirs", "futures", - "gumdrop", "hex", "howudoin", "humantime-serde", @@ -6077,7 +5984,7 @@ dependencies = [ "tracing-flame", "tracing-futures", "tracing-journald", - "tracing-subscriber 0.3.17", + "tracing-subscriber", "tracing-test", "vergen", "zebra-chain", diff --git a/deny.toml b/deny.toml index 683aa100196..aa2f1a2cf9b 100644 --- a/deny.toml +++ b/deny.toml @@ -90,9 +90,10 @@ skip-tree = [ # Optional 
dependencies # upgrade abscissa (required dependency) and arti (optional dependency) - { name = "darling", version = "=0.10.2" }, { name = "semver", version = "=0.9.0" }, - { name = "tracing-subscriber", version = "=0.1.6" }, + + # wait for packed_simd_2 to upgrade + { name = "libm", version = "=0.1.4" }, # Elasticsearch dependencies diff --git a/zebrad/Cargo.toml b/zebrad/Cargo.toml index 12a9bcc83ac..2c7566a43f9 100644 --- a/zebrad/Cargo.toml +++ b/zebrad/Cargo.toml @@ -115,8 +115,8 @@ zebra-node-services = { path = "../zebra-node-services" } zebra-rpc = { path = "../zebra-rpc" } zebra-state = { path = "../zebra-state" } -abscissa_core = "0.5" -gumdrop = { version = "0.7", features = ["default_expr"]} +abscissa_core = "0.7.0" +clap = { version = "4.3.0", features = ["cargo"] } chrono = { version = "0.4.26", default-features = false, features = ["clock", "std"] } humantime-serde = "1.1.1" indexmap = "1.9.3" @@ -191,7 +191,7 @@ vergen = { version = "8.2.1", default-features = false, features = ["cargo", "gi tonic-build = { version = "0.9.2", optional = true } [dev-dependencies] -abscissa_core = { version = "0.5", features = ["testing"] } +abscissa_core = { version = "0.7.0", features = ["testing"] } hex = "0.4.3" jsonrpc-core = "18.0.0" once_cell = "1.18.0" diff --git a/zebrad/src/application.rs b/zebrad/src/application.rs index d5f3e9e81cd..4b3809593ed 100644 --- a/zebrad/src/application.rs +++ b/zebrad/src/application.rs @@ -1,27 +1,24 @@ //! 
Zebrad Abscissa Application -use std::{fmt::Write as _, io::Write as _, process}; +use std::{env, fmt::Write as _, io::Write as _, process, sync::Arc}; use abscissa_core::{ application::{self, AppCell}, - config::{self, Configurable}, + config::CfgCell, status_err, terminal::{component::Terminal, stderr, stdout, ColorChoice}, - Application, Component, FrameworkError, Shutdown, StandardPaths, Version, + Application, Component, Configurable, FrameworkError, Shutdown, StandardPaths, Version, }; use zebra_network::constants::PORT_IN_USE_ERROR; use zebra_state::constants::{DATABASE_FORMAT_VERSION, LOCK_FILE_ERROR}; use crate::{ - commands::ZebradCmd, + commands::EntryPoint, components::{sync::end_of_support::EOS_PANIC_MESSAGE_HEADER, tracing::Tracing}, config::ZebradConfig, }; -mod entry_point; -use entry_point::EntryPoint; - /// See /// Print a fatal error message and exit fn fatal_error(app_name: String, err: &dyn std::error::Error) -> ! { @@ -32,25 +29,6 @@ fn fatal_error(app_name: String, err: &dyn std::error::Error) -> ! { /// Application state pub static APPLICATION: AppCell = AppCell::new(); -/// Obtain a read-only (multi-reader) lock on the application state. -/// -/// Panics if the application state has not been initialized. -pub fn app_reader() -> application::lock::Reader { - APPLICATION.read() -} - -/// Obtain an exclusive mutable lock on the application state. -pub fn app_writer() -> application::lock::Writer { - APPLICATION.write() -} - -/// Obtain a read-only (multi-reader) lock on the application configuration. -/// -/// Panics if the application configuration has not been loaded. -pub fn app_config() -> config::Reader { - config::Reader::new(&APPLICATION) -} - /// Returns the zebrad version for this build, in SemVer 2.0 format. 
/// /// Includes the git commit and the number of commits since the last version @@ -117,10 +95,10 @@ pub fn user_agent() -> String { } /// Zebrad Application -#[derive(Debug)] +#[derive(Debug, Default)] pub struct ZebradApp { /// Application configuration. - config: Option, + config: CfgCell, /// Application state. state: application::State, @@ -147,21 +125,6 @@ impl ZebradApp { } } -/// Initialize a new application instance. -/// -/// By default no configuration is loaded, and the framework state is -/// initialized to a default, empty state (no components, threads, etc). -#[allow(unknown_lints)] -#[allow(clippy::derivable_impls)] -impl Default for ZebradApp { - fn default() -> Self { - Self { - config: None, - state: application::State::default(), - } - } -} - impl Application for ZebradApp { /// Entrypoint command for this application. type Cmd = EntryPoint; @@ -173,8 +136,8 @@ impl Application for ZebradApp { type Paths = StandardPaths; /// Accessor for application configuration. - fn config(&self) -> &ZebradConfig { - self.config.as_ref().expect("config not loaded") + fn config(&self) -> Arc { + self.config.read() } /// Borrow the application state immutably. @@ -182,34 +145,22 @@ impl Application for ZebradApp { &self.state } - /// Borrow the application state mutably. - fn state_mut(&mut self) -> &mut application::State { - &mut self.state - } - /// Returns the framework components used by this application. fn framework_components( &mut self, - command: &Self::Cmd, + _command: &Self::Cmd, ) -> Result>>, FrameworkError> { - // Automatically use color if we're outputting to a terminal + // TODO: Open a PR in abscissa to add a TerminalBuilder for opting out + // of the `color_eyre::install` part of `Terminal::new` without + // ColorChoice::Never? 
+ + // The Tracing component uses stdout directly and will apply colors + // `if Self::outputs_are_ttys() && config.tracing.use_colors` // - // The `abcissa` docs claim that abscissa implements `Auto`, but it - // does not - except in `color_backtrace` backtraces. - let mut term_colors = self.term_colors(command); - if term_colors == ColorChoice::Auto { - // We want to disable colors on a per-stream basis, but that feature - // can only be implemented inside the terminal component streams. - // Instead, if either output stream is not a terminal, disable - // colors. - // - // We'd also like to check `config.tracing.use_color` here, but the - // config has not been loaded yet. - if !Self::outputs_are_ttys() { - term_colors = ColorChoice::Never; - } - } - let terminal = Terminal::new(term_colors); + // Note: It's important to use `ColorChoice::Never` here to avoid panicking in + // `register_components()` below if `color_eyre::install()` is called + // after `color_spantrace` has been initialized. 
+ let terminal = Terminal::new(ColorChoice::Never); Ok(vec![Box::new(terminal)]) } @@ -394,23 +345,9 @@ impl Application for ZebradApp { .build_global() .expect("unable to initialize rayon thread pool"); - self.config = Some(config); - - let cfg_ref = self - .config - .as_ref() - .expect("config is loaded before register_components"); - - let default_filter = command - .command - .as_ref() - .map(|zcmd| zcmd.default_tracing_filter(command.verbose, command.help)) - .unwrap_or("warn"); - let is_server = command - .command - .as_ref() - .map(ZebradCmd::is_server) - .unwrap_or(false); + let cfg_ref = &config; + let default_filter = command.cmd().default_tracing_filter(command.verbose); + let is_server = command.cmd().is_server(); // Ignore the configured tracing filter for short-lived utility commands let mut tracing_config = cfg_ref.tracing.clone(); @@ -436,7 +373,7 @@ impl Application for ZebradApp { // Activate the global span, so it's visible when we load the other // components. Space is at a premium here, so we use an empty message, // short commit hash, and the unique part of the network name. - let net = &self.config.clone().unwrap().network.network.to_string()[..4]; + let net = &config.network.network.to_string()[..4]; let global_span = if let Some(git_commit) = ZebradApp::git_commit() { error_span!("", zebrad = git_commit, net) } else { @@ -459,7 +396,10 @@ impl Application for ZebradApp { components.push(Box::new(MetricsEndpoint::new(&metrics_config)?)); } - self.state.components.register(components) + self.state.components_mut().register(components)?; + + // Fire callback to signal state in the application lifecycle + self.after_config(config) } /// Load this application's configuration and initialize its components. @@ -468,16 +408,7 @@ impl Application for ZebradApp { // Create and register components with the application. 
// We do this first to calculate a proper dependency ordering before // application configuration is processed - self.register_components(command)?; - - // Fire callback to signal state in the application lifecycle - let config = self - .config - .take() - .expect("register_components always populates the config"); - self.after_config(config)?; - - Ok(()) + self.register_components(command) } /// Post-configuration lifecycle callback. @@ -487,13 +418,13 @@ impl Application for ZebradApp { /// possible. fn after_config(&mut self, config: Self::Cfg) -> Result<(), FrameworkError> { // Configure components - self.state.components.after_config(&config)?; - self.config = Some(config); + self.state.components_mut().after_config(&config)?; + self.config.set_once(config); Ok(()) } - fn shutdown(&mut self, shutdown: Shutdown) -> ! { + fn shutdown(&self, shutdown: Shutdown) -> ! { // Some OSes require a flush to send all output to the terminal. // zebrad's logging uses Abscissa, so we flush its streams. 
// @@ -503,25 +434,33 @@ impl Application for ZebradApp { let _ = stdout().lock().flush(); let _ = stderr().lock().flush(); - if let Err(e) = self.state().components.shutdown(self, shutdown) { - let app_name = self.name().to_string(); + let shutdown_result = self.state().components().shutdown(self, shutdown); - // Swap out a fake app so we can trigger the destructor on the original - let _ = std::mem::take(self); + self.state() + .components_mut() + .get_downcast_mut::() + .map(Tracing::shutdown); + + if let Err(e) = shutdown_result { + let app_name = self.name().to_string(); fatal_error(app_name, &e); } - // Swap out a fake app so we can trigger the destructor on the original - let _ = std::mem::take(self); - match shutdown { Shutdown::Graceful => process::exit(0), Shutdown::Forced => process::exit(1), Shutdown::Crash => process::exit(2), } } +} - fn version(&self) -> Version { - app_version() - } +/// Boot the given application, parsing subcommand and options from +/// command-line arguments, and terminating when complete. +// +pub fn boot(app_cell: &'static AppCell) -> ! { + let args = + EntryPoint::process_cli_args(env::args_os().collect()).unwrap_or_else(|err| err.exit()); + + ZebradApp::run(app_cell, args); + process::exit(0); } diff --git a/zebrad/src/application/entry_point.rs b/zebrad/src/application/entry_point.rs deleted file mode 100644 index 16f262d1394..00000000000 --- a/zebrad/src/application/entry_point.rs +++ /dev/null @@ -1,111 +0,0 @@ -//! Zebrad EntryPoint - -use crate::{ - commands::{StartCmd, ZebradCmd}, - config::ZebradConfig, -}; - -use std::path::PathBuf; - -use abscissa_core::{ - command::{Command, Usage}, - config::Configurable, - FrameworkError, Options, Runnable, -}; - -// (See https://docs.rs/abscissa_core/0.5.2/src/abscissa_core/command/entrypoint.rs.html) -/// Toplevel entrypoint command. -/// -/// Handles obtaining toplevel help as well as verbosity settings. 
-#[derive(Debug, Options)] -pub struct EntryPoint { - /// Path to the configuration file - #[options(short = "c", help = "path to configuration file")] - pub config: Option, - - /// Obtain help about the current command - #[options(short = "h", help = "print help message")] - pub help: bool, - - /// Increase verbosity setting - #[options(short = "v", help = "be verbose")] - pub verbose: bool, - - /// Subcommand to execute. - /// - /// The `command` option will delegate option parsing to the command type, - /// starting at the first free argument. Defaults to start. - #[options(command, default_expr = "Some(ZebradCmd::Start(StartCmd::default()))")] - pub command: Option, -} - -impl EntryPoint { - /// Borrow the underlying command type - fn command(&self) -> &ZebradCmd { - if self.help { - let _ = Usage::for_command::().print_info(); - let _ = Usage::for_command::().print_usage(); - let _ = Usage::for_command::().print_usage(); - std::process::exit(0); - } - - self.command - .as_ref() - .expect("Some(ZebradCmd::Start(StartCmd::default()) as default value") - } -} - -impl Runnable for EntryPoint { - fn run(&self) { - self.command().run() - } -} - -impl Command for EntryPoint { - /// Name of this program as a string - fn name() -> &'static str { - ZebradCmd::name() - } - - /// Description of this program - fn description() -> &'static str { - ZebradCmd::description() - } - - /// Version of this program - fn version() -> &'static str { - ZebradCmd::version() - } - - /// Authors of this program - fn authors() -> &'static str { - ZebradCmd::authors() - } - - /// Get usage information for a particular subcommand (if available) - fn subcommand_usage(command: &str) -> Option { - ZebradCmd::subcommand_usage(command) - } -} - -impl Configurable for EntryPoint { - /// Path to the command's configuration file - fn config_path(&self) -> Option { - match &self.config { - // Use explicit `-c`/`--config` argument if passed - Some(cfg) => Some(cfg.clone()), - - // Otherwise defer to 
the toplevel command's config path logic - None => self.command.as_ref().and_then(|cmd| cmd.config_path()), - } - } - - /// Process the configuration after it has been loaded, potentially - /// modifying it or returning an error if options are incompatible - fn process_config(&self, config: ZebradConfig) -> Result { - match &self.command { - Some(cmd) => cmd.process_config(config), - None => Ok(config), - } - } -} diff --git a/zebrad/src/bin/zebrad/main.rs b/zebrad/src/bin/zebrad/main.rs index 962be2407a6..914fcc256b0 100644 --- a/zebrad/src/bin/zebrad/main.rs +++ b/zebrad/src/bin/zebrad/main.rs @@ -1,8 +1,8 @@ //! Main entry point for Zebrad -use zebrad::application::APPLICATION; +use zebrad::application::{boot, APPLICATION}; /// Process entry point for `zebrad` fn main() { - abscissa_core::boot(&APPLICATION); + boot(&APPLICATION); } diff --git a/zebrad/src/commands.rs b/zebrad/src/commands.rs index a306a5ab840..2f005f799b3 100644 --- a/zebrad/src/commands.rs +++ b/zebrad/src/commands.rs @@ -2,62 +2,49 @@ mod copy_state; mod download; +mod entry_point; mod generate; mod start; mod tip_height; -mod version; + +#[cfg(test)] +mod tests; use self::ZebradCmd::*; use self::{ copy_state::CopyStateCmd, download::DownloadCmd, generate::GenerateCmd, - tip_height::TipHeightCmd, version::VersionCmd, + tip_height::TipHeightCmd, }; -pub use self::start::StartCmd; +pub use self::{entry_point::EntryPoint, start::StartCmd}; use crate::config::ZebradConfig; -use abscissa_core::{ - config::Override, Command, Configurable, FrameworkError, Help, Options, Runnable, -}; +use abscissa_core::{config::Override, Command, Configurable, FrameworkError, Runnable}; use std::path::PathBuf; /// Zebrad Configuration Filename pub const CONFIG_FILE: &str = "zebrad.toml"; /// Zebrad Subcommands -#[derive(Command, Debug, Options)] +#[derive(Command, Debug, clap::Subcommand)] pub enum ZebradCmd { - /// The `copy-state` subcommand, used to debug cached chain state + /// The `copy-state` subcommand, used 
to debug cached chain state (expert users only) // TODO: hide this command from users in release builds (#3279) - #[options(help = "copy cached chain state (debug only)")] CopyState(CopyStateCmd), - /// The `download` subcommand - #[options(help = "pre-download required parameter files")] + // The `download` subcommand + /// Pre-download required Zcash Sprout and Sapling parameter files Download(DownloadCmd), - /// The `generate` subcommand - #[options(help = "generate a skeleton configuration")] + /// Generate a default `zebrad.toml` configuration Generate(GenerateCmd), - /// The `help` subcommand - #[options(help = "get usage information, \ - use help for subcommand usage information, \ - or --help flag to see top-level options")] - Help(Help), - - /// The `start` subcommand - #[options(help = "start the application")] + /// Start the application (default command) Start(StartCmd), - /// The `tip-height` subcommand - #[options(help = "get the block height of Zebra's persisted chain state")] + /// Print the tip block height of Zebra's chain state on disk TipHeight(TipHeightCmd), - - /// The `version` subcommand - #[options(help = "display version information")] - Version(VersionCmd), } impl ZebradCmd { @@ -73,27 +60,26 @@ impl ZebradCmd { CopyState(_) | Start(_) => true, // Utility commands that don't use server components - Download(_) | Generate(_) | Help(_) | TipHeight(_) | Version(_) => false, + Download(_) | Generate(_) | TipHeight(_) => false, } } /// Returns the default log level for this command, based on the `verbose` command line flag. /// /// Some commands need to be quiet by default. - pub(crate) fn default_tracing_filter(&self, verbose: bool, help: bool) -> &'static str { + pub(crate) fn default_tracing_filter(&self, verbose: bool) -> &'static str { let only_show_warnings = match self { // Commands that generate quiet output by default. // This output: // - is used by automated tools, or // - needs to be read easily. 
- Generate(_) | TipHeight(_) | Help(_) | Version(_) => true, + Generate(_) | TipHeight(_) => true, // Commands that generate informative logging output by default. CopyState(_) | Download(_) | Start(_) => false, }; - // set to warn so that usage info is printed without info-level logs from component registration - if help || (only_show_warnings && !verbose) { + if only_show_warnings && !verbose { "warn" } else if only_show_warnings || !verbose { "info" @@ -109,10 +95,8 @@ impl Runnable for ZebradCmd { CopyState(cmd) => cmd.run(), Download(cmd) => cmd.run(), Generate(cmd) => cmd.run(), - ZebradCmd::Help(cmd) => cmd.run(), Start(cmd) => cmd.run(), TipHeight(cmd) => cmd.run(), - Version(cmd) => cmd.run(), } } } diff --git a/zebrad/src/commands/copy_state.rs b/zebrad/src/commands/copy_state.rs index 11d024d2c73..ffe9575ddec 100644 --- a/zebrad/src/commands/copy_state.rs +++ b/zebrad/src/commands/copy_state.rs @@ -35,7 +35,7 @@ use std::{cmp::min, path::PathBuf}; -use abscissa_core::{config, Command, FrameworkError, Options, Runnable}; +use abscissa_core::{config, Command, FrameworkError, Runnable}; use color_eyre::eyre::{eyre, Report}; use tokio::time::Instant; use tower::{Service, ServiceExt}; @@ -45,6 +45,7 @@ use zebra_state as old_zs; use zebra_state as new_zs; use crate::{ + application::ZebradApp, components::tokio::{RuntimeRun, TokioComponent}, config::ZebradConfig, prelude::*, @@ -54,11 +55,11 @@ use crate::{ /// How often we log info-level progress messages const PROGRESS_HEIGHT_INTERVAL: u32 = 5_000; -/// `copy-state` subcommand -#[derive(Command, Debug, Options)] +/// copy cached chain state (expert users only) +#[derive(Command, Debug, clap::Parser)] pub struct CopyStateCmd { /// Source height that the copy finishes at. - #[options(help = "stop copying at this source height")] + #[clap(long, short, help = "stop copying at this source height")] max_source_height: Option, /// Path to a Zebra config.toml for the target state. 
@@ -66,26 +67,30 @@ pub struct CopyStateCmd { /// /// Zebra only uses the state options from this config. /// All other options are ignored. - #[options(help = "config file path for the target state (default: ephemeral), \ - the source state uses the main zebrad config")] + #[clap( + long, + short, + help = "config file path for the target state (default: ephemeral), \ + the source state uses the main zebrad config" + )] target_config_path: Option, /// Filter strings which override the config file and defaults - #[options(free, help = "tracing filters which override the zebrad.toml config")] + #[clap(help = "tracing filters which override the zebrad.toml config")] filters: Vec, } impl CopyStateCmd { /// Configure and launch the copy command async fn start(&self) -> Result<(), Report> { - let base_config = app_config().clone(); + let base_config = APPLICATION.config(); let source_config = base_config.state.clone(); // The default load_config impl doesn't actually modify the app config. let target_config = self .target_config_path .as_ref() - .map(|path| app_writer().load_config(path)) + .map(|path| ZebradApp::default().load_config(path)) .transpose()? .map(|app_config| app_config.state) .unwrap_or_else(new_zs::Config::ephemeral); @@ -394,9 +399,9 @@ impl Runnable for CopyStateCmd { target_config_path = ?self.target_config_path, "starting cached chain state copy" ); - let rt = app_writer() - .state_mut() - .components + let rt = APPLICATION + .state() + .components_mut() .get_downcast_mut::() .expect("TokioComponent should be available") .rt diff --git a/zebrad/src/commands/download.rs b/zebrad/src/commands/download.rs index 83881c07140..4feefcb9e58 100644 --- a/zebrad/src/commands/download.rs +++ b/zebrad/src/commands/download.rs @@ -5,10 +5,10 @@ //! This command should be used if you're launching lots of `zebrad start` instances for testing, //! or you want to include the parameter files in a distribution package. 
-use abscissa_core::{Command, Options, Runnable}; +use abscissa_core::{Command, Runnable}; -/// `download` subcommand -#[derive(Command, Debug, Default, Options)] +/// Pre-download required Zcash Sprout and Sapling parameter files +#[derive(Command, Debug, Default, clap::Parser)] pub struct DownloadCmd {} impl DownloadCmd { diff --git a/zebrad/src/commands/entry_point.rs b/zebrad/src/commands/entry_point.rs new file mode 100644 index 00000000000..5888c0e5ef4 --- /dev/null +++ b/zebrad/src/commands/entry_point.rs @@ -0,0 +1,134 @@ +//! Zebrad EntryPoint + +use std::cmp::min; + +use abscissa_core::{Command, Configurable, FrameworkError, Runnable}; +use clap::Parser; +use std::{ffi::OsString, path::PathBuf}; + +use crate::config::ZebradConfig; + +use super::ZebradCmd; + +/// Toplevel entrypoint command. +/// +/// Handles obtaining toplevel help as well as verbosity settings. +#[derive(Debug, clap::Parser)] +#[clap( + version = clap::crate_version!(), + author="Zcash Foundation ", + help_template = "\ +{name} {version}\n +{author}\n +{usage-heading} {usage}\n +{all-args}\ +" +)] +pub struct EntryPoint { + /// Subcommand to execute. + /// + /// The `command` option will delegate option parsing to the command type, + /// starting at the first free argument. Defaults to start. + #[clap(subcommand)] + pub cmd: Option, + + /// Path to the configuration file + #[clap(long, short, help = "path to configuration file")] + pub config: Option, + + /// Increase verbosity setting + #[clap(long, short, help = "be verbose")] + pub verbose: bool, + + /// Filter strings which override the config file and defaults + // This can be applied to the default start command if no subcommand is provided. 
+ #[clap(help = "tracing filters which override the zebrad.toml config")] + filters: Vec, +} + +impl EntryPoint { + /// Borrow the command in the option + /// + /// # Panics + /// + /// If `cmd` is None + pub fn cmd(&self) -> &ZebradCmd { + self.cmd + .as_ref() + .expect("should default to start if not provided") + } + + /// Returns a string that parses to the default subcommand + pub fn default_cmd_as_str() -> &'static str { + "start" + } + + /// Process command arguments and insert the default subcommand + /// if no subcommand is provided. + pub fn process_cli_args(mut args: Vec) -> clap::error::Result> { + // Check if the provided arguments include a subcommand + let should_add_default_subcommand = EntryPoint::try_parse_from(&args)?.cmd.is_none(); + + // Add the default subcommand to args after the top-level args if cmd is None + if should_add_default_subcommand { + // try_parse_from currently produces an error if the first argument is not the binary name, + let mut num_top_level_args = 1; + + // update last_top_level_arg_idx to the number of top-level args + for (idx, arg) in args.iter().enumerate() { + num_top_level_args = match arg.to_str() { + Some("--verbose" | "-v" | "--version" | "-V" | "--help") => idx + 1, + Some("--config" | "-c") => idx + 2, + _ => num_top_level_args, + } + } + + num_top_level_args = min(num_top_level_args, args.len()); + args.insert(num_top_level_args, EntryPoint::default_cmd_as_str().into()); + } + + Ok(args) + } +} + +impl Runnable for EntryPoint { + fn run(&self) { + self.cmd().run() + } +} + +impl Command for EntryPoint { + /// Name of this program as a string + fn name() -> &'static str { + ZebradCmd::name() + } + + /// Description of this program + fn description() -> &'static str { + ZebradCmd::description() + } + + /// Authors of this program + fn authors() -> &'static str { + ZebradCmd::authors() + } +} + +impl Configurable for EntryPoint { + /// Path to the command's configuration file + fn config_path(&self) -> Option { + 
match &self.config { + // Use explicit `-c`/`--config` argument if passed + Some(cfg) => Some(cfg.clone()), + + // Otherwise defer to the toplevel command's config path logic + None => self.cmd().config_path(), + } + } + + /// Process the configuration after it has been loaded, potentially + /// modifying it or returning an error if options are incompatible + fn process_config(&self, config: ZebradConfig) -> Result { + self.cmd().process_config(config) + } +} diff --git a/zebrad/src/commands/generate.rs b/zebrad/src/commands/generate.rs index 649e029a171..de9a3019c53 100644 --- a/zebrad/src/commands/generate.rs +++ b/zebrad/src/commands/generate.rs @@ -1,13 +1,20 @@ -//! `generate` subcommand - generates a skeleton config. +//! `generate` subcommand - generates a default `zebrad.toml` config. use crate::config::ZebradConfig; -use abscissa_core::{Command, Options, Runnable}; +use abscissa_core::{Command, Runnable}; +use clap::Parser; -/// `generate` subcommand -#[derive(Command, Debug, Options)] +/// Generate a default `zebrad.toml` configuration +#[derive(Command, Debug, Default, Parser)] pub struct GenerateCmd { /// The file to write the generated config to. - #[options(help = "The file to write the generated config to (stdout if unspecified)")] + // + // TODO: use PathBuf here instead, to support non-UTF-8 paths + #[clap( + long, + short, + help = "The file to write the generated config to (stdout if unspecified)" + )] output_file: Option, } diff --git a/zebrad/src/commands/start.rs b/zebrad/src/commands/start.rs index 3de502113a3..edbc29d291d 100644 --- a/zebrad/src/commands/start.rs +++ b/zebrad/src/commands/start.rs @@ -71,7 +71,7 @@ //! //! Some of the diagnostic features are optional, and need to be enabled at compile-time. 
-use abscissa_core::{config, Command, FrameworkError, Options, Runnable}; +use abscissa_core::{config, Command, FrameworkError, Runnable}; use color_eyre::eyre::{eyre, Report}; use futures::FutureExt; use tokio::{pin, select, sync::oneshot}; @@ -94,17 +94,17 @@ use crate::{ prelude::*, }; -/// `start` subcommand -#[derive(Command, Debug, Options, Default)] +/// Start the application (default command) +#[derive(Command, Debug, Default, clap::Parser)] pub struct StartCmd { /// Filter strings which override the config file and defaults - #[options(free, help = "tracing filters which override the zebrad.toml config")] + #[clap(help = "tracing filters which override the zebrad.toml config")] filters: Vec, } impl StartCmd { async fn start(&self) -> Result<(), Report> { - let config = app_config().clone(); + let config = APPLICATION.config(); info!("initializing node state"); let (_, max_checkpoint_height) = zebra_consensus::router::init_checkpoint_list( @@ -202,9 +202,9 @@ impl StartCmd { // Launch RPC server let (rpc_task_handle, rpc_tx_queue_task_handle, rpc_server) = RpcServer::spawn( - config.rpc, + config.rpc.clone(), #[cfg(feature = "getblocktemplate-rpcs")] - config.mining, + config.mining.clone(), #[cfg(not(feature = "getblocktemplate-rpcs"))] (), app_version(), @@ -428,7 +428,7 @@ impl StartCmd { /// Returns the bound for the state service buffer, /// based on the configurations of the services that use the state concurrently. fn state_buffer_bound() -> usize { - let config = app_config().clone(); + let config = APPLICATION.config(); // Ignore the checkpoint verify limit, because it is very large. // @@ -450,9 +450,9 @@ impl Runnable for StartCmd { /// Start the application. 
fn run(&self) { info!("Starting zebrad"); - let rt = app_writer() - .state_mut() - .components + let rt = APPLICATION + .state() + .components_mut() .get_downcast_mut::() .expect("TokioComponent should be available") .rt diff --git a/zebrad/src/commands/tests.rs b/zebrad/src/commands/tests.rs new file mode 100644 index 00000000000..f87c48b119a --- /dev/null +++ b/zebrad/src/commands/tests.rs @@ -0,0 +1,46 @@ +//! Tests for parsing zebrad commands + +use clap::Parser; + +use crate::commands::ZebradCmd; + +use super::EntryPoint; + +#[test] +fn args_with_subcommand_pass_through() { + let test_cases = [ + (false, true, false, vec!["zebrad"]), + (false, true, true, vec!["zebrad", "-v"]), + (false, true, true, vec!["zebrad", "--verbose"]), + (true, false, false, vec!["zebrad", "-h"]), + (true, false, false, vec!["zebrad", "--help"]), + (false, true, false, vec!["zebrad", "start"]), + (false, true, true, vec!["zebrad", "-v", "start"]), + (false, true, false, vec!["zebrad", "warn"]), + (false, true, false, vec!["zebrad", "start", "warn"]), + (true, false, false, vec!["zebrad", "help", "warn"]), + ]; + + for (should_exit, should_be_start, should_be_verbose, args) in test_cases { + let args = EntryPoint::process_cli_args(args.iter().map(Into::into).collect()); + + if should_exit { + args.expect_err("parsing invalid args or 'help'/'--help' should return an error"); + continue; + } + + let args: Vec = args.expect("args should parse into EntryPoint"); + + let args = + EntryPoint::try_parse_from(args).expect("hardcoded args should parse successfully"); + + assert!(args.config.is_none(), "args.config should be none"); + assert!(args.cmd.is_some(), "args.cmd should not be none"); + assert_eq!( + args.verbose, should_be_verbose, + "process_cli_args should preserve top-level args" + ); + + assert_eq!(matches!(args.cmd(), ZebradCmd::Start(_)), should_be_start,); + } +} diff --git a/zebrad/src/commands/tip_height.rs b/zebrad/src/commands/tip_height.rs index bc045175d4c..8ace683d5ac 
100644 --- a/zebrad/src/commands/tip_height.rs +++ b/zebrad/src/commands/tip_height.rs @@ -5,7 +5,8 @@ use std::path::PathBuf; -use abscissa_core::{Command, Options, Runnable}; +use abscissa_core::{Application, Command, Runnable}; +use clap::Parser; use color_eyre::eyre::{eyre, Result}; use zebra_chain::{ @@ -15,17 +16,17 @@ use zebra_chain::{ }; use zebra_state::LatestChainTip; -use crate::prelude::app_config; +use crate::prelude::APPLICATION; -/// `zebra-tip-height` subcommand -#[derive(Command, Debug, Options)] +/// Print the tip block height of Zebra's chain state on disk +#[derive(Command, Debug, Default, Parser)] pub struct TipHeightCmd { /// Path to Zebra's cached state. - #[options(help = "path to directory with the Zebra chain state")] + #[clap(long, short, help = "path to directory with the Zebra chain state")] cache_dir: Option, /// The network to obtain the chain tip. - #[options(default = "mainnet", help = "the network of the chain to load")] + #[clap(long, short, help = "the network of the chain to load")] network: Network, } @@ -54,7 +55,7 @@ impl TipHeightCmd { /// Starts a state service using the `cache_dir` and `network` from the provided arguments. fn load_latest_chain_tip(&self) -> LatestChainTip { - let mut config = app_config().state.clone(); + let mut config = APPLICATION.config().state.clone(); if let Some(cache_dir) = self.cache_dir.clone() { config.cache_dir = cache_dir; diff --git a/zebrad/src/commands/version.rs b/zebrad/src/commands/version.rs deleted file mode 100644 index 047b9b12a75..00000000000 --- a/zebrad/src/commands/version.rs +++ /dev/null @@ -1,18 +0,0 @@ -//! 
`version` subcommand - -#![allow(clippy::never_loop)] - -use super::ZebradCmd; -use abscissa_core::{Command, Options, Runnable}; - -/// `version` subcommand -#[derive(Command, Debug, Default, Options)] -pub struct VersionCmd {} - -impl Runnable for VersionCmd { - /// Print version message - #[allow(clippy::print_stdout)] - fn run(&self) { - println!("{} {}", ZebradCmd::name(), ZebradCmd::version()); - } -} diff --git a/zebrad/src/components/tokio.rs b/zebrad/src/components/tokio.rs index f4225bebd57..802ebc31052 100644 --- a/zebrad/src/components/tokio.rs +++ b/zebrad/src/components/tokio.rs @@ -83,7 +83,7 @@ impl RuntimeRun for Runtime { } Err(error) => { warn!(?error, "shutting down Zebra due to an error"); - app_writer().shutdown(Shutdown::Forced); + APPLICATION.shutdown(Shutdown::Forced); } } } diff --git a/zebrad/src/components/tracing/component.rs b/zebrad/src/components/tracing/component.rs index 0cf0456ee6d..769d6db46f7 100644 --- a/zebrad/src/components/tracing/component.rs +++ b/zebrad/src/components/tracing/component.rs @@ -47,7 +47,7 @@ pub struct Tracing { /// responsible for flushing any remaining logs when the program terminates. // // Correctness: must be listed last in the struct, so it drops after other drops have logged. - _guard: WorkerGuard, + _guard: Option, } impl Tracing { @@ -94,7 +94,7 @@ impl Tracing { // Builds a lossy NonBlocking logger with a default line limit of 128_000 or an explicit buffer_limit. // The write method queues lines down a bounded channel with this capacity to a worker thread that writes to stdout. // Increments error_counter and drops lines when the buffer is full. 
- let (non_blocking, _guard) = NonBlockingBuilder::default() + let (non_blocking, worker_guard) = NonBlockingBuilder::default() .buffered_lines_limit(config.buffer_limit.max(100)) .finish(writer); @@ -275,10 +275,21 @@ impl Tracing { initial_filter: filter, #[cfg(feature = "flamegraph")] flamegrapher, - _guard, + _guard: Some(worker_guard), }) } + /// Drops guard for worker thread of non-blocking logger, + /// to flush any remaining logs when the program terminates. + pub fn shutdown(&mut self) { + self.filter_handle.take(); + + #[cfg(feature = "flamegraph")] + self.flamegrapher.take(); + + self._guard.take(); + } + /// Return the currently-active tracing filter. pub fn filter(&self) -> String { if let Some(filter_handle) = self.filter_handle.as_ref() { diff --git a/zebrad/src/components/tracing/endpoint.rs b/zebrad/src/components/tracing/endpoint.rs index 2bd95059246..c4c6440def8 100644 --- a/zebrad/src/components/tracing/endpoint.rs +++ b/zebrad/src/components/tracing/endpoint.rs @@ -120,9 +120,9 @@ To set the filter, POST the new filter string to /filter: (&Method::GET, "/filter") => Response::builder() .status(StatusCode::OK) .body(Body::from( - app_reader() + APPLICATION .state() - .components + .components() .get_downcast_ref::() .expect("Tracing component should be available") .filter(), @@ -130,9 +130,9 @@ To set the filter, POST the new filter string to /filter: .expect("response with known status code cannot fail"), (&Method::POST, "/filter") => match read_filter(req).await { Ok(filter) => { - app_reader() + APPLICATION .state() - .components + .components() .get_downcast_ref::() .expect("Tracing component should be available") .reload_filter(filter); diff --git a/zebrad/src/prelude.rs b/zebrad/src/prelude.rs index b4d6eb835a6..b537590734e 100644 --- a/zebrad/src/prelude.rs +++ b/zebrad/src/prelude.rs @@ -2,7 +2,7 @@ //! which are generally useful and should be available everywhere. 
/// Application state accessors -pub use crate::application::{app_config, app_reader, app_writer}; +pub use crate::application::APPLICATION; /// Commonly used Abscissa traits pub use abscissa_core::{Application, Command, Runnable}; diff --git a/zebrad/tests/acceptance.rs b/zebrad/tests/acceptance.rs index 22fb1eeac79..c9953fba1e2 100644 --- a/zebrad/tests/acceptance.rs +++ b/zebrad/tests/acceptance.rs @@ -267,11 +267,11 @@ fn help_no_args() -> Result<()> { is_zebrad_version, &output.output.stdout, "stdout", - "a valid zebrad semantic version", + "are valid zebrad semantic versions", )?; - // Make sure we are in help by looking usage string - output.stdout_line_contains("USAGE:")?; + // Make sure we are in help by looking for the usage string + output.stdout_line_contains("Usage:")?; Ok(()) } @@ -536,7 +536,7 @@ fn version_no_args() -> Result<()> { let testdir = testdir()?.with_config(&mut default_test_config()?)?; - let child = testdir.spawn_child(args!["version"])?; + let child = testdir.spawn_child(args!["--version"])?; let output = child.wait_with_output()?; let output = output.assert_success()?; @@ -558,15 +558,23 @@ fn version_args() -> Result<()> { let testdir = testdir()?.with_config(&mut default_test_config()?)?; let testdir = &testdir; - // unexpected free argument `argument` - let child = testdir.spawn_child(args!["version", "argument"])?; + // unrecognized option `-f` + let child = testdir.spawn_child(args!["tip-height", "-f"])?; let output = child.wait_with_output()?; output.assert_failure()?; - // unrecognized option `-f` - let child = testdir.spawn_child(args!["version", "-f"])?; + // unrecognized option `-f` is ignored + let child = testdir.spawn_child(args!["--version", "-f"])?; let output = child.wait_with_output()?; - output.assert_failure()?; + let output = output.assert_success()?; + + // The output should only contain the version + output.output_check( + is_zebrad_version, + &output.output.stdout, + "stdout", + "a valid zebrad semantic 
version", + )?; Ok(()) } From f3e330995ff87b992b22ecebc158ab33282a0312 Mon Sep 17 00:00:00 2001 From: teor Date: Wed, 7 Jun 2023 16:04:04 +1000 Subject: [PATCH 052/265] fix(build): Fix new nightly clippy lints and cargo feature resolution (#6814) * Implement minor and patch database format versions * Log and update database format versions when opening database * Refactor the current list of column families into a constant * Open all available column families, including from future Zebra versions * Refactor note commitment tree lookups to go through the height methods * Make Sapling/Orchard note commitment tree lookup forwards compatible * Ignore errors reading column family lists from disk * Update format version comments and TODOs * Correctly log newly created database formats * Fix a new cargo lint about resolver versions * cargo clippy --fix --all-features --all-targets * cargo fmt --all * Add missing tokio feature in the state, revealed by the new resolver * Add missing dev dependencies in zebra-node-services * Add a missing `tokio` feature from PR #6813 --------- Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com> --- Cargo.toml | 3 +++ zebra-network/Cargo.toml | 2 +- .../src/peer/connection/tests/prop.rs | 2 +- .../src/peer_set/initialize/tests/vectors.rs | 8 +++--- zebra-network/src/peer_set/set/tests/prop.rs | 2 +- .../src/peer_set/set/tests/vectors.rs | 27 +++++++------------ zebra-node-services/Cargo.toml | 8 ++++++ zebra-rpc/src/methods/tests/prop.rs | 24 ++++++++--------- zebra-rpc/src/methods/tests/vectors.rs | 14 +++++----- zebra-rpc/src/server/tests/vectors.rs | 8 +++--- zebra-state/Cargo.toml | 2 +- .../components/inbound/tests/fake_peer_set.rs | 20 +++++++------- .../components/inbound/tests/real_peer_set.rs | 20 +++++++------- zebrad/src/components/sync/tests/vectors.rs | 10 +++---- 14 files changed, 76 insertions(+), 74 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 48ded2707ac..d77eb87b45a 100644 --- 
a/Cargo.toml +++ b/Cargo.toml @@ -14,6 +14,9 @@ members = [ "tower-fallback", ] +# Use the edition 2021 dependency resolver in the workspace, to match the crates +resolver = "2" + [profile.dev] panic = "abort" diff --git a/zebra-network/Cargo.toml b/zebra-network/Cargo.toml index 93b87959e3b..3c841809af4 100644 --- a/zebra-network/Cargo.toml +++ b/zebra-network/Cargo.toml @@ -43,7 +43,7 @@ tempfile = "3.5.0" thiserror = "1.0.40" futures = "0.3.28" -tokio = { version = "1.28.2", features = ["fs", "net", "time", "tracing", "macros", "rt-multi-thread"] } +tokio = { version = "1.28.2", features = ["fs", "io-util", "net", "time", "tracing", "macros", "rt-multi-thread"] } tokio-stream = { version = "0.1.14", features = ["sync", "time"] } tokio-util = { version = "0.7.8", features = ["codec"] } tower = { version = "0.4.13", features = ["retry", "discover", "load", "load-shed", "timeout", "util", "buffer"] } diff --git a/zebra-network/src/peer/connection/tests/prop.rs b/zebra-network/src/peer/connection/tests/prop.rs index 9fc0390a981..3c4b2d51ca2 100644 --- a/zebra-network/src/peer/connection/tests/prop.rs +++ b/zebra-network/src/peer/connection/tests/prop.rs @@ -112,7 +112,7 @@ proptest! { // Check the state after the response let error = shared_error_slot.try_get_error(); - assert!(matches!(error, None)); + assert!(error.is_none()); inbound_service.expect_no_requests().await?; diff --git a/zebra-network/src/peer_set/initialize/tests/vectors.rs b/zebra-network/src/peer_set/initialize/tests/vectors.rs index 9df6b29ec0b..d9fdf9a1535 100644 --- a/zebra-network/src/peer_set/initialize/tests/vectors.rs +++ b/zebra-network/src/peer_set/initialize/tests/vectors.rs @@ -1176,7 +1176,7 @@ async fn add_initial_peers_is_rate_limited() { // Check for panics or errors in the address book updater task. 
let updater_result = address_book_updater_task_handle.now_or_never(); assert!( - matches!(updater_result, None) + updater_result.is_none() || matches!(updater_result, Some(Err(ref join_error)) if join_error.is_cancelled()) // The task method only returns one kind of error. // We can't check for error equality due to type erasure, @@ -1643,8 +1643,7 @@ where // Check for panics or errors in the crawler. let crawl_result = crawl_task_handle.now_or_never(); assert!( - matches!(crawl_result, None) - || matches!(crawl_result, Some(Err(ref e)) if e.is_cancelled()), + crawl_result.is_none() || matches!(crawl_result, Some(Err(ref e)) if e.is_cancelled()), "unexpected error or panic in peer crawler task: {crawl_result:?}", ); @@ -1749,8 +1748,7 @@ where // Check for panics or errors in the listener. let listen_result = listen_task_handle.now_or_never(); assert!( - matches!(listen_result, None) - || matches!(listen_result, Some(Err(ref e)) if e.is_cancelled()), + listen_result.is_none() || matches!(listen_result, Some(Err(ref e)) if e.is_cancelled()), "unexpected error or panic in inbound peer listener task: {listen_result:?}", ); diff --git a/zebra-network/src/peer_set/set/tests/prop.rs b/zebra-network/src/peer_set/set/tests/prop.rs index 1a95f31e642..f8388880bae 100644 --- a/zebra-network/src/peer_set/set/tests/prop.rs +++ b/zebra-network/src/peer_set/set/tests/prop.rs @@ -306,7 +306,7 @@ where .all(|harness| harness.remote_version() < minimum_version); if all_peers_are_outdated { - prop_assert!(matches!(poll_result, None)); + prop_assert!(poll_result.is_none()); } else { prop_assert!(matches!(poll_result, Some(Ok(_)))); } diff --git a/zebra-network/src/peer_set/set/tests/vectors.rs b/zebra-network/src/peer_set/set/tests/vectors.rs index 1f58f0f0b0f..89ff294a827 100644 --- a/zebra-network/src/peer_set/set/tests/vectors.rs +++ b/zebra-network/src/peer_set/set/tests/vectors.rs @@ -336,12 +336,10 @@ fn peer_set_route_inv_advertised_registry_order(advertised_first: bool) { }; 
assert!( - matches!( - other_handle - .try_to_receive_outbound_client_request() - .request(), - None - ), + other_handle + .try_to_receive_outbound_client_request() + .request() + .is_none(), "request routed to non-advertised peer", ); }); @@ -430,12 +428,10 @@ fn peer_set_route_inv_missing_registry_order(missing_first: bool) { }; assert!( - matches!( - missing_handle - .try_to_receive_outbound_client_request() - .request(), - None - ), + missing_handle + .try_to_receive_outbound_client_request() + .request() + .is_none(), "request routed to missing peer", ); @@ -529,12 +525,9 @@ fn peer_set_route_inv_all_missing_fail() { let missing_handle = &mut handles[0]; assert!( - matches!( - missing_handle + missing_handle .try_to_receive_outbound_client_request() - .request(), - None - ), + .request().is_none(), "request routed to missing peer", ); diff --git a/zebra-node-services/Cargo.toml b/zebra-node-services/Cargo.toml index 2ac0df46cb1..942093cd06b 100644 --- a/zebra-node-services/Cargo.toml +++ b/zebra-node-services/Cargo.toml @@ -37,3 +37,11 @@ jsonrpc-core = { version = "18.0.0", optional = true } reqwest = { version = "0.11.18", optional = true } serde = { version = "1.0.163", optional = true } serde_json = { version = "1.0.95", optional = true } + +[dev-dependencies] + +color-eyre = "0.6.2" +jsonrpc-core = "18.0.0" +reqwest = "0.11.18" +serde = "1.0.163" +serde_json = "1.0.95" diff --git a/zebra-rpc/src/methods/tests/prop.rs b/zebra-rpc/src/methods/tests/prop.rs index 9d2e6610b7d..388e2600728 100644 --- a/zebra-rpc/src/methods/tests/prop.rs +++ b/zebra-rpc/src/methods/tests/prop.rs @@ -76,7 +76,7 @@ proptest! { // The queue task should continue without errors or panics let rpc_tx_queue_task_result = rpc_tx_queue_task_handle.now_or_never(); - prop_assert!(matches!(rpc_tx_queue_task_result, None)); + prop_assert!(rpc_tx_queue_task_result.is_none()); Ok::<_, TestCaseError>(()) })?; @@ -137,7 +137,7 @@ proptest! 
{ // The queue task should continue without errors or panics let rpc_tx_queue_task_result = rpc_tx_queue_task_handle.now_or_never(); - prop_assert!(matches!(rpc_tx_queue_task_result, None)); + prop_assert!(rpc_tx_queue_task_result.is_none()); Ok::<_, TestCaseError>(()) })?; @@ -197,7 +197,7 @@ proptest! { // The queue task should continue without errors or panics let rpc_tx_queue_task_result = rpc_tx_queue_task_handle.now_or_never(); - prop_assert!(matches!(rpc_tx_queue_task_result, None)); + prop_assert!(rpc_tx_queue_task_result.is_none()); Ok::<_, TestCaseError>(()) })?; @@ -251,7 +251,7 @@ proptest! { // The queue task should continue without errors or panics let rpc_tx_queue_task_result = rpc_tx_queue_task_handle.now_or_never(); - prop_assert!(matches!(rpc_tx_queue_task_result, None)); + prop_assert!(rpc_tx_queue_task_result.is_none()); Ok::<_, TestCaseError>(()) })?; @@ -307,7 +307,7 @@ proptest! { // The queue task should continue without errors or panics let rpc_tx_queue_task_result = rpc_tx_queue_task_handle.now_or_never(); - prop_assert!(matches!(rpc_tx_queue_task_result, None)); + prop_assert!(rpc_tx_queue_task_result.is_none()); Ok::<_, TestCaseError>(()) })?; @@ -405,7 +405,7 @@ proptest! { // The queue task should continue without errors or panics let rpc_tx_queue_task_result = rpc_tx_queue_task_handle.now_or_never(); - prop_assert!(matches!(rpc_tx_queue_task_result, None)); + prop_assert!(rpc_tx_queue_task_result.is_none()); Ok::<_, TestCaseError>(()) })?; @@ -461,7 +461,7 @@ proptest! { // The queue task should continue without errors or panics let rpc_tx_queue_task_result = rpc_tx_queue_task_handle.now_or_never(); - prop_assert!(matches!(rpc_tx_queue_task_result, None)); + prop_assert!(rpc_tx_queue_task_result.is_none()); Ok::<_, TestCaseError>(()) })?; @@ -519,7 +519,7 @@ proptest! 
{ // The queue task should continue without errors or panics let rpc_tx_queue_task_result = rpc_tx_queue_task_handle.now_or_never(); - prop_assert!(matches!(rpc_tx_queue_task_result, None)); + prop_assert!(rpc_tx_queue_task_result.is_none()); Ok::<_, TestCaseError>(()) })?; @@ -552,7 +552,7 @@ proptest! { // The queue task should continue without errors or panics let rpc_tx_queue_task_result = rpc_tx_queue_task_handle.now_or_never(); - prop_assert!(matches!(rpc_tx_queue_task_result, None)); + prop_assert!(rpc_tx_queue_task_result.is_none()); runtime.block_on(async move { mempool.expect_no_requests().await?; @@ -631,7 +631,7 @@ proptest! { // The queue task should continue without errors or panics let rpc_tx_queue_task_result = rpc_tx_queue_task_handle.now_or_never(); - prop_assert!(matches!(rpc_tx_queue_task_result, None)); + prop_assert!(rpc_tx_queue_task_result.is_none()); // check no requests were made during this test runtime.block_on(async move { @@ -855,7 +855,7 @@ proptest! { // The queue task should continue without errors or panics let rpc_tx_queue_task_result = rpc_tx_queue_task_handle.now_or_never(); - prop_assert!(matches!(rpc_tx_queue_task_result, None)); + prop_assert!(rpc_tx_queue_task_result.is_none()); Ok::<_, TestCaseError>(()) })?; @@ -955,7 +955,7 @@ proptest! 
{ // The queue task should continue without errors or panics let rpc_tx_queue_task_result = rpc_tx_queue_task_handle.now_or_never(); - prop_assert!(matches!(rpc_tx_queue_task_result, None)); + prop_assert!(rpc_tx_queue_task_result.is_none()); Ok::<_, TestCaseError>(()) })?; diff --git a/zebra-rpc/src/methods/tests/vectors.rs b/zebra-rpc/src/methods/tests/vectors.rs index 2185a74d158..f3b22fce482 100644 --- a/zebra-rpc/src/methods/tests/vectors.rs +++ b/zebra-rpc/src/methods/tests/vectors.rs @@ -52,7 +52,7 @@ async fn rpc_getinfo() { // The queue task should continue without errors or panics let rpc_tx_queue_task_result = rpc_tx_queue_task_handle.now_or_never(); - assert!(matches!(rpc_tx_queue_task_result, None)); + assert!(rpc_tx_queue_task_result.is_none()); } #[tokio::test(flavor = "multi_thread")] @@ -211,7 +211,7 @@ async fn rpc_getblock() { // The queue task should continue without errors or panics let rpc_tx_queue_task_result = rpc_tx_queue_task_handle.now_or_never(); - assert!(matches!(rpc_tx_queue_task_result, None)); + assert!(rpc_tx_queue_task_result.is_none()); } #[tokio::test(flavor = "multi_thread")] @@ -253,7 +253,7 @@ async fn rpc_getblock_parse_error() { // The queue task should continue without errors or panics let rpc_tx_queue_task_result = rpc_tx_queue_task_handle.now_or_never(); - assert!(matches!(rpc_tx_queue_task_result, None)); + assert!(rpc_tx_queue_task_result.is_none()); } #[tokio::test(flavor = "multi_thread")] @@ -307,7 +307,7 @@ async fn rpc_getblock_missing_error() { // The queue task should continue without errors or panics let rpc_tx_queue_task_result = rpc_tx_queue_task_handle.now_or_never(); - assert!(matches!(rpc_tx_queue_task_result, None)); + assert!(rpc_tx_queue_task_result.is_none()); } #[tokio::test(flavor = "multi_thread")] @@ -355,7 +355,7 @@ async fn rpc_getbestblockhash() { // The queue task should continue without errors or panics let rpc_tx_queue_task_result = rpc_tx_queue_task_handle.now_or_never(); - 
assert!(matches!(rpc_tx_queue_task_result, None)); + assert!(rpc_tx_queue_task_result.is_none()); } #[tokio::test(flavor = "multi_thread")] @@ -519,7 +519,7 @@ async fn rpc_getrawtransaction() { // The queue task should continue without errors or panics let rpc_tx_queue_task_result = rpc_tx_queue_task_handle.now_or_never(); - assert!(matches!(rpc_tx_queue_task_result, None)); + assert!(rpc_tx_queue_task_result.is_none()); } #[tokio::test(flavor = "multi_thread")] @@ -625,7 +625,7 @@ async fn rpc_getaddresstxids_invalid_arguments() { // The queue task should continue without errors or panics let rpc_tx_queue_task_result = rpc_tx_queue_task_handle.now_or_never(); - assert!(matches!(rpc_tx_queue_task_result, None)); + assert!(rpc_tx_queue_task_result.is_none()); } #[tokio::test(flavor = "multi_thread")] diff --git a/zebra-rpc/src/server/tests/vectors.rs b/zebra-rpc/src/server/tests/vectors.rs index 6ad2594eb84..657106a56b7 100644 --- a/zebra-rpc/src/server/tests/vectors.rs +++ b/zebra-rpc/src/server/tests/vectors.rs @@ -78,10 +78,10 @@ fn rpc_server_spawn(parallel_cpu_threads: bool) { // The server and queue tasks should continue without errors or panics let rpc_server_task_result = rpc_server_task_handle.now_or_never(); - assert!(matches!(rpc_server_task_result, None)); + assert!(rpc_server_task_result.is_none()); let rpc_tx_queue_task_result = rpc_tx_queue_task_handle.now_or_never(); - assert!(matches!(rpc_tx_queue_task_result, None)); + assert!(rpc_tx_queue_task_result.is_none()); }); info!("waiting for RPC server to shut down..."); @@ -184,10 +184,10 @@ fn rpc_server_spawn_unallocated_port(parallel_cpu_threads: bool, do_shutdown: bo } else { // The server and queue tasks should continue without errors or panics let rpc_server_task_result = rpc_server_task_handle.now_or_never(); - assert!(matches!(rpc_server_task_result, None)); + assert!(rpc_server_task_result.is_none()); let rpc_tx_queue_task_result = rpc_tx_queue_task_handle.now_or_never(); - 
assert!(matches!(rpc_tx_queue_task_result, None)); + assert!(rpc_tx_queue_task_result.is_none()); } }); diff --git a/zebra-state/Cargo.toml b/zebra-state/Cargo.toml index 8caba9ecbc3..b165671832e 100644 --- a/zebra-state/Cargo.toml +++ b/zebra-state/Cargo.toml @@ -52,7 +52,7 @@ tempfile = "3.5.0" thiserror = "1.0.40" rayon = "1.7.0" -tokio = { version = "1.28.2", features = ["sync", "tracing"] } +tokio = { version = "1.28.2", features = ["rt-multi-thread", "sync", "tracing"] } tower = { version = "0.4.13", features = ["buffer", "util"] } tracing = "0.1.37" diff --git a/zebrad/src/components/inbound/tests/fake_peer_set.rs b/zebrad/src/components/inbound/tests/fake_peer_set.rs index 1c0bdd34973..fd30be4fdb1 100644 --- a/zebrad/src/components/inbound/tests/fake_peer_set.rs +++ b/zebrad/src/components/inbound/tests/fake_peer_set.rs @@ -117,13 +117,13 @@ async fn mempool_requests_for_transactions() { let sync_gossip_result = sync_gossip_task_handle.now_or_never(); assert!( - matches!(sync_gossip_result, None), + sync_gossip_result.is_none(), "unexpected error or panic in sync gossip task: {sync_gossip_result:?}", ); let tx_gossip_result = tx_gossip_task_handle.now_or_never(); assert!( - matches!(tx_gossip_result, None), + tx_gossip_result.is_none(), "unexpected error or panic in transaction gossip task: {tx_gossip_result:?}", ); } @@ -208,13 +208,13 @@ async fn mempool_push_transaction() -> Result<(), crate::BoxError> { let sync_gossip_result = sync_gossip_task_handle.now_or_never(); assert!( - matches!(sync_gossip_result, None), + sync_gossip_result.is_none(), "unexpected error or panic in sync gossip task: {sync_gossip_result:?}", ); let tx_gossip_result = tx_gossip_task_handle.now_or_never(); assert!( - matches!(tx_gossip_result, None), + tx_gossip_result.is_none(), "unexpected error or panic in transaction gossip task: {tx_gossip_result:?}", ); @@ -313,13 +313,13 @@ async fn mempool_advertise_transaction_ids() -> Result<(), crate::BoxError> { let sync_gossip_result 
= sync_gossip_task_handle.now_or_never(); assert!( - matches!(sync_gossip_result, None), + sync_gossip_result.is_none(), "unexpected error or panic in sync gossip task: {sync_gossip_result:?}", ); let tx_gossip_result = tx_gossip_task_handle.now_or_never(); assert!( - matches!(tx_gossip_result, None), + tx_gossip_result.is_none(), "unexpected error or panic in transaction gossip task: {tx_gossip_result:?}", ); @@ -629,13 +629,13 @@ async fn mempool_transaction_expiration() -> Result<(), crate::BoxError> { let sync_gossip_result = sync_gossip_task_handle.now_or_never(); assert!( - matches!(sync_gossip_result, None), + sync_gossip_result.is_none(), "unexpected error or panic in sync gossip task: {sync_gossip_result:?}", ); let tx_gossip_result = tx_gossip_task_handle.now_or_never(); assert!( - matches!(tx_gossip_result, None), + tx_gossip_result.is_none(), "unexpected error or panic in transaction gossip task: {tx_gossip_result:?}", ); @@ -727,13 +727,13 @@ async fn inbound_block_height_lookahead_limit() -> Result<(), crate::BoxError> { let sync_gossip_result = sync_gossip_task_handle.now_or_never(); assert!( - matches!(sync_gossip_result, None), + sync_gossip_result.is_none(), "unexpected error or panic in sync gossip task: {sync_gossip_result:?}", ); let tx_gossip_result = tx_gossip_task_handle.now_or_never(); assert!( - matches!(tx_gossip_result, None), + tx_gossip_result.is_none(), "unexpected error or panic in transaction gossip task: {tx_gossip_result:?}", ); diff --git a/zebrad/src/components/inbound/tests/real_peer_set.rs b/zebrad/src/components/inbound/tests/real_peer_set.rs index 35cfe7345d7..6e734aae1de 100644 --- a/zebrad/src/components/inbound/tests/real_peer_set.rs +++ b/zebrad/src/components/inbound/tests/real_peer_set.rs @@ -108,13 +108,13 @@ async fn inbound_peers_empty_address_book() -> Result<(), crate::BoxError> { let block_gossip_result = block_gossip_task_handle.now_or_never(); assert!( - matches!(block_gossip_result, None), + 
block_gossip_result.is_none(), "unexpected error or panic in block gossip task: {block_gossip_result:?}", ); let tx_gossip_result = tx_gossip_task_handle.now_or_never(); assert!( - matches!(tx_gossip_result, None), + tx_gossip_result.is_none(), "unexpected error or panic in transaction gossip task: {tx_gossip_result:?}", ); @@ -191,13 +191,13 @@ async fn inbound_block_empty_state_notfound() -> Result<(), crate::BoxError> { let block_gossip_result = block_gossip_task_handle.now_or_never(); assert!( - matches!(block_gossip_result, None), + block_gossip_result.is_none(), "unexpected error or panic in block gossip task: {block_gossip_result:?}", ); let tx_gossip_result = tx_gossip_task_handle.now_or_never(); assert!( - matches!(tx_gossip_result, None), + tx_gossip_result.is_none(), "unexpected error or panic in transaction gossip task: {tx_gossip_result:?}", ); @@ -311,13 +311,13 @@ async fn inbound_tx_empty_state_notfound() -> Result<(), crate::BoxError> { let block_gossip_result = block_gossip_task_handle.now_or_never(); assert!( - matches!(block_gossip_result, None), + block_gossip_result.is_none(), "unexpected error or panic in block gossip task: {block_gossip_result:?}", ); let tx_gossip_result = tx_gossip_task_handle.now_or_never(); assert!( - matches!(tx_gossip_result, None), + tx_gossip_result.is_none(), "unexpected error or panic in transaction gossip task: {tx_gossip_result:?}", ); @@ -461,13 +461,13 @@ async fn outbound_tx_unrelated_response_notfound() -> Result<(), crate::BoxError let block_gossip_result = block_gossip_task_handle.now_or_never(); assert!( - matches!(block_gossip_result, None), + block_gossip_result.is_none(), "unexpected error or panic in block gossip task: {block_gossip_result:?}", ); let tx_gossip_result = tx_gossip_task_handle.now_or_never(); assert!( - matches!(tx_gossip_result, None), + tx_gossip_result.is_none(), "unexpected error or panic in transaction gossip task: {tx_gossip_result:?}", ); @@ -574,13 +574,13 @@ async fn 
outbound_tx_partial_response_notfound() -> Result<(), crate::BoxError> let block_gossip_result = block_gossip_task_handle.now_or_never(); assert!( - matches!(block_gossip_result, None), + block_gossip_result.is_none(), "unexpected error or panic in block gossip task: {block_gossip_result:?}", ); let tx_gossip_result = tx_gossip_task_handle.now_or_never(); assert!( - matches!(tx_gossip_result, None), + tx_gossip_result.is_none(), "unexpected error or panic in transaction gossip task: {tx_gossip_result:?}", ); diff --git a/zebrad/src/components/sync/tests/vectors.rs b/zebrad/src/components/sync/tests/vectors.rs index f406d3dbfe7..a9155184240 100644 --- a/zebrad/src/components/sync/tests/vectors.rs +++ b/zebrad/src/components/sync/tests/vectors.rs @@ -255,7 +255,7 @@ async fn sync_blocks_ok() -> Result<(), crate::BoxError> { let chain_sync_result = chain_sync_task_handle.now_or_never(); assert!( - matches!(chain_sync_result, None), + chain_sync_result.is_none(), "unexpected error or panic in chain sync task: {chain_sync_result:?}", ); @@ -486,7 +486,7 @@ async fn sync_blocks_duplicate_hashes_ok() -> Result<(), crate::BoxError> { let chain_sync_result = chain_sync_task_handle.now_or_never(); assert!( - matches!(chain_sync_result, None), + chain_sync_result.is_none(), "unexpected error or panic in chain sync task: {chain_sync_result:?}", ); @@ -539,7 +539,7 @@ async fn sync_block_lookahead_drop() -> Result<(), crate::BoxError> { let chain_sync_result = chain_sync_task_handle.now_or_never(); assert!( - matches!(chain_sync_result, None), + chain_sync_result.is_none(), "unexpected error or panic in chain sync task: {chain_sync_result:?}", ); @@ -694,7 +694,7 @@ async fn sync_block_too_high_obtain_tips() -> Result<(), crate::BoxError> { let chain_sync_result = chain_sync_task_handle.now_or_never(); assert!( - matches!(chain_sync_result, None), + chain_sync_result.is_none(), "unexpected error or panic in chain sync task: {chain_sync_result:?}", ); @@ -915,7 +915,7 @@ async 
fn sync_block_too_high_extend_tips() -> Result<(), crate::BoxError> { let chain_sync_result = chain_sync_task_handle.now_or_never(); assert!( - matches!(chain_sync_result, None), + chain_sync_result.is_none(), "unexpected error or panic in chain sync task: {chain_sync_result:?}", ); From 6d4083010b9d9ba2e9c24592381926811a452575 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 7 Jun 2023 07:49:27 +0000 Subject: [PATCH 053/265] build(deps): bump tj-actions/changed-files from 36.0.17 to 36.0.18 (#6835) Bumps [tj-actions/changed-files](https://github.com/tj-actions/changed-files) from 36.0.17 to 36.0.18. - [Release notes](https://github.com/tj-actions/changed-files/releases) - [Changelog](https://github.com/tj-actions/changed-files/blob/main/HISTORY.md) - [Commits](https://github.com/tj-actions/changed-files/compare/v36.0.17...v36.0.18) --- updated-dependencies: - dependency-name: tj-actions/changed-files dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/lint.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index c2c04a157a6..1125ee01bf7 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -37,7 +37,7 @@ jobs: - name: Rust files id: changed-files-rust - uses: tj-actions/changed-files@v36.0.17 + uses: tj-actions/changed-files@v36.0.18 with: files: | **/*.rs @@ -49,7 +49,7 @@ jobs: - name: Workflow files id: changed-files-workflows - uses: tj-actions/changed-files@v36.0.17 + uses: tj-actions/changed-files@v36.0.18 with: files: | .github/workflows/*.yml From 38206373b4acd68a6386b1fd4a2ad793d6500c86 Mon Sep 17 00:00:00 2001 From: teor Date: Thu, 8 Jun 2023 01:35:32 +1000 Subject: [PATCH 054/265] fix(ci): Add jobs to allow the full sync to finish before the first stable release (#6846) * Delete an unused CI job that was previously partially deleted * Add 2 more jobs to the full sync test * Increase Rust test time: current expected time is 60 hours --- .github/workflows/deploy-gcp-tests.yml | 192 ++++++++++++++++++------- zebrad/tests/common/sync.rs | 2 +- 2 files changed, 139 insertions(+), 55 deletions(-) diff --git a/.github/workflows/deploy-gcp-tests.yml b/.github/workflows/deploy-gcp-tests.yml index 6386a791fb0..6aaf754c34e 100644 --- a/.github/workflows/deploy-gcp-tests.yml +++ b/.github/workflows/deploy-gcp-tests.yml @@ -570,59 +570,6 @@ jobs: ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} \ " - - # follow the logs of the test we just launched, up to Sapling activation (or the test finishing) - # - # If `inputs.is_long_test` is `false`, this job is skipped. - logs-sprout: - name: Log ${{ inputs.test_id }} test (sprout) - # We run exactly one of without-cached-state or with-cached-state, and we always skip the other one. 
- needs: [ launch-with-cached-state, launch-without-cached-state ] - # If the previous job fails, we still want to show the logs. - if: ${{ !cancelled() && inputs.is_long_test }} - runs-on: ubuntu-latest - permissions: - contents: 'read' - id-token: 'write' - steps: - - uses: actions/checkout@v3.5.2 - with: - persist-credentials: false - fetch-depth: '2' - # We can't use the standard Rust problem matchers on these jobs, - # because they produce a lot of output. - # - # TODO: create a custom matcher config for these specific jobs - #- uses: r7kamura/rust-problem-matchers@v1.3.0 - - - name: Inject slug/short variables - uses: rlespinasse/github-slug-action@v4 - with: - short-length: 7 - - # Install our SSH secret - - name: Install private SSH key - uses: shimataro/ssh-key-action@v2.5.1 - with: - key: ${{ secrets.GCP_SSH_PRIVATE_KEY }} - name: google_compute_engine - known_hosts: unnecessary - - - name: Generate public SSH key - run: ssh-keygen -y -f ~/.ssh/google_compute_engine > ~/.ssh/google_compute_engine.pub - - # Setup gcloud CLI - - name: Authenticate to Google Cloud - id: auth - uses: google-github-actions/auth@v1.1.1 - with: - retries: '3' - workload_identity_provider: '${{ vars.GCP_WIF }}' - service_account: '${{ vars.GCP_DEPLOYMENTS_SA }}' - - - name: Set up Cloud SDK - uses: google-github-actions/setup-gcloud@v1.1.1 - # follow the logs of the test we just launched, up to Canopy activation (or the test finishing) # # If `inputs.is_long_test` is `false`, this job is skipped. @@ -1382,11 +1329,148 @@ jobs: -e 'test result:.*finished in' \ " + # follow the logs of the test we just launched, up to block 2,030,000 or later + # (or the test finishing) + # + # We chose this height because it was about 4.5 hours from the last job, in June 2023. + logs-2030k: + name: Log ${{ inputs.test_id }} test (2030k) + needs: [ logs-1960k ] + # If the previous job fails, we still want to show the logs. 
+ if: ${{ !cancelled() && inputs.is_long_test }} + runs-on: ubuntu-latest + permissions: + contents: 'read' + id-token: 'write' + steps: + - uses: actions/checkout@v3.5.2 + with: + persist-credentials: false + fetch-depth: '2' + + - name: Inject slug/short variables + uses: rlespinasse/github-slug-action@v4 + with: + short-length: 7 + + # Install our SSH secret + - name: Install private SSH key + uses: shimataro/ssh-key-action@v2.5.1 + with: + key: ${{ secrets.GCP_SSH_PRIVATE_KEY }} + name: google_compute_engine + known_hosts: unnecessary + + - name: Generate public SSH key + run: ssh-keygen -y -f ~/.ssh/google_compute_engine > ~/.ssh/google_compute_engine.pub + + # Setup gcloud CLI + - name: Authenticate to Google Cloud + id: auth + uses: google-github-actions/auth@v1.1.1 + with: + retries: '3' + workload_identity_provider: '${{ vars.GCP_WIF }}' + service_account: '${{ vars.GCP_DEPLOYMENTS_SA }}' + + - name: Set up Cloud SDK + uses: google-github-actions/setup-gcloud@v1.1.1 + + # Show recent logs, following until block 2,030,000 (or the test finishes) + - name: Show logs for ${{ inputs.test_id }} test (2030k) + run: | + gcloud compute ssh ${{ inputs.test_id }}-${{ env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }} \ + --zone ${{ vars.GCP_ZONE }} \ + --ssh-flag="-o ServerAliveInterval=5" \ + --ssh-flag="-o ConnectionAttempts=20" \ + --ssh-flag="-o ConnectTimeout=5" \ + --command \ + "\ + sudo docker logs \ + --tail all \ + --follow \ + ${{ inputs.test_id }} | \ + tee --output-error=exit /dev/stderr | \ + grep --max-count=1 --extended-regexp --color=always \ + -e 'estimated progress.*current_height.*=.*20[3-9][0-9][0-9][0-9][0-9].*remaining_sync_blocks' \ + -e 'estimated progress.*current_height.*=.*2[1-9][0-9][0-9][0-9][0-9][0-9].*remaining_sync_blocks' \ + -e 'estimated progress.*current_height.*=.*[3-9][0-9][0-9][0-9][0-9][0-9][0-9].*remaining_sync_blocks' \ + -e 'test result:.*finished in' \ + " + + # follow the logs of the test we just launched, up to block 
2,100,000 or later + # (or the test finishing) + # + # We chose this height because we guessed it was 4.5 hours from the last job, in June 2023. + logs-2100k: + name: Log ${{ inputs.test_id }} test (2100k) + needs: [ logs-2030k ] + # If the previous job fails, we still want to show the logs. + if: ${{ !cancelled() && inputs.is_long_test }} + runs-on: ubuntu-latest + permissions: + contents: 'read' + id-token: 'write' + steps: + - uses: actions/checkout@v3.5.2 + with: + persist-credentials: false + fetch-depth: '2' + + - name: Inject slug/short variables + uses: rlespinasse/github-slug-action@v4 + with: + short-length: 7 + + # Install our SSH secret + - name: Install private SSH key + uses: shimataro/ssh-key-action@v2.5.1 + with: + key: ${{ secrets.GCP_SSH_PRIVATE_KEY }} + name: google_compute_engine + known_hosts: unnecessary + + - name: Generate public SSH key + run: ssh-keygen -y -f ~/.ssh/google_compute_engine > ~/.ssh/google_compute_engine.pub + + # Setup gcloud CLI + - name: Authenticate to Google Cloud + id: auth + uses: google-github-actions/auth@v1.1.1 + with: + retries: '3' + workload_identity_provider: '${{ vars.GCP_WIF }}' + service_account: '${{ vars.GCP_DEPLOYMENTS_SA }}' + + - name: Set up Cloud SDK + uses: google-github-actions/setup-gcloud@v1.1.1 + + # Show recent logs, following until block 2,100,000 (or the test finishes) + - name: Show logs for ${{ inputs.test_id }} test (2100k) + run: | + gcloud compute ssh ${{ inputs.test_id }}-${{ env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }} \ + --zone ${{ vars.GCP_ZONE }} \ + --ssh-flag="-o ServerAliveInterval=5" \ + --ssh-flag="-o ConnectionAttempts=20" \ + --ssh-flag="-o ConnectTimeout=5" \ + --command \ + "\ + sudo docker logs \ + --tail all \ + --follow \ + ${{ inputs.test_id }} | \ + tee --output-error=exit /dev/stderr | \ + grep --max-count=1 --extended-regexp --color=always \ + -e 'estimated progress.*current_height.*=.*2[1-9][0-9][0-9][0-9][0-9][0-9].*remaining_sync_blocks' \ + -e 'estimated 
progress.*current_height.*=.*[3-9][0-9][0-9][0-9][0-9][0-9][0-9].*remaining_sync_blocks' \ + -e 'test result:.*finished in' \ + " + # follow the logs of the test we just launched, up to the last checkpoint, or the test finishing, # or for lightwalletd tests, about 5 hours into the full lightwalletd sync (block 1880k) logs-checkpoint: name: Log ${{ inputs.test_id }} test (checkpoint) - needs: [ logs-1960k ] + needs: [ logs-2100k ] # If the previous job fails, we still want to show the logs. if: ${{ !cancelled() && inputs.is_long_test }} runs-on: ubuntu-latest diff --git a/zebrad/tests/common/sync.rs b/zebrad/tests/common/sync.rs index ff483827a7a..d7bc91d352c 100644 --- a/zebrad/tests/common/sync.rs +++ b/zebrad/tests/common/sync.rs @@ -83,7 +83,7 @@ pub const FINISH_PARTIAL_SYNC_TIMEOUT: Duration = Duration::from_secs(11 * 60 * /// The maximum time to wait for Zebrad to synchronize up to the chain tip starting from the /// genesis block. -pub const FINISH_FULL_SYNC_TIMEOUT: Duration = Duration::from_secs(58 * 60 * 60); +pub const FINISH_FULL_SYNC_TIMEOUT: Duration = Duration::from_secs(72 * 60 * 60); /// The test sync height where we switch to using the default lookahead limit. 
/// From 8bd5a9818696abbf04cd9f12583727403eb391c0 Mon Sep 17 00:00:00 2001 From: Deirdre Connolly Date: Wed, 7 Jun 2023 17:10:22 -0400 Subject: [PATCH 055/265] Regenerate lockfile against 38206373b4acd68a6386b1fd4a2ad793d6500c86 (#6844) --- Cargo.lock | 514 +++++++++++++++++++++++++---------------------------- 1 file changed, 246 insertions(+), 268 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 54883a32013..83e86557cce 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -12,7 +12,7 @@ dependencies = [ "arc-swap", "backtrace", "canonical-path", - "clap 4.3.0", + "clap 4.3.2", "color-eyre", "fs-err", "once_cell", @@ -35,8 +35,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "55bfb86e57d13c06e482c570826ddcddcc8f07fab916760e8911141d4fda8b62" dependencies = [ "ident_case", - "proc-macro2 1.0.56", - "quote 1.0.27", + "proc-macro2 1.0.59", + "quote 1.0.28", "syn 1.0.109", "synstructure", ] @@ -83,7 +83,7 @@ version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fcb51a0695d8f838b1ee009b3fbf66bda078cd64590202a864a8f3e8c4315c47" dependencies = [ - "getrandom 0.2.9", + "getrandom 0.2.10", "once_cell", "version_check", ] @@ -95,7 +95,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2c99f64d1e06488f620f932677e24bc6e2897582980441ae90a671415bd7ec2f" dependencies = [ "cfg-if 1.0.0", - "getrandom 0.2.9", + "getrandom 0.2.10", "once_cell", "version_check", ] @@ -111,9 +111,9 @@ dependencies = [ [[package]] name = "aho-corasick" -version = "1.0.1" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67fc08ce920c31afb70f013dcce1bfc3a3195de6a228474e45e1f145b36f8d04" +checksum = "43f6cb1bf222025340178f382c426f13757b2960e89779dfcb319c32542a5a41" dependencies = [ "memchr", ] @@ -251,9 +251,9 @@ version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" dependencies = [ - "proc-macro2 1.0.56", - "quote 1.0.27", - "syn 2.0.15", + "proc-macro2 1.0.59", + "quote 1.0.28", + "syn 2.0.18", ] [[package]] @@ -262,9 +262,9 @@ version = "0.1.68" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b9ccdd8f2a161be9bd5c023df56f1b2a0bd1d83872ae53b71a84a12c9bf6e842" dependencies = [ - "proc-macro2 1.0.56", - "quote 1.0.27", - "syn 2.0.15", + "proc-macro2 1.0.59", + "quote 1.0.28", + "syn 2.0.18", ] [[package]] @@ -358,9 +358,9 @@ checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8" [[package]] name = "base64" -version = "0.21.0" +version = "0.21.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4a4ddaa51a5bc52a6948f74c06d20aaaddb71924eab79b8c97a8c556e942d6a" +checksum = "604178f6c5c21f02dc555784810edfb88d34ac2c73b2eae109655649ee73ce3d" [[package]] name = "base64ct" @@ -417,13 +417,13 @@ dependencies = [ "lazycell", "log", "peeking_take_while", - "prettyplease 0.2.4", - "proc-macro2 1.0.56", - "quote 1.0.27", + "prettyplease 0.2.6", + "proc-macro2 1.0.59", + "quote 1.0.28", "regex", "rustc-hash", "shlex", - "syn 2.0.15", + "syn 2.0.18", "which", ] @@ -564,9 +564,9 @@ dependencies = [ [[package]] name = "bstr" -version = "1.4.0" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3d4260bcc2e8fc9df1eac4919a720effeb63a3f0952f5bf4944adfa18897f09" +checksum = "a246e68bb43f6cd9db24bea052a53e40405417c5fb372e3d1a8a7f770a564ef5" dependencies = [ "memchr", "serde", @@ -574,9 +574,9 @@ dependencies = [ [[package]] name = "bumpalo" -version = "3.12.2" +version = "3.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c6ed94e98ecff0c12dd1b04c15ec0d7d9458ca8fe806cea6f12954efe74c63b" +checksum = "a3e2c3daef883ecc1b5d58c15adae93470a91d425f3532ba1695849656af3fc1" [[package]] name = "byte-slice-cast" @@ -767,9 +767,9 @@ 
dependencies = [ [[package]] name = "clap" -version = "4.3.0" +version = "4.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93aae7a4192245f70fe75dd9157fc7b4a5bf53e88d30bd4396f7d8f9284d5acc" +checksum = "401a4694d2bf92537b6867d94de48c4842089645fdcdf6c71865b175d836e9c2" dependencies = [ "clap_builder", "clap_derive", @@ -778,9 +778,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.3.0" +version = "4.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f423e341edefb78c9caba2d9c7f7687d0e72e89df3ce3394554754393ac3990" +checksum = "72394f3339a76daf211e57d4bcb374410f3965dcc606dd0e03738c7888766980" dependencies = [ "anstream", "anstyle", @@ -792,14 +792,14 @@ dependencies = [ [[package]] name = "clap_derive" -version = "4.3.0" +version = "4.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "191d9573962933b4027f932c600cd252ce27a8ad5979418fe78e43c07996f27b" +checksum = "b8cd2b2a819ad6eec39e8f1d6b53001af1e5469f8c177579cdaeb313115b825f" dependencies = [ "heck 0.4.1", - "proc-macro2 1.0.56", - "quote 1.0.27", - "syn 2.0.15", + "proc-macro2 1.0.59", + "quote 1.0.28", + "syn 2.0.18", ] [[package]] @@ -854,15 +854,15 @@ checksum = "acbf1af155f9b9ef647e42cdc158db4b64a1b61f743629225fde6f3e0be2a7c7" [[package]] name = "console" -version = "0.15.5" +version = "0.15.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3d79fbe8970a77e3e34151cc13d3b3e248aa0faaecb9f6091fa07ebefe5ad60" +checksum = "c926e00cc70edefdc64d3a5ff31cc65bb97a3460097762bd23afb4d8145fccf8" dependencies = [ "encode_unicode", "lazy_static", "libc", "unicode-width", - "windows-sys 0.42.0", + "windows-sys 0.45.0", ] [[package]] @@ -950,7 +950,7 @@ dependencies = [ "anes", "cast", "ciborium", - "clap 4.3.0", + "clap 4.3.2", "criterion-plot", "is-terminal", "itertools", @@ -1076,31 +1076,16 @@ dependencies = [ "link-cplusplus", ] -[[package]] -name = "cxx-build" -version = 
"1.0.94" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12cee708e8962df2aeb38f594aae5d827c022b6460ac71a7a3e2c3c2aae5a07b" -dependencies = [ - "cc", - "codespan-reporting", - "once_cell", - "proc-macro2 1.0.56", - "quote 1.0.27", - "scratch", - "syn 2.0.15", -] - [[package]] name = "cxx-gen" -version = "0.7.94" +version = "0.7.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee165c38de64e6761c2f38b7e9beee0721110f8585165987ef9db2a753ee4176" +checksum = "b677bcf759c79656defee3b0374aeff759122d3fc80edb0b77eeb0fd06e8fd20" dependencies = [ "codespan-reporting", - "proc-macro2 1.0.56", - "quote 1.0.27", - "syn 2.0.15", + "proc-macro2 1.0.59", + "quote 1.0.28", + "syn 2.0.18", ] [[package]] @@ -1115,9 +1100,9 @@ version = "1.0.94" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2345488264226bf682893e25de0769f3360aac9957980ec49361b083ddaa5bc5" dependencies = [ - "proc-macro2 1.0.56", - "quote 1.0.27", - "syn 2.0.15", + "proc-macro2 1.0.59", + "quote 1.0.28", + "syn 2.0.18", ] [[package]] @@ -1148,8 +1133,8 @@ checksum = "859d65a907b6852c9361e3185c862aae7fafd2887876799fa55f5f99dc40d610" dependencies = [ "fnv", "ident_case", - "proc-macro2 1.0.56", - "quote 1.0.27", + "proc-macro2 1.0.59", + "quote 1.0.28", "strsim 0.10.0", "syn 1.0.109", ] @@ -1162,10 +1147,10 @@ checksum = "ab8bfa2e259f8ee1ce5e97824a3c55ec4404a0d772ca7fa96bf19f0752a046eb" dependencies = [ "fnv", "ident_case", - "proc-macro2 1.0.56", - "quote 1.0.27", + "proc-macro2 1.0.59", + "quote 1.0.28", "strsim 0.10.0", - "syn 2.0.15", + "syn 2.0.18", ] [[package]] @@ -1175,7 +1160,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c972679f83bdf9c42bd905396b6c3588a843a17f0f16dfcfa3e2c5d57441835" dependencies = [ "darling_core 0.13.4", - "quote 1.0.27", + "quote 1.0.28", "syn 1.0.109", ] @@ -1186,8 +1171,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"29a358ff9f12ec09c3e61fef9b5a9902623a695a46a917b07f269bff1445611a" dependencies = [ "darling_core 0.20.1", - "quote 1.0.27", - "syn 2.0.15", + "quote 1.0.28", + "syn 2.0.18", ] [[package]] @@ -1211,9 +1196,9 @@ dependencies = [ [[package]] name = "digest" -version = "0.10.6" +version = "0.10.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8168378f4e5023e7218c89c891c0fd8ecdb5e5e4f18cb78f38cf245dd021e76f" +checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" dependencies = [ "block-buffer 0.10.4", "crypto-common", @@ -1256,9 +1241,9 @@ version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "487585f4d0c6655fe74905e2504d8ad6908e4db67f744eb140876906c2f3175d" dependencies = [ - "proc-macro2 1.0.56", - "quote 1.0.27", - "syn 2.0.15", + "proc-macro2 1.0.59", + "quote 1.0.28", + "syn 2.0.18", ] [[package]] @@ -1473,9 +1458,9 @@ checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" [[package]] name = "form_urlencoded" -version = "1.1.0" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9c384f161156f5260c24a097c56119f9be8c798586aecc13afbcbe7b7e26bf8" +checksum = "a62bc1cf6f830c2ec14a513a9fb124d0a213a629668a4186f329db21fe045652" dependencies = [ "percent-encoding", ] @@ -1488,7 +1473,7 @@ checksum = "26c4b37de5ae15812a764c958297cfc50f5c010438f60c6ce75d11b802abd404" dependencies = [ "cbc", "cipher", - "libm 0.2.6", + "libm 0.2.7", "num-bigint", "num-integer", "num-traits", @@ -1560,9 +1545,9 @@ version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72" dependencies = [ - "proc-macro2 1.0.56", - "quote 1.0.27", - "syn 2.0.15", + "proc-macro2 1.0.59", + "quote 1.0.28", + "syn 2.0.18", ] [[package]] @@ -1618,9 +1603,9 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.2.9" +version = "0.2.10" source 
= "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c85e1d9ab2eadba7e5040d4e09cbd6d072b76a557ad64e797c2cb9d4da21d7e4" +checksum = "be4136b2a15dd319360be1c07d9933517ccf0be8f16bf62a3bee4f0d618df427" dependencies = [ "cfg-if 1.0.0", "js-sys", @@ -1637,9 +1622,9 @@ checksum = "ad0a93d233ebf96623465aad4046a8d3aa4da22d4f4beba5388838c8a434bbb4" [[package]] name = "git2" -version = "0.17.1" +version = "0.17.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b7905cdfe33d31a88bb2e8419ddd054451f5432d1da9eaf2ac7804ee1ea12d5" +checksum = "7b989d6a7ca95a362cf2cfc5ad688b3a467be1f87e480b8dad07fee8c79b0044" dependencies = [ "bitflags 1.3.2", "libc", @@ -1681,9 +1666,9 @@ dependencies = [ [[package]] name = "h2" -version = "0.3.18" +version = "0.3.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17f8a914c2987b688368b5138aa05321db91f4090cf26118185672ad588bce21" +checksum = "d357c7ae988e7d2182f7d7871d0b963962420b0678b0997ce7de72001aeab782" dependencies = [ "bytes", "fnv", @@ -1841,7 +1826,7 @@ version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6c49c37c09c17a53d937dfbb742eb3a961d65a994e6bcdcf37e7399d0cc8ab5e" dependencies = [ - "digest 0.10.6", + "digest 0.10.7", ] [[package]] @@ -1980,9 +1965,9 @@ dependencies = [ [[package]] name = "iana-time-zone" -version = "0.1.56" +version = "0.1.57" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0722cd7114b7de04316e7ea5456a0bbb20e4adb46fd27a3697adb812cff0f37c" +checksum = "2fad5b825842d2b38bd206f3e81d6957625fd7f0a361e345c30e01a0ae2dd613" dependencies = [ "android_system_properties", "core-foundation-sys", @@ -1994,12 +1979,11 @@ dependencies = [ [[package]] name = "iana-time-zone-haiku" -version = "0.1.1" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0703ae284fc167426161c2e3f1da3ea71d94b21bedbcc9494e92b28e334e3dca" +checksum = 
"f31827a206f56af32e590ba56d5d2d085f558508192593743f16b2306495269f" dependencies = [ - "cxx", - "cxx-build", + "cc", ] [[package]] @@ -2010,9 +1994,9 @@ checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39" [[package]] name = "idna" -version = "0.3.0" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e14ddfc70884202db2244c223200c204c2bda1bc6e0998d11b5e024d657209e6" +checksum = "7d20d6b07bfbc108882d88ed8e37d39636dcc260e15e30c45e6ba089610b917c" dependencies = [ "unicode-bidi", "unicode-normalization", @@ -2033,8 +2017,8 @@ version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "11d7a9f6330b71fea57921c9b61c47ee6e84f72d394754eff6163ae67e7395eb" dependencies = [ - "proc-macro2 1.0.56", - "quote 1.0.27", + "proc-macro2 1.0.59", + "quote 1.0.28", "syn 1.0.109", ] @@ -2073,7 +2057,7 @@ dependencies = [ "console", "instant", "number_prefix", - "portable-atomic 1.3.2", + "portable-atomic 1.3.3", "unicode-width", ] @@ -2131,9 +2115,9 @@ dependencies = [ [[package]] name = "io-lifetimes" -version = "1.0.10" +version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c66c74d2ae7e79a5a8f7ac924adbe38ee42a859c6539ad869eb51f0b52dc220" +checksum = "eae7b9aee968036d54dce06cebaefd919e4472e753296daccd6d344e3e2df0c2" dependencies = [ "hermit-abi 0.3.1", "libc", @@ -2184,9 +2168,9 @@ dependencies = [ [[package]] name = "js-sys" -version = "0.3.62" +version = "0.3.63" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68c16e1bfd491478ab155fd8b4896b86f9ede344949b641e61501e07c2b8b4d5" +checksum = "2f37a4a5928311ac501dee68b3c7613a1037d0edb30c8e5427bd832d55d1b790" dependencies = [ "wasm-bindgen", ] @@ -2213,8 +2197,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5b939a78fa820cdfcb7ee7484466746a7377760970f6f9c6fe19f9edcc8a38d2" dependencies = [ "proc-macro-crate 0.1.5", - "proc-macro2 
1.0.56", - "quote 1.0.27", + "proc-macro2 1.0.59", + "quote 1.0.28", "syn 1.0.109", ] @@ -2283,15 +2267,15 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "libc" -version = "0.2.144" +version = "0.2.146" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b00cc1c228a6782d0f076e7b232802e0c5689d41bb5df366f2a6b6621cfdfe1" +checksum = "f92be4933c13fd498862a9e02a3055f8a8d9c039ce33db97306fd5a6caa7f29b" [[package]] name = "libgit2-sys" -version = "0.15.1+1.6.4" +version = "0.15.2+1.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb4577bde8cdfc7d6a2a4bcb7b049598597de33ffd337276e9c7db6cd4a2cee7" +checksum = "a80df2e11fb4a61f4ba2ab42dbe7f74468da143f1a75c74e11dee7c813f694fa" dependencies = [ "cc", "libc", @@ -2317,9 +2301,9 @@ checksum = "7fc7aa29613bd6a620df431842069224d8bc9011086b1db4c0e0cd47fa03ec9a" [[package]] name = "libm" -version = "0.2.6" +version = "0.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "348108ab3fba42ec82ff6e9564fc4ca0247bdccdc68dd8af9764bbc79c3c8ffb" +checksum = "f7012b1bbb0719e1097c47611d3898568c546d597c2e74d66f6087edd5233ff4" [[package]] name = "librocksdb-sys" @@ -2365,15 +2349,15 @@ checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f" [[package]] name = "linux-raw-sys" -version = "0.3.7" +version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ece97ea872ece730aed82664c424eb4c8291e1ff2480247ccf7409044bc6479f" +checksum = "ef53942eb7bf7ff43a617b3e2c1c4a5ecf5944a7c1bc12d7ee39bbb15e5c1519" [[package]] name = "lock_api" -version = "0.4.9" +version = "0.4.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "435011366fe56583b16cf956f9df0095b405b82d76425bc8981c0e22e60ec4df" +checksum = "c1cc9717a20b1bb222f333e6a92fd32f7d8a18ddc5a3191a11af45dcbf4dcd16" dependencies = [ "autocfg", "scopeguard", @@ -2478,7 +2462,7 @@ 
checksum = "aa8ebbd1a9e57bbab77b9facae7f5136aea44c356943bf9a198f647da64285d6" dependencies = [ "ahash 0.8.3", "metrics-macros 0.7.0", - "portable-atomic 1.3.2", + "portable-atomic 1.3.3", ] [[package]] @@ -2487,7 +2471,7 @@ version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8a4964177ddfdab1e3a2b37aec7cf320e14169abb0ed73999f558136409178d5" dependencies = [ - "base64 0.21.0", + "base64 0.21.2", "hyper", "indexmap", "ipnet", @@ -2504,8 +2488,8 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "731f8ecebd9f3a4aa847dfe75455e4757a45da40a7793d2f0b1f9b6ed18b23f3" dependencies = [ - "proc-macro2 1.0.56", - "quote 1.0.27", + "proc-macro2 1.0.59", + "quote 1.0.28", "syn 1.0.109", ] @@ -2515,9 +2499,9 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ddece26afd34c31585c74a4db0630c376df271c285d682d1e55012197830b6df" dependencies = [ - "proc-macro2 1.0.56", - "quote 1.0.27", - "syn 2.0.15", + "proc-macro2 1.0.59", + "quote 1.0.28", + "syn 2.0.18", ] [[package]] @@ -2567,9 +2551,9 @@ dependencies = [ [[package]] name = "minreq" -version = "2.7.0" +version = "2.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41979ac2a5aa373c6e294b4a67fbe5e428e91a4cd0524376681f2bc6d872399b" +checksum = "3de406eeb24aba36ed3829532fa01649129677186b44a49debec0ec574ca7da7" dependencies = [ "log", "once_cell", @@ -2580,14 +2564,13 @@ dependencies = [ [[package]] name = "mio" -version = "0.8.6" +version = "0.8.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b9d9a46eff5b4ff64b45a9e316a6d1e0bc719ef429cbec4dc630684212bfdf9" +checksum = "927a765cd3fc26206e66b296465fa9d3e5ab003e651c1b3c060e7956d96b19d2" dependencies = [ "libc", - "log", "wasi 0.11.0+wasi-snapshot-preview1", - "windows-sys 0.45.0", + "windows-sys 0.48.0", ] [[package]] @@ -2608,7 +2591,7 @@ version = "0.7.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "6a51313c5820b0b02bd422f4b44776fbf47961755c74ce64afc73bfad10226c3" dependencies = [ - "getrandom 0.2.9", + "getrandom 0.2.10", ] [[package]] @@ -2704,7 +2687,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "578ede34cf02f8924ab9447f50c28075b4d3e5b269972345e7e0372b38c6cdcd" dependencies = [ "autocfg", - "libm 0.2.6", + "libm 0.2.7", ] [[package]] @@ -2725,9 +2708,9 @@ checksum = "830b246a0e5f20af87141b25c173cd1b609bd7779a4617d6ec582abaf90870f3" [[package]] name = "object" -version = "0.30.3" +version = "0.30.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea86265d3d3dcb6a27fc51bd29a4bf387fae9d2986b823079d4986af253eb439" +checksum = "03b4680b86d9cfafba8fc491dc9b6df26b68cf40e9e6cd73909194759a63c385" dependencies = [ "memchr", ] @@ -2752,9 +2735,9 @@ checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" [[package]] name = "openssl" -version = "0.10.52" +version = "0.10.54" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01b8574602df80f7b85fdfc5392fa884a4e3b3f4f35402c070ab34c3d3f78d56" +checksum = "69b3f656a17a6cbc115b5c7a40c616947d213ba182135b014d6051b73ab6f019" dependencies = [ "bitflags 1.3.2", "cfg-if 1.0.0", @@ -2771,9 +2754,9 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ - "proc-macro2 1.0.56", - "quote 1.0.27", - "syn 2.0.15", + "proc-macro2 1.0.59", + "quote 1.0.28", + "syn 2.0.18", ] [[package]] @@ -2784,9 +2767,9 @@ checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" [[package]] name = "openssl-sys" -version = "0.9.87" +version = "0.9.88" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e17f59264b2809d77ae94f0e1ebabc434773f370d6ca667bd223ea10e06cc7e" +checksum = 
"c2ce0f250f34a308dcfdbb351f511359857d4ed2134ba715a4eadd46e1ffd617" dependencies = [ "cc", "libc", @@ -2901,8 +2884,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "86b26a931f824dd4eca30b3e43bb4f31cd5f0d3a403c5f5ff27106b805bfde7b" dependencies = [ "proc-macro-crate 1.3.1", - "proc-macro2 1.0.56", - "quote 1.0.27", + "proc-macro2 1.0.59", + "quote 1.0.28", "syn 1.0.109", ] @@ -2924,7 +2907,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f" dependencies = [ "lock_api", - "parking_lot_core 0.9.7", + "parking_lot_core 0.9.8", ] [[package]] @@ -2943,15 +2926,15 @@ dependencies = [ [[package]] name = "parking_lot_core" -version = "0.9.7" +version = "0.9.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9069cbb9f99e3a5083476ccb29ceb1de18b9118cafa53e90c9551235de2b9521" +checksum = "93f00c865fe7cabf650081affecd3871070f26767e7b2070a3ffae14c654b447" dependencies = [ "cfg-if 1.0.0", "libc", - "redox_syscall 0.2.16", + "redox_syscall 0.3.5", "smallvec", - "windows-sys 0.45.0", + "windows-targets 0.48.0", ] [[package]] @@ -2986,7 +2969,7 @@ version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "271779f35b581956db91a3e55737327a03aa051e90b1c47aeb189508533adfd7" dependencies = [ - "digest 0.10.6", + "digest 0.10.7", "password-hash", ] @@ -2998,9 +2981,9 @@ checksum = "19b17cddbe7ec3f8bc800887bab5e717348c95ea2ca0b1bf0837fb964dc67099" [[package]] name = "percent-encoding" -version = "2.2.0" +version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "478c572c3d73181ff3c2539045f6eb99e5491218eae919370993b890cdbdd98e" +checksum = "9b2a4787296e9989611394c33f193f676704af1686e70b8f8033ab5ba9a35a94" [[package]] name = "pest" @@ -3030,9 +3013,9 @@ checksum = "6c435bf1076437b851ebc8edc3a18442796b30f1728ffea6262d59bbe28b077e" dependencies = [ "pest", 
"pest_meta", - "proc-macro2 1.0.56", - "quote 1.0.27", - "syn 2.0.15", + "proc-macro2 1.0.59", + "quote 1.0.28", + "syn 2.0.18", ] [[package]] @@ -3071,9 +3054,9 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "39407670928234ebc5e6e580247dd567ad73a3578460c5990f9503df207e8f07" dependencies = [ - "proc-macro2 1.0.56", - "quote 1.0.27", - "syn 2.0.15", + "proc-macro2 1.0.59", + "quote 1.0.28", + "syn 2.0.18", ] [[package]] @@ -3145,14 +3128,14 @@ version = "0.3.20" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e30165d31df606f5726b090ec7592c308a0eaf61721ff64c9a3018e344a8753e" dependencies = [ - "portable-atomic 1.3.2", + "portable-atomic 1.3.3", ] [[package]] name = "portable-atomic" -version = "1.3.2" +version = "1.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc59d1bcc64fc5d021d67521f818db868368028108d37f0e98d74e33f68297b5" +checksum = "767eb9f07d4a5ebcb39bbf2d452058a93c011373abf6832e24194a1c3f004794" [[package]] name = "ppv-lite86" @@ -3166,18 +3149,18 @@ version = "0.1.25" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6c8646e95016a7a6c4adea95bafa8a16baab64b583356217f2c85db4a39d9a86" dependencies = [ - "proc-macro2 1.0.56", + "proc-macro2 1.0.59", "syn 1.0.109", ] [[package]] name = "prettyplease" -version = "0.2.4" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ceca8aaf45b5c46ec7ed39fff75f57290368c1846d33d24a122ca81416ab058" +checksum = "3b69d39aab54d069e7f2fe8cb970493e7834601ca2d8c65fd7bbd183578080d1" dependencies = [ - "proc-macro2 1.0.56", - "syn 2.0.15", + "proc-macro2 1.0.59", + "syn 2.0.18", ] [[package]] @@ -3217,8 +3200,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" dependencies = [ "proc-macro-error-attr", - "proc-macro2 1.0.56", - "quote 1.0.27", + "proc-macro2 
1.0.59", + "quote 1.0.28", "syn 1.0.109", "version_check", ] @@ -3229,8 +3212,8 @@ version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" dependencies = [ - "proc-macro2 1.0.56", - "quote 1.0.27", + "proc-macro2 1.0.59", + "quote 1.0.28", "version_check", ] @@ -3245,9 +3228,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.56" +version = "1.0.59" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b63bdb0cd06f1f4dedf69b254734f9b45af66e4a031e42a7480257d9898b435" +checksum = "6aeca18b86b413c660b781aa319e4e2648a3e6f9eadc9b47e9038e6fe9f3451b" dependencies = [ "unicode-ident", ] @@ -3323,8 +3306,8 @@ checksum = "e5d2d8d10f3c6ded6da8b05b5fb3b8a5082514344d56c9f871412d29b4e075b4" dependencies = [ "anyhow", "itertools", - "proc-macro2 1.0.56", - "quote 1.0.27", + "proc-macro2 1.0.59", + "quote 1.0.28", "syn 1.0.109", ] @@ -3339,9 +3322,9 @@ dependencies = [ [[package]] name = "quanta" -version = "0.11.0" +version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8cc73c42f9314c4bdce450c77e6f09ecbddefbeddb1b5979ded332a3913ded33" +checksum = "a17e662a7a8291a865152364c20c7abc5e60486ab2001e8ec10b24862de0b9ab" dependencies = [ "crossbeam-utils", "libc", @@ -3386,8 +3369,8 @@ version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "608c156fd8e97febc07dc9c2e2c80bf74cfc6ef26893eae3daf8bc2bc94a4b7f" dependencies = [ - "proc-macro2 1.0.56", - "quote 1.0.27", + "proc-macro2 1.0.59", + "quote 1.0.28", "syn 1.0.109", ] @@ -3402,11 +3385,11 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.27" +version = "1.0.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f4f29d145265ec1c483c7c654450edde0bfe043d3938d6972630663356d9500" +checksum = "1b9ab9c7eadfd8df19006f1cf1a4aed13540ed5cbc047010ece5826e10825488" dependencies = [ 
- "proc-macro2 1.0.56", + "proc-macro2 1.0.59", ] [[package]] @@ -3474,7 +3457,7 @@ version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" dependencies = [ - "getrandom 0.2.9", + "getrandom 0.2.10", ] [[package]] @@ -3581,7 +3564,7 @@ version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b033d837a7cf162d7993aded9304e30a83213c648b6e389db233191f891e5c2b" dependencies = [ - "getrandom 0.2.9", + "getrandom 0.2.10", "redox_syscall 0.2.16", "thiserror", ] @@ -3592,7 +3575,7 @@ version = "1.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d0ab3ca65655bb1e41f2a8c8cd662eb4fb035e67c3f78da1d61dffe89d07300f" dependencies = [ - "aho-corasick 1.0.1", + "aho-corasick 1.0.2", "memchr", "regex-syntax 0.7.2", ] @@ -3625,7 +3608,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cde824a14b7c14f85caff81225f411faacc04a2013f41670f41443742b1c1c55" dependencies = [ "async-compression", - "base64 0.21.0", + "base64 0.21.2", "bytes", "encoding_rs", "futures-core", @@ -3692,7 +3675,7 @@ version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bd124222d17ad93a644ed9d011a40f4fb64aa54275c08cc216524a9ea82fb09f" dependencies = [ - "digest 0.10.6", + "digest 0.10.7", ] [[package]] @@ -3805,7 +3788,7 @@ version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d194b56d58803a43635bdc398cd17e383d6f71f9182b9a192c127ca42494a59b" dependencies = [ - "base64 0.21.0", + "base64 0.21.2", ] [[package]] @@ -3866,12 +3849,6 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" -[[package]] -name = "scratch" -version = "1.0.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"1792db035ce95be60c3f8853017b3999209281c24e2ba5bc8e59bf97a0c590c1" - [[package]] name = "sct" version = "0.7.0" @@ -3913,9 +3890,9 @@ dependencies = [ [[package]] name = "security-framework" -version = "2.8.2" +version = "2.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a332be01508d814fed64bf28f798a146d73792121129962fdf335bb3c49a4254" +checksum = "1fc758eb7bffce5b308734e9b0c1468893cae9ff70ebf13e7090be8dcbcc83a8" dependencies = [ "bitflags 1.3.2", "core-foundation", @@ -3926,9 +3903,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.8.0" +version = "2.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31c9bb296072e961fcbd8853511dd39c2d8be2deb1e17c6860b1d30732b323b4" +checksum = "f51d0c0d83bec45f16480d0ce0058397a69e48fcdc52d1dc8855fb68acbd31a7" dependencies = [ "core-foundation-sys", "libc", @@ -4034,7 +4011,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c56f616602a3b282bf4b4e8e5b4d10bcf9412a987df91c592b95a1f6ef1ee43" dependencies = [ "debugid", - "getrandom 0.2.9", + "getrandom 0.2.10", "hex", "serde", "serde_json", @@ -4068,9 +4045,9 @@ version = "1.0.163" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8c805777e3930c8883389c602315a24224bcc738b63905ef87cd1420353ea93e" dependencies = [ - "proc-macro2 1.0.56", - "quote 1.0.27", - "syn 2.0.15", + "proc-macro2 1.0.59", + "quote 1.0.28", + "syn 2.0.18", ] [[package]] @@ -4122,7 +4099,7 @@ version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9f02d8aa6e3c385bf084924f660ce2a3a6bd333ba55b35e8590b321f35d88513" dependencies = [ - "base64 0.21.0", + "base64 0.21.2", "chrono", "hex", "indexmap", @@ -4139,8 +4116,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e182d6ec6f05393cc0e5ed1bf81ad6db3a8feedf8ee515ecdd369809bcce8082" dependencies = [ "darling 0.13.4", - "proc-macro2 1.0.56", - "quote 
1.0.27", + "proc-macro2 1.0.59", + "quote 1.0.28", "syn 1.0.109", ] @@ -4151,9 +4128,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "edc7d5d3932fb12ce722ee5e64dd38c504efba37567f0c402f6ca728c3b8b070" dependencies = [ "darling 0.20.1", - "proc-macro2 1.0.56", - "quote 1.0.27", - "syn 2.0.15", + "proc-macro2 1.0.59", + "quote 1.0.28", + "syn 2.0.18", ] [[package]] @@ -4177,7 +4154,7 @@ checksum = "82e6b795fe2e3b1e845bafcb27aa35405c4d47cdfc92af5fc8d3002f76cebdc0" dependencies = [ "cfg-if 1.0.0", "cpufeatures", - "digest 0.10.6", + "digest 0.10.7", ] [[package]] @@ -4258,8 +4235,8 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5bdfb59103e43a0f99a346b57860d50f2138a7008d08acd964e9ac0fef3ae9a5" dependencies = [ - "proc-macro2 1.0.56", - "quote 1.0.27", + "proc-macro2 1.0.59", + "quote 1.0.28", "syn 1.0.109", ] @@ -4321,8 +4298,8 @@ checksum = "dcb5ae327f9cc13b68763b5749770cb9e048a99bd9dfdfa58d0cf05d5f64afe0" dependencies = [ "heck 0.3.3", "proc-macro-error", - "proc-macro2 1.0.56", - "quote 1.0.27", + "proc-macro2 1.0.59", + "quote 1.0.28", "syn 1.0.109", ] @@ -4349,19 +4326,19 @@ version = "1.0.109" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" dependencies = [ - "proc-macro2 1.0.56", - "quote 1.0.27", + "proc-macro2 1.0.59", + "quote 1.0.28", "unicode-ident", ] [[package]] name = "syn" -version = "2.0.15" +version = "2.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a34fcf3e8b60f57e6a14301a2e916d323af98b0ea63c599441eec8558660c822" +checksum = "32d41677bcbe24c20c52e7c70b0d8db04134c5d1066bf98662e2871ad200ea3e" dependencies = [ - "proc-macro2 1.0.56", - "quote 1.0.27", + "proc-macro2 1.0.59", + "quote 1.0.28", "unicode-ident", ] @@ -4377,8 +4354,8 @@ version = "0.12.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"f36bdaa60a83aca3921b5259d5400cbf5e90fc51931376a9bd4a0eb79aa7210f" dependencies = [ - "proc-macro2 1.0.56", - "quote 1.0.27", + "proc-macro2 1.0.59", + "quote 1.0.28", "syn 1.0.109", "unicode-xid 0.2.4", ] @@ -4391,15 +4368,16 @@ checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" [[package]] name = "tempfile" -version = "3.5.0" +version = "3.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9fbec84f381d5795b08656e4912bec604d162bff9291d6189a78f4c8ab87998" +checksum = "31c0432476357e58790aaa47a8efb0c5138f137343f3b5f23bd36a27e3b0a6d6" dependencies = [ + "autocfg", "cfg-if 1.0.0", "fastrand", "redox_syscall 0.3.5", "rustix", - "windows-sys 0.45.0", + "windows-sys 0.48.0", ] [[package]] @@ -4435,9 +4413,9 @@ version = "1.0.40" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f9456a42c5b0d803c8cd86e73dd7cc9edd429499f37a3550d286d5e86720569f" dependencies = [ - "proc-macro2 1.0.56", - "quote 1.0.27", - "syn 2.0.15", + "proc-macro2 1.0.59", + "quote 1.0.28", + "syn 2.0.18", ] [[package]] @@ -4538,9 +4516,9 @@ version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" dependencies = [ - "proc-macro2 1.0.56", - "quote 1.0.27", - "syn 2.0.15", + "proc-macro2 1.0.59", + "quote 1.0.28", + "syn 2.0.18", ] [[package]] @@ -4648,9 +4626,9 @@ dependencies = [ [[package]] name = "toml_edit" -version = "0.19.9" +version = "0.19.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92d964908cec0d030b812013af25a0e57fddfadb1e066ecc6681d86253129d4f" +checksum = "2380d56e8670370eee6566b0bfd4265f65b3f432e8c6d85623f728d4fa31f739" dependencies = [ "indexmap", "serde", @@ -4667,7 +4645,7 @@ checksum = "3082666a3a6433f7f511c7192923fa1fe07c69332d3c6a2e6bb040b569199d5a" dependencies = [ "async-trait", "axum", - "base64 0.21.0", + "base64 0.21.2", "bytes", "futures-core", 
"futures-util", @@ -4694,9 +4672,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a6fdaae4c2c638bb70fe42803a26fbd6fc6ac8c72f5c59f67ecc2a2dcabf4b07" dependencies = [ "prettyplease 0.1.25", - "proc-macro2 1.0.56", + "proc-macro2 1.0.59", "prost-build", - "quote 1.0.27", + "quote 1.0.28", "syn 1.0.109", ] @@ -4813,16 +4791,16 @@ version = "0.1.24" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0f57e3ca2a01450b1a921183a9c9cbfda207fd822cef4ccb00a65402cbba7a74" dependencies = [ - "proc-macro2 1.0.56", - "quote 1.0.27", - "syn 2.0.15", + "proc-macro2 1.0.59", + "quote 1.0.28", + "syn 2.0.18", ] [[package]] name = "tracing-core" -version = "0.1.30" +version = "0.1.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24eb03ba0eab1fd845050058ce5e616558e8f8d8fca633e6b163fe25c797213a" +checksum = "0955b8137a1df6f1a2e9a37d8a6656291ff0297c1a97c24e0d8425fe2312f79a" dependencies = [ "once_cell", "valuable", @@ -4918,7 +4896,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "258bc1c4f8e2e73a977812ab339d503e6feeb92700f6d07a6de4d321522d5c08" dependencies = [ "lazy_static", - "quote 1.0.27", + "quote 1.0.28", "syn 1.0.109", ] @@ -4984,9 +4962,9 @@ checksum = "92888ba5573ff080736b3648696b70cafad7d250551175acbaa4e0385b3e1460" [[package]] name = "unicode-ident" -version = "1.0.8" +version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5464a87b239f13a63a501f2701565754bae92d243d4bb7eb12f6d57d2269bf4" +checksum = "b15811caf2415fb889178633e7724bad2509101cde276048e013b9def5e51fa0" [[package]] name = "unicode-normalization" @@ -5023,9 +5001,9 @@ checksum = "f962df74c8c05a667b5ee8bcf162993134c104e96440b663c8daa176dc772d8c" [[package]] name = "universal-hash" -version = "0.5.0" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"7d3160b73c9a19f7e2939a2fdad446c57c1bbbbf4d919d3213ff1267a580d8b5" +checksum = "fc1de2c688dc15305988b563c3854064043356019f97a4b46276fe734c4f07ea" dependencies = [ "crypto-common", "subtle", @@ -5054,9 +5032,9 @@ dependencies = [ [[package]] name = "url" -version = "2.3.1" +version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d68c799ae75762b8c3fe375feb6600ef5602c883c5d21eb51c09f22b83c4643" +checksum = "50bff7831e19200a85b17131d085c25d7811bc4e186efdaf54bbd132994a88cb" dependencies = [ "form_urlencoded", "idna", @@ -5072,11 +5050,11 @@ checksum = "711b9620af191e0cdc7468a8d14e709c3dcdb115b36f838e601583af800a370a" [[package]] name = "uuid" -version = "1.3.2" +version = "1.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4dad5567ad0cf5b760e5665964bec1b47dfd077ba8a2544b513f3556d3d239a2" +checksum = "345444e32442451b267fc254ae85a209c64be56d2890e601a0c37ff0c3c5ecd2" dependencies = [ - "getrandom 0.2.9", + "getrandom 0.2.10", "serde", ] @@ -5166,9 +5144,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.85" +version = "0.2.86" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b6cb788c4e39112fbe1822277ef6fb3c55cd86b95cb3d3c4c1c9597e4ac74b4" +checksum = "5bba0e8cb82ba49ff4e229459ff22a191bbe9a1cb3a341610c9c33efc27ddf73" dependencies = [ "cfg-if 1.0.0", "wasm-bindgen-macro", @@ -5176,24 +5154,24 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.85" +version = "0.2.86" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35e522ed4105a9d626d885b35d62501b30d9666283a5c8be12c14a8bdafe7822" +checksum = "19b04bc93f9d6bdee709f6bd2118f57dd6679cf1176a1af464fca3ab0d66d8fb" dependencies = [ "bumpalo", "log", "once_cell", - "proc-macro2 1.0.56", - "quote 1.0.27", - "syn 2.0.15", + "proc-macro2 1.0.59", + "quote 1.0.28", + "syn 2.0.18", 
"wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-futures" -version = "0.4.35" +version = "0.4.36" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "083abe15c5d88556b77bdf7aef403625be9e327ad37c62c4e4129af740168163" +checksum = "2d1985d03709c53167ce907ff394f5316aa22cb4e12761295c5dc57dacb6297e" dependencies = [ "cfg-if 1.0.0", "js-sys", @@ -5203,38 +5181,38 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.85" +version = "0.2.86" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "358a79a0cb89d21db8120cbfb91392335913e4890665b1a7981d9e956903b434" +checksum = "14d6b024f1a526bb0234f52840389927257beb670610081360e5a03c5df9c258" dependencies = [ - "quote 1.0.27", + "quote 1.0.28", "wasm-bindgen-macro-support", ] [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.85" +version = "0.2.86" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4783ce29f09b9d93134d41297aded3a712b7b979e9c6f28c32cb88c973a94869" +checksum = "e128beba882dd1eb6200e1dc92ae6c5dbaa4311aa7bb211ca035779e5efc39f8" dependencies = [ - "proc-macro2 1.0.56", - "quote 1.0.27", - "syn 2.0.15", + "proc-macro2 1.0.59", + "quote 1.0.28", + "syn 2.0.18", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.85" +version = "0.2.86" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a901d592cafaa4d711bc324edfaff879ac700b19c3dfd60058d2b445be2691eb" +checksum = "ed9d5b4305409d1fc9482fee2d7f9bcbf24b3972bf59817ef757e23982242a93" [[package]] name = "web-sys" -version = "0.3.62" +version = "0.3.63" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16b5f940c7edfdc6d12126d98c9ef4d1b3d470011c47c76a6581df47ad9ba721" +checksum = "3bdd9ef4e984da1187bf8110c5cf5b845fbc87a23602cdf912386a76fcd3a7c2" dependencies = [ "js-sys", "wasm-bindgen", @@ -5939,7 +5917,7 @@ dependencies = [ "abscissa_core", "atty", 
"chrono", - "clap 4.3.0", + "clap 4.3.2", "color-eyre", "console-subscriber", "dirs", @@ -6012,7 +5990,7 @@ version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ - "proc-macro2 1.0.56", - "quote 1.0.27", - "syn 2.0.15", + "proc-macro2 1.0.59", + "quote 1.0.28", + "syn 2.0.18", ] From 9959a6ce04102e240d3396d7805417e90f8a4961 Mon Sep 17 00:00:00 2001 From: Deirdre Connolly Date: Wed, 7 Jun 2023 23:44:28 -0400 Subject: [PATCH 056/265] Release 1.0.0-rc.9 (#6821) * Bump semvers * Update zebra-utils/README.md * Updated mainnet checkpoints against commit b7029b8 * Add testnet checkpoints from b7029b8 * Bump zebrad rust-version to 1.70 * rust-version 1.68 Co-authored-by: teor * Add CHANGELOG for 1.0.0-rc.9 * Bump estimated release height to within june 7th 2023 utc-4 * Add #6801 to CHANGELOG in anticipation * Update CHANGELOG.md Co-authored-by: teor * Update CHANGELOG.md Co-authored-by: teor * Update CHANGELOG.md Co-authored-by: teor * Update CHANGELOG.md Co-authored-by: teor * Update CHANGELOG.md Co-authored-by: teor * Update breaking changes in 1.0.0-rc.9 changelog * changelog: move #6801 to Fix * Update CHANGELOG.md Co-authored-by: teor * Include #6832 in the changelog * Add missing changes to changelog * Remove #6801 from known issues in the README * Use the latest bug template link --------- Co-authored-by: teor --- CHANGELOG.md | 39 +++- Cargo.lock | 24 +- README.md | 5 +- book/src/user/docker.md | 4 +- tower-batch/Cargo.toml | 2 +- tower-fallback/Cargo.toml | 2 +- zebra-chain/Cargo.toml | 2 +- zebra-consensus/Cargo.toml | 2 +- .../src/checkpoint/main-checkpoints.txt | 206 ++++++++++++++++++ .../src/checkpoint/test-checkpoints.txt | 103 +++++++++ zebra-network/Cargo.toml | 2 +- zebra-node-services/Cargo.toml | 2 +- zebra-rpc/Cargo.toml | 2 +- zebra-script/Cargo.toml | 2 +- zebra-state/Cargo.toml | 2 +- zebra-test/Cargo.toml | 2 +- 
zebra-utils/Cargo.toml | 2 +- zebrad/Cargo.toml | 2 +- zebrad/src/components/sync/end_of_support.rs | 2 +- 19 files changed, 374 insertions(+), 33 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 960a13e4ad6..fb74c251214 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,16 +5,49 @@ All notable changes to Zebra are documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org). -## [Zebra 1.0.0-rc.9](https://github.com/ZcashFoundation/zebra/releases/tag/v1.0.0-rc.9) - XXXX-XX-XX -In this release ... +## [Zebra 1.0.0-rc.9](https://github.com/ZcashFoundation/zebra/releases/tag/v1.0.0-rc.9) - 2023-06-07 + +This release continues to address audit findings. It fixes multiple network protocol and RPC bugs, +and reduces sensitive information logging. + +This is the last release candidate before the 1.0.0 stable release. Please report bugs to [the Zebra GitHub repository](https://github.com/ZcashFoundation/zebra/issues/new?assignees=&labels=C-bug%2C+S-needs-triage&projects=&template=bug_report.yml&title=) ### Breaking Changes - The version subcommand has been replaced with a --version/-V flag ([#6801](https://github.com/ZcashFoundation/zebra/pull/6801)) + +### Security + +- Stop logging peer IP addresses, to protect user privacy ([#6662](https://github.com/ZcashFoundation/zebra/pull/6662)) +- Stop logging potentially sensitive user information from unmined transactions ([#6616](https://github.com/ZcashFoundation/zebra/pull/6616)) +- Rate-limit MetaAddrChange::Responded from peers ([#6738](https://github.com/ZcashFoundation/zebra/pull/6738)) +- Ignore out of order Address Book changes, unless they are concurrent ([#6717](https://github.com/ZcashFoundation/zebra/pull/6717)) +- Limit blocks and transactions sent in response to a single request ([#6679](https://github.com/ZcashFoundation/zebra/pull/6679)) +- Rate-limit and size-limit peer transaction ID 
messages ([#6625](https://github.com/ZcashFoundation/zebra/pull/6625)) +- Stop panicking on state RPC or block requests with very large heights ([#6699](https://github.com/ZcashFoundation/zebra/pull/6699)) +- Try harder to drop connections when they shut down, Credit: Ziggurat Team ([#6832](https://github.com/ZcashFoundation/zebra/pull/6832)) +- Randomly drop connections when inbound service is overloaded ([#6790](https://github.com/ZcashFoundation/zebra/pull/6790)) + +### Added + +- Report compiler version and Zebra features when starting Zebra ([#6606](https://github.com/ZcashFoundation/zebra/pull/6606)) +- Update Zebra book summary to include supported platforms, platform tier policy, and versioning ([#6683](https://github.com/ZcashFoundation/zebra/pull/6683)) +- Improve zebrad's help output, credit to @Rqnsom ([#6801](https://github.com/ZcashFoundation/zebra/pull/6801)) +- Cache a list of useful peers on disk ([#6739](https://github.com/ZcashFoundation/zebra/pull/6739)) +- Make the first stable release forward-compatible with planned state changes ([#6813](https://github.com/ZcashFoundation/zebra/pull/6813)) + +### Fixed + +- Limit RPC failure log length, add details to RPC failure logs ([#6754](https://github.com/ZcashFoundation/zebra/pull/6754)) +- Allow inbound connections to Zebra running in Docker ([#6755](https://github.com/ZcashFoundation/zebra/pull/6755)) - Zebra now accepts filters for the start command when no subcommand is provided ([#6801](https://github.com/ZcashFoundation/zebra/pull/6801)) +- Avoid panicking on state errors during shutdown ([#6828](https://github.com/ZcashFoundation/zebra/pull/6828)) -... 
+### Contributors + +Thank you to everyone who contributed to this release, we couldn't make Zebra without you: +@arya2, @mpguerra, @oxarbitrage, @teor2345 and @upbqdn ## [Zebra 1.0.0-rc.8](https://github.com/ZcashFoundation/zebra/releases/tag/v1.0.0-rc.8) - 2023-05-10 diff --git a/Cargo.lock b/Cargo.lock index 83e86557cce..3c3f57a2818 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4701,7 +4701,7 @@ dependencies = [ [[package]] name = "tower-batch" -version = "0.2.39" +version = "0.2.40" dependencies = [ "color-eyre", "ed25519-zebra", @@ -4725,7 +4725,7 @@ dependencies = [ [[package]] name = "tower-fallback" -version = "0.2.39" +version = "0.2.40" dependencies = [ "futures-core", "pin-project", @@ -5621,7 +5621,7 @@ dependencies = [ [[package]] name = "zebra-chain" -version = "1.0.0-beta.24" +version = "1.0.0-beta.25" dependencies = [ "bitflags 2.3.1", "bitflags-serde-legacy", @@ -5681,7 +5681,7 @@ dependencies = [ [[package]] name = "zebra-consensus" -version = "1.0.0-beta.24" +version = "1.0.0-beta.25" dependencies = [ "bellman", "blake2b_simd", @@ -5726,7 +5726,7 @@ dependencies = [ [[package]] name = "zebra-network" -version = "1.0.0-beta.24" +version = "1.0.0-beta.25" dependencies = [ "bitflags 2.3.1", "byteorder", @@ -5766,7 +5766,7 @@ dependencies = [ [[package]] name = "zebra-node-services" -version = "1.0.0-beta.24" +version = "1.0.0-beta.25" dependencies = [ "color-eyre", "jsonrpc-core", @@ -5778,7 +5778,7 @@ dependencies = [ [[package]] name = "zebra-rpc" -version = "1.0.0-beta.24" +version = "1.0.0-beta.25" dependencies = [ "chrono", "futures", @@ -5810,7 +5810,7 @@ dependencies = [ [[package]] name = "zebra-script" -version = "1.0.0-beta.24" +version = "1.0.0-beta.25" dependencies = [ "displaydoc", "hex", @@ -5823,7 +5823,7 @@ dependencies = [ [[package]] name = "zebra-state" -version = "1.0.0-beta.24" +version = "1.0.0-beta.25" dependencies = [ "bincode", "chrono", @@ -5864,7 +5864,7 @@ dependencies = [ [[package]] name = "zebra-test" -version = 
"1.0.0-beta.24" +version = "1.0.0-beta.25" dependencies = [ "color-eyre", "futures", @@ -5891,7 +5891,7 @@ dependencies = [ [[package]] name = "zebra-utils" -version = "1.0.0-beta.24" +version = "1.0.0-beta.25" dependencies = [ "color-eyre", "hex", @@ -5912,7 +5912,7 @@ dependencies = [ [[package]] name = "zebrad" -version = "1.0.0-rc.8" +version = "1.0.0-rc.9" dependencies = [ "abscissa_core", "atty", diff --git a/README.md b/README.md index 98bd9a8af04..9bfb4bf8ca3 100644 --- a/README.md +++ b/README.md @@ -54,7 +54,7 @@ section in the Zebra book for system requirements. This command will run our latest release, and sync it to the tip: ```sh -docker run zfnd/zebra:1.0.0-rc.8 +docker run zfnd/zebra:1.0.0-rc.9 ``` For more information, read our [Docker documentation](book/src/user/docker.md). @@ -101,7 +101,7 @@ Note that the package `clang` includes `libclang` as well as the C++ compiler. Once the dependencies are in place, you can build Zebra ```sh -cargo install --locked --git https://github.com/ZcashFoundation/zebra --tag v1.0.0-rc.8 zebrad +cargo install --locked --git https://github.com/ZcashFoundation/zebra --tag v1.0.0-rc.9 zebrad ``` You can start Zebra by @@ -161,7 +161,6 @@ There are a few bugs in Zebra that we're still working on fixing: - Experimental Tor support is disabled until [Zebra upgrades to the latest `arti-client`](https://github.com/ZcashFoundation/zebra/issues/5492). This happened due to a Rust dependency conflict, which could only be resolved by `arti` upgrading to a version of `x25519-dalek` with the dependency fix. -- Output of `help`, `--help` flag, and usage of invalid commands or options are inconsistent [#5502](https://github.com/ZcashFoundation/zebra/issues/5502). See the issue for details. 
## Future Work diff --git a/book/src/user/docker.md b/book/src/user/docker.md index 65760f71fa5..a47bf2e9b64 100644 --- a/book/src/user/docker.md +++ b/book/src/user/docker.md @@ -11,13 +11,13 @@ You can deploy Zebra for a daily use with the images available in [Docker Hub](h ### Ready to use image ```shell -docker run --detach zfnd/zebra:1.0.0-rc.8 +docker run --detach zfnd/zebra:1.0.0-rc.9 ``` ### Build it locally ```shell -git clone --depth 1 --branch v1.0.0-rc.8 https://github.com/ZcashFoundation/zebra.git +git clone --depth 1 --branch v1.0.0-rc.9 https://github.com/ZcashFoundation/zebra.git docker build --file docker/Dockerfile --target runtime --tag zebra:local . docker run --detach zebra:local ``` diff --git a/tower-batch/Cargo.toml b/tower-batch/Cargo.toml index bfbea9d8468..cef74a39761 100644 --- a/tower-batch/Cargo.toml +++ b/tower-batch/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "tower-batch" -version = "0.2.39" +version = "0.2.40" authors = ["Zcash Foundation "] license = "MIT" edition = "2021" diff --git a/tower-fallback/Cargo.toml b/tower-fallback/Cargo.toml index 0376a8a2b3a..de3c4d7ae6c 100644 --- a/tower-fallback/Cargo.toml +++ b/tower-fallback/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "tower-fallback" -version = "0.2.39" +version = "0.2.40" authors = ["Zcash Foundation "] license = "MIT" edition = "2021" diff --git a/zebra-chain/Cargo.toml b/zebra-chain/Cargo.toml index 4a004651e80..c3d855a0e07 100644 --- a/zebra-chain/Cargo.toml +++ b/zebra-chain/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "zebra-chain" -version = "1.0.0-beta.24" +version = "1.0.0-beta.25" authors = ["Zcash Foundation "] license = "MIT OR Apache-2.0" edition = "2021" diff --git a/zebra-consensus/Cargo.toml b/zebra-consensus/Cargo.toml index 084cb357f36..789cbfc4cda 100644 --- a/zebra-consensus/Cargo.toml +++ b/zebra-consensus/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "zebra-consensus" -version = "1.0.0-beta.24" +version = "1.0.0-beta.25" authors = ["Zcash Foundation "] 
license = "MIT OR Apache-2.0" edition = "2021" diff --git a/zebra-consensus/src/checkpoint/main-checkpoints.txt b/zebra-consensus/src/checkpoint/main-checkpoints.txt index 5cc13e1d3b5..84e2b99561d 100644 --- a/zebra-consensus/src/checkpoint/main-checkpoints.txt +++ b/zebra-consensus/src/checkpoint/main-checkpoints.txt @@ -10667,3 +10667,209 @@ 2074108 00000000015062ecafe1e087ddc8cdca74c6fcabb7552b36b784c8e05020be80 2074332 0000000001830c15b8c76ef48cb08fe3fc5f362a6015f3bdf847933446c5a42f 2074713 0000000000b6a6f7e35eacfa84a205e2ac86da55b518d11134917f9da0778faf +2075113 0000000001a99f5cbd00ee510fbbdbbe7f72c694350045e66fbb79c7bd0af17f +2075355 000000000190bf4d0a14a1d653f16e44ed4dbbf317cc5aede4a7c45a42eddb11 +2075591 0000000000e2ac282b74d35eec430a46f159415dcee10c5baffc791410347e57 +2075830 000000000092281422f9bfa7c8a76d4d9a1f7673e7fb02631a5c4e7e911e7dfa +2076040 000000000019993f0bdb9370fe9712b77004682cb6c7c55821a1b39dea2f5a8b +2076239 000000000183d8c7e5a83c6645d9a8b6121192902fc68277e256dbeec175799f +2076465 000000000045ee10334a4f89e0c94f739b2b9e01f42f0fc3cd8e83583dd4eacb +2076621 00000000014975b2eeef5ddfeb41e4f35b703ed8e62817c709a2a12084dfc182 +2076909 0000000000ac0d6bb6b12c413e98554e67e08255548c2da1182700321336a4ff +2077156 00000000010dbf97b3209b05eabc9f01f089bd97d48622b36773b4c331382ed1 +2077291 00000000007d51fb3e413ddd36719e650ec4a1c727a30aa5380ad8af810d6654 +2077422 0000000000625df0e029adbd29103627b9425e770bc95fd5fc7bc0e1c2670b71 +2077550 00000000001bcba0aaa8a9587f3b97f5c387241dc10fb1b518b629b3ba9c6f5d +2077660 0000000000a6c3a14f93012e5106bde5120ea3a87eb233c5ffc277d0c5cbf0f3 +2077775 0000000001c2f9bec36c0903d8aebb10aee022f6c038827b0989c12c03b308c1 +2077898 0000000001282ebbff0a872d53231cebc08b43f82cf49568fbdb33996a6bd0d2 +2078041 000000000102cf8ca8bebd5a0c211369ab15a9bfc487c9cb5463a5beb8a7dbb4 +2078158 00000000005c84fae7637880208b1a5e56fe90010e1bbf8077de8a6805221610 +2078284 00000000013e02d5696bf80ebd6953a9d0b890d6ced8b5c7034c15016f5abd2b +2078402 
0000000000737730069b08e3d1f59006e754045a0e2a643516930e7a12bb3601 +2078499 0000000000c1c6c6d7dadbfde9b3d52151aba05bf6053e1eb0ef4daa41ebe4b1 +2078604 0000000000bbc8474aabe166b367f053ab3ccefaf1c9f922e747c6e20deea7d4 +2078703 0000000000193a5c667ae00e51465ddefd6ad7ec9130db8d9909ba27bd88c5cf +2078813 000000000127c5c3016530ee95d7fbd826580600c1fd50ffb71190dc60e3572e +2078943 000000000133ff5ea8249786e58b9aa1f4eb4fee0abf8552e0f670396622710b +2079111 000000000019ed9cfc533b0b8307b2b69974b602afbcb9c458aea978bddc3a92 +2079277 000000000076658b08cea75506e7b3302a5b99cdefbb76292fa28fc67b98b52b +2079380 00000000015a2a71549a3fe6d658cc62224de854b98b54bc6a195dae41998b10 +2079523 00000000002aca3a883235acd879b2404f37528f5957d803e4f762257f0c20a5 +2079662 0000000001d5971debe4b032b9949b8e52aa35360c089011f9b5bb4871114a63 +2079807 00000000005854f7d4ce31683f79b8612b4506990d4a03f7c5fd11f3abd0269b +2079991 00000000012744f6be1d5d352630ad60bd06b7ac303313899da55dfe4b3d92dd +2080129 0000000001ed04450f257bc8b7c95629659688c6f36dfd9185f5d67715526f9b +2080309 0000000000ad74ed21dbe8a0807729759e4c0170f6740fea30fe7d0232e3311b +2080709 0000000000e0fee4e08e761d52a6d25c36763fc1788d809b008c4286feadc0c4 +2081029 00000000014ebb5c2cd581f8b3eebacea0d75f7659db2801e9c74dc2aafa2749 +2081185 000000000142344e494c8686e0828ca763841492a56d09f93b265c18db8eee31 +2081297 00000000017718db0074dd9903f4fb1e5a79ea81f7bca5dc55c8e9daa7a19401 +2081404 00000000003b81dd192a5ed2dcff69469bd1902b6a77edda77ae174a6f31c23e +2081514 00000000013f483f1a79d515beee205bf821ad36fecd4642e9627ea4b54fb14a +2081636 000000000054ec8a49b0b2b3638fbe5363cd62a4fceda2519d8b775580a106f7 +2081745 00000000019dd976a602f82cf79d2252f741836ef65de94d19817ceda3a9b046 +2081883 0000000000167f3797a557e26240e5ed10ff574d45d88d27bec44189e1f4a4f0 +2082013 00000000010dafd4f31b09d4f70a38e76fc1a40a67497ab4b0af35139bb972a2 +2082139 000000000180fa80db5c37e49ac748dda9adc81e97004eed1c67d898de2f408c +2082272 000000000093a93c48fbb4759f494599805206dd7db1e6cc483928e156b0c985 +2082403 
000000000109103341c173547c6a6696025ed8d3b6d5c3b81e0fff73f62ddce2 +2082554 0000000001287ae5df7f6ba6497325863a956fd9482c3811d85e684f6b785cda +2082686 0000000000628e50012d4582a199a35ea7b13730ee315e327eb1548e1aeffcc4 +2082845 0000000000fea04ea60477b21314b0dbceefec88c269b968adf5fb6a68e69179 +2082969 0000000001cfadc95ac815fa8d4dc3210e844584744e69122a566f84155847ae +2083104 00000000013e581f16c4715f39d43f61c5db974664ee2cb1437134c9238a0b22 +2083229 000000000085e08c8f8a33889a7a6fb3a48aec7c2955b435e6d74df6f4d9caf3 +2083332 000000000136e02aa8e0d6bd970c9a3b7ef0cfdf3dc8bcc3ecfb32a5ecedd03f +2083435 000000000185e9c7af88403485a12a760574f3ae0479c671219f5a8c2dd1fbea +2083573 00000000012602f9a5270af537e0aad4fd955c08116ba65ae01071cf23e5b743 +2083706 0000000000b425d837a7091c4c94b16c8f634fe1f8616c6dbcc32605879c39a6 +2083811 0000000000acf4ccbfd1649a8fedd5cb5bc3ff3e84c4f6dd63782fd72d161990 +2083941 00000000009c2a22fe12fc1c7a37d47ed7c63753e1b492c82562cc7f62d7b15b +2084074 00000000003ecf1222c6b8786dfb7ebd9bcee5110fd84d02823f2ea7839db06a +2084195 0000000000c1c49ae460a1a6a06b6a7f76f7f7c49a98752f3ad2d52002e3349b +2084326 0000000000b9faee1b460f0053868ed7b7444566943780ffea4aedba153c7bfa +2084432 00000000014399d32f8b4c918e0a27ab2d9adc8e59541c0faf6f26e7e04304f7 +2084558 0000000000391f7fc515fd02df4ad7a9b87038ebf9a2f7838bfa4f1501cff24d +2084695 00000000006c2526054bcd1e3d770d5175b565f964ebcac3b6cda84495905a23 +2084839 00000000009a17158b0a1ed2b8301dc41fd0d3f79f7b248685e606383dd2d746 +2084965 000000000083a29f9b23c35799d15f31879fc3e7c4e14f99d971ed0ae47a4318 +2085087 000000000174e61ccb20f8b1a2e0cbc14e4ea4b7355daa4257ae77a90b9b3824 +2085222 0000000000ff69aee2b7e4ab0ce84a78220c2e238ae4cd4b7c7fe80b42d78a91 +2085302 0000000000a3c808c1bd1a2ce000bb2dae84ccf2bf61f7efb316cc8df74e14d8 +2085413 0000000001a411687105ec5e6df08a408094b6efeccdad9637d05f559705a750 +2085577 000000000036144af8d64b6eda6f7b6c3a0e072bfc7414a0fec80f74346a30e0 +2085719 0000000000f5e597f032c5c4c314bea90b5a615fedcd343a66d9b190c4b30728 +2085880 
0000000000c2fd91ea1a788befa956d96191122f52ca7dd40e112b7fc1d368b7 +2086000 00000000010f5c6fb2c3a27c4c89f1d3c80ffb9478d5138dbf89d8c46705ab44 +2086114 0000000000e0609fe931e181ddfea66d5a156ab443fc9ce0c0323cd8a73c1715 +2086279 0000000000801ee3c97e79cfb0328673bb402927b199059cc01fc9078aee3a02 +2086411 00000000005825160e58a707808118d1c53c3b5fb1a7b23c4a71b587e77218ba +2086539 0000000000a6b8b89ce3753e5426912a3302b5f0815333eb34ac13f90892d9ca +2086652 000000000032c4aee2b456017f5c44ece27dfe6f82ce793d6a2c8e6cf9705181 +2086755 00000000008b381f7aa4911d580c7e9925f9547b5b035e49fb3b6052c3ffd208 +2086905 0000000001052632e3bb2bc5c1fb399b99ee42d0204d31bf42fec607705d6b1b +2087034 0000000001474b9758fab2f332507c27190707d4dd25a22ddb868cc46ae654fa +2087133 000000000111733ff1eb1398e0fc424ee0a790f48f4255ead39921f67aa6784c +2087243 000000000064babe446f9f017692c4014f57e7fe7ac3af54d8e0bb080b601a06 +2087398 00000000004a5564c138e812b337ca04688a8cdb986e605c625a68397cffe782 +2087520 0000000000658ce81447a526e4ab0cb86fe290d5c91b438b5045fe296dc2fc2b +2087648 0000000001912acf62fdb254caec219475bf19de2968530d6bae14e5abd6e132 +2087786 0000000000a776a8153c5d8d5df0c46f3dbdb06e0842176845cd189303cd0634 +2087891 00000000002ccb4b12628cbff1b098dc97067419dc4e9d6fb080a0332c853c46 +2088044 0000000000f6a45be7b62776431b2d9b8af4fc3ac41777d8e6641ef6c91a56f2 +2088195 000000000002a33003d8425848a941e31f271f52175297ce7b620ccc3e59d1bd +2088324 0000000001417d7d06bdbe473761c8682604892e59977e892e729a46ddc0ab46 +2088468 0000000000fbb79d003db0a0b4daf4b3e58127f2940565b03d05544bcfc9c922 +2088594 00000000013bac8323b8061d16f665010dfe67c3ec9f468cb9e61580e04b79c7 +2088726 0000000000fb4583ce7b2175a27ccb7e4aed7d79d81708cdb33c3a5b65c869e0 +2088848 0000000001958630b3397d77b058a659e47ef03a6c30c548f3817f618d6274ec +2089011 000000000111f5a56df6d45411256d7cc54ff69ff3d9e66b6c99b5980143fea8 +2089136 0000000000995a0fd722fe9521b641914cd42b87a0b3114856025514405c465b +2089290 00000000014f205ee8c866b48777bd5b8423161143c3304eb32680942a87bb46 +2089444 
00000000005aade414a8e456d9726c6537136eff19fda583108fc6621244e0fe +2089551 0000000000e9c3ee19aa6145ba94c3f59e3c0396ee621baf6129b3528f539c12 +2089667 0000000000b1ea2da31d5d8eb8a18c621f6bc2c79fe95ecde1964af19c2b95eb +2089795 00000000006c9c29a3ebbbc0a88bed7e8a292fd004dbb4d8a31a6e7f48d3a881 +2089921 00000000001e842aea64af9109cc5fccde5452ceeaa01ce0d931ef43ff3c04f0 +2090048 000000000189b07ece1910e7cd1df564cad6d198b90bdde937ba8d0d8c0fb0cc +2090198 00000000001ca7557b6317bc1424714d64dedf382527ce5c2e522e063a773091 +2090321 000000000081e3efad4e1a4ca570adff25257c91504e1446a85601e97ed07b14 +2090491 000000000057c842f1b7c2872a23dd2b7a070980184f566d55614d89edf48b3c +2090619 00000000001d01eec1878379670550483e7b79c139f22d08179f09cf5e6f4be6 +2090714 00000000014056f035e3d7f93a92e2e761b8c39f155dbb965307e6ed9bb170e5 +2090850 000000000145aff813301f53126ab43e12322a39de94ef6b5c6415da041f3758 +2090979 000000000170349ba82f090f52557c75bc9af3194acd01b51fbf9c06b44f030c +2091126 0000000000c01ce110634c6e639a31a685453090b8068bb89368f2bb9eca1120 +2091251 0000000000d1c7eb27f2827306534733eb99f176a0c9d5dd438cd23938ef680c +2091367 000000000121421e33b4f6f7deb178eb367a88a42fa4cb128bebb5bcc8b53827 +2091494 000000000090ee533f3170cced652435525a92461da39f8a48e3109699fcc664 +2091642 0000000001741cb8f3fe51bc7d128cd54f796b4b5190872a230b2d5317675603 +2091743 000000000155855fa11342133b6fcfd5d9994739f456559b846ea5e19f4035de +2091862 0000000000b97d6804bc58ca8aadf678614f5a05885ff992ec39fdbfa500cbcf +2091969 000000000026c91a63f379118acd5b3b9ef4e5b63732d28ae7e4374a9efa539b +2092094 000000000005ed59479c82647e54020ede65c39a1fc8644c76c2ad47906bbec9 +2092243 00000000016d798087735a6adcfc45f1b453c99f3e815287b3eddde288a6f47f +2092354 0000000000df2e13976590ca0de726838139c3a4d26decea868797ceff8925b0 +2092442 00000000019fa546714a01c2c98cac576ede6c9b7245bfc06e30aa79132097e5 +2092577 00000000006fc88b68bcb75e2b9183cb65604d0297d095fcc5d1580d8696d26a +2092727 00000000003745c16340b006689f9f9beb68c5f05d06ab041a866205a1da0b99 +2092830 
000000000109c548e333cc58a86278cf7a1e6fbdb956ce0077a250a9f8706b90 +2092947 000000000135e5a70a02cf995623c7e529fff35400dde4eddf5797c9a3aee732 +2093093 0000000000f9458e958ca596df0af857057e02b9c498055de0aa4ad531bf9b16 +2093209 000000000025246750e066373b65649a45b11304a18520f63c52b5db520eb063 +2093353 00000000010cef5c3ba1f1943596ce9e0a88937f0425b1c569e618133b695b4c +2093475 000000000127a92e27e42a379870f5ec6eef37272523537436ee50d284df35ba +2093599 0000000000f7478cad724e166935715a65536587b651445bd63185ba9f03681d +2093704 00000000001f605497e26424f85c9d0e7fe50d448bbbded57893536b137dc810 +2093847 00000000007a2b5cfa3d3b75d83a2e088706327dadcda59e7dbe7f461e1db70f +2093984 000000000147c1590105c0b33e4c5acb5a6c0e3cd0b97b4290578b641feb64de +2094130 0000000000e62e23345cbe34b2e8f9e670b1867512a363afed387ac44b25ca0c +2094272 0000000001081abca01a5593db3a73d7c15c34bf80b8c24a4f6c22a6c585c2d5 +2094400 00000000001d0e52398c31bd5e65306c4c98450633423e47a05657cb19970a97 +2094602 0000000001bb52a477ba7a6dcaa5b69edf249b193cfffb48dfbc7d58b5d81a3f +2094832 00000000019cb9d5f6dbd810a4024a519310fb5245fe571efcb1636411db050a +2095050 00000000012b4df2ac3c737227560b507437049fd3cf54f460f912e452ba7881 +2095276 00000000007d87573801bb0a1ebb0b62bcfa41920a48da68f36af0dd73163cd4 +2095507 000000000100405f5523ac56768cb001b7f1150f06b5b776a7e6fc5aae6b5f35 +2095768 00000000010c7c5d98a49d0ffc78309f461732b688a6f8f38883d54b5715eff2 +2095996 0000000000f9c91ffc58ba89159128a057ba0bd42c17416926a17f66529cabea +2096252 00000000015aa2059a7f4d8ae0d0e2c7830c887a46d2e888a28c4725d18e89ff +2096467 000000000098c095d51fb23327f2eef50af0ccd992c7c22fe82ad5fb494141f4 +2096660 0000000000d2fa4dbd07c6bf86768a7862d54b7dc9dd763ce3db9654e0eedef6 +2096903 0000000000e8fc62e98ad1c279523f04a55b8ad432c69bf2994e247e28aa551f +2097179 00000000010f68b4ca057a42199459d1bf7462c7d0e84faec16416081b3f02f8 +2097435 0000000000547ee4b69300de21a1ecba0b9c9a31f6d49c8e574cf549c41df924 +2097686 00000000005606ecf6d0f8a76f0c49b909cf8dc21092146501303ab1e8bedfae +2097908 
00000000013e314a2da8a5c37fad8f66bb93549ee723fe64d5d2cd1789068a27 +2098115 0000000000619d8ebebc9c5e314ef50ed424680f72640393ae4cddb06787cbb5 +2098356 00000000006a5bd6a2cf39d29936a2fc3f24ef0b96ab6a3bf34fb30e0bb0fca1 +2098639 00000000016cbe6be35a7e51a5f7af4f61ddcac80cc091a220fc367e3383410c +2098824 000000000159d8583b4e5bb535705bf5bc87fa9c945aab9869c0a4c3b3855e71 +2099031 0000000000b2f71cff88dcb2733aed5295b8df299493ff376ab0c208a667c7ef +2099206 00000000004c580fa34990eef81ea528e76e2edcab313ee817afd14428c99124 +2099443 00000000013ee542bf98e6a5bbfdaefc63e80cc2f352d369f0b8c1c421b9f503 +2099688 0000000000bfdc4c16a54ac414edb19d0ff954a4e660e085aaf2c3ee04752ba2 +2099909 00000000008d1bb1394d7eb061d24d09313535c5390835faf8736c29413db3c9 +2100132 00000000008d8bc2ba2bab2ab1ec144ea90ae6eea9fc53e2625be5c2a2f31af7 +2100338 0000000000b7182364fab1c4d886c7320f760727fcd6bdc3579ec89c2bfdcae3 +2100563 0000000001709e775eb92dc1bb3947b43a2cebd6a1aa1f8361ca9171ee7e568b +2100799 00000000007a830c89fc58233dd9dcd920178e18e7ecefb7b7c41abad6ef63b3 +2101027 0000000000ba306e1b028fc7e6c9b596e5aea4c0e03deb5a261fd285b7807c1d +2101249 000000000147292eb7a34e69d7ada2434d17acf883edb97b480d8131a67e6695 +2101488 0000000000bb967770e76aa56783ecf9811be8509cee1185fe5e3ce459c430c3 +2101706 000000000064a94c632d64928781a822de69bca875fb2d728af2b8c6d76de035 +2102003 000000000006729c11fbd2b36cf45cef154b5be993271d13164d0535a6b28084 +2102282 00000000016a24b3ecd9a5645768ab60cacd3dba95ed66c969823cf7e064f644 +2102557 0000000001400d652c5013ad285d1c2091a281e8a50f46998d991ec941044b0e +2102794 000000000175190d14b451705b876ab43e1c0178aa58b40d9a0fd7a75f7c2d86 +2103089 0000000000124f0adb813216722c1d9a0f426e1a7db2d701800994f8474a5948 +2103399 000000000007ba597b2a6a5786a04b903ea59fa5d351b5330f650ac386e408f7 +2103668 0000000000bcf023e1f9325995fa9c7420e95d82354c9b70ec56760d4a12fd86 +2103935 00000000015705e5a3ab8b2602678411356198cb3a6bc961a601d796cc61e833 +2104217 000000000101c6a29a3e520ee6371acd728a4284c08ca37a943879b1f2550953 +2104503 
00000000008426c5a699a586790038e3bd5cf5f02bf1558e5ace5a0c06acfd1b +2104771 00000000001c27faa1701f94afd0edfa4527b2464ca6cd243f597a52b2338b0a +2105042 000000000181438c35b05daa85e68e7c94987234b56619d0fdbbd35f30de88a7 +2105343 0000000000d7fb4c5a7b89a4bdec72c766c9cbbb30657164b7aaef5b12bb48f9 +2105640 0000000000edeca93d6da6f0d2165025328fd590a95fa13fa3ee1e944c42bbc9 +2105949 0000000000be17c29160f95f978bfdd44d3e9b0801fe5c15a370ef339634fd5e +2106261 0000000001cd0274c7b1e750eaeb56523228e3daa50933d3b5b1e4ab24c04c24 +2106595 000000000046b741edf7f7314ef6c62d571622a4c7257c050124d2e34df4b89a +2106871 00000000016ec9bc1eca28390b20e26a30a45588826ea986e1f8c3f550a435bd +2107132 0000000001642153d541f9744667aeacc50731682938bafaa2ee2ef0ca837bbc +2107416 0000000001c27104d4f31a26b484f7fb334b0e84907c6415a1342b11a6e0fdad +2107740 000000000169f8a49f66287ed3c2bf41df12592a5dc525b336622de12c79d0e9 +2108033 0000000001318d782ef724c07192781b0d69906a3ff1a570eebd239d41fa3b0d +2108322 000000000137548212186de70d9be0960655dd856620ab46d8459c32e557f294 +2108618 0000000000e9e4a451fd409324a7790496787ec829aee266cf076c1d16daac39 +2108909 0000000000c49cdc19e6d24a74905799219392bd8fd6232d0934d62b6d95e9d8 +2109225 00000000007b7daf13baac8d9eec989c559cbb9b3af7dd2a2f9096f4ffe20982 +2109509 00000000013ea4b51437a7d29d3eba586de862563535b59ea60b7dfc6609930c +2109811 00000000009ab0ab6fd7b5fb3e978d2d27e6e0351bb04b1ae9ef1e3fca705415 +2110101 00000000001221d61b8bd5178692a8c35418098b1920fb0470e447de1a2b8a38 +2110416 0000000001baef0c680fb91ffab2de7db11b0a57aab546325493e6bbc32bfc95 +2110816 0000000001be2f3576ea68beec4757c14446f2044d78681492b318aca9c89d8d +2111216 000000000046f9457ce05ad5c9d87a7d6de5f0d526290f2f71dc4feb723d579c +2111616 000000000122596e9a9897ed9130aeff0ec850e078ef65f623e5626414b7f0c9 +2112016 00000000011c14231f9405b2c4dddb7049677f85197af9059f0fb09ed8868d3f diff --git a/zebra-consensus/src/checkpoint/test-checkpoints.txt b/zebra-consensus/src/checkpoint/test-checkpoints.txt index 979f082390c..a95bdcc74c5 100644 
--- a/zebra-consensus/src/checkpoint/test-checkpoints.txt +++ b/zebra-consensus/src/checkpoint/test-checkpoints.txt @@ -5827,3 +5827,106 @@ 2330400 008669568ece470d9864735b490caf727b68b74e757f162dd0ad3718123dfea8 2330800 007e9deec86c3d54574fed9a49e758dedfbb764cef3f74c19df64a0045596020 2331200 004580f2f264ff7157f510c680b4efe51bf1ca78e3fd551609e0a77bd60de4f2 +2331600 0009b548a3b16906c4d46672ef6beb0f3226307eecdadfba2582a6268954040a +2332000 00aa72040fa3145ba80541e37f4f6e9f70db71e0afe1e7a2b295f587d2bc0af8 +2332400 00139b8e079e992cd8ba70bf3d7244cd2be3f256702983dae000eaa935bd14e9 +2332800 0099e2baf3e1643694d85b94c0b5c33bc674af14599dd7bd000916977d12b51d +2333200 008abbcb1bac2807ba498201768b3822eac4e0e78252a8b4393e0b5f9054379c +2333600 002e725d23d6968f9c92781a65e0d459351e672272b626573ee0622f9bdef682 +2334000 0035c24576f6f50e63bd8de5a41a7d89f0dd7f30038131b44c723ce07c0338d1 +2334400 000f0523d5a9d678a910043b9381c7b2af2168d149c95488ba0ef373902d09cd +2334800 009e4d9a4c04b2a9ab7bc7fb5bab1285dc74814a36ddc244d294627aacee56d0 +2335200 000fa3566a2aa663b5c09126622a64ed693457813e7627a7659c58637c5c4bc3 +2335600 0161d8e382f90e119eafffc24f2144a2657431afd0f6995a3e9828e4d69330b4 +2336000 0082fec597fe97fcd30c735646f4603165d65223ebbf0a132da0b259b07a323b +2336400 0040abbdcadec76905536f8ad001a9ba6a10537d42b0ce98115f0f7e5adfb907 +2336800 003fe35636f76edc5ea3e75a36f92d03a149a85b44f428a15c8ff152f3b208e8 +2337200 00c38f200be5e9ddbdb18b0c7844b12c75f95028d472bc359b97655af822f3f5 +2337600 00de39f3f6783ecc0845028b5a08a73595895533d464d6cf0e603711a9cee770 +2338000 0005d876aaf60f9c28ef9a54ebd06bb2266d627715d7c3171198fa81809ff7c4 +2338400 00406b335801d4fa99c3c4250c13a61f69279453d82236d9787b51e0dc7530d4 +2338800 006fd53c497975438dc904aa93c365f2e0b5d9066c43acc1473622fb880ad7ee +2339200 00a31a27bdc1f593b3c625f817127b82b1075e9532dad4945e3215582cd93113 +2339600 00041c8a9bcddab0514870703fe8278255c2e92d32cc42b0cf60849efbb3d738 +2340000 002c18eedab7798ae6286a36d5e88fe076c5dd1b82309895536498d6a55c2300 +2340400 
0065e036f01972b5ad0451b065a7ab2104f0e80680598f8f065dc200fca8fb02 +2340800 0088e3ec20439443bdfb329d2e79875d626ab401497467b29221b8603346d8b8 +2341200 0005ba8b7687bafe4bd5d6c4120caf2e50550ba5ac9871dc8616cee6322a6a2b +2341600 0033dccdfc3c42e3f2a2b9102d9f34c2635991a548f67702a77d2e7dbfdd2212 +2342000 005e714c96771eaec8d83cd0c4f7b59e09f46235e00e8f073c3bb8667479df87 +2342400 00655671a299d7f388239324db297b9418b01cc9212545eba77f8ad30d2f2c5e +2342800 00ec042621ba4a3ed3401fe0adb5a77318f1805c4f11fbe6b81919f920e0ddb6 +2343200 002d58735bad815988159e96baf4436de6639bbf168025a73ad36fc63de12057 +2343600 004c983227e2dcacbaa5977a1b460823bbf8bf9ef840c49ec9909fa86b35c611 +2344000 00bfe21a73d76c6ff21d4a8510cab28095961e9145ff792766c947bd84a92124 +2344400 003e68d18a9d946f310e59c0d305e6dedc65f79fab0b80e658395a7f4f994d38 +2344800 000223ee61a4906b733deb9e977bee05a4466fe405df0c89cc99e30397cd8260 +2345200 00dfcfaf90014a93dd46ffaeee52962e009fb21919c69603b27a2f62ed9b9012 +2345600 010100706849c4be8acd87b9ea422ff6cf94b1d74bb0aecfe4b9e60d02133a74 +2346000 002828b1a8d3e52bfa0e41bccffa2165e6b9ea9eb889a1931330a87aee6f6bb6 +2346400 005a52a2a7cde557512f276c0342c04939147bd1a9524c7e97aaed7f5255d55c +2346800 0095cbac48a6546ad31398e5372411e09ff9da184a82ec244f1a0ffeaa4d0394 +2347200 0078cd2181201db4a98a0dead70d1bd2f30d419425a5584e46aa791b6d4a7bfe +2347600 00329c81a460483493209cbf6ec41bb09deb1c1b7b6b65f8ec26aafda24a87c9 +2348000 0006d3cbfc8f0c441fd4edc9f53b0df7bc56c4d0f68e2c0e20aeec4a62e5ba17 +2348400 0082d919c612628ffc19129767c9f2040dfb9affcfac70b97daf031df68c3b42 +2348800 0051896f03230319b16947b723ceac4a3ee83d8ee51053f2008e5fb4fc3697d5 +2349200 0081d630671395b578e78f9c5ab2e86d9882aff8dafe3d90d74a909c80e47788 +2349600 00572ca27cd2c286e3a2a60f446cb7245fa6abaddb54f5baabd04843a7f700ef +2350000 003069053a60727191c6b37d3895682242b964889c5634ae4d348dc00c4619dc +2350400 01248ca3b868fc3108a909277037d9a567a275d6f30103faf375b87d54e5b29f +2350800 002f764225f023895da74fb99c34fe639c98df5c20d0cdebcc1914d82712c061 +2351200 
004f24fd7a90d1b35c2b2d9ea299d3487566385bb18835e0849cc99e46e67189 +2351600 003c60f452959cca109c4fee5a848ad174f7f25a61d5e799b729da38721f5297 +2352000 00983f53efe520e91c8faefa5c92e8d601a9165ecf127d7cfe31ebe7c8fb86f7 +2352400 004961d0674de9009f77fe929382851b79f842acbacefe36654d48d26b5f93db +2352800 0021ba495ad27eab30ce8be12a9b75bb12ba70fc6d1435e051954066c46a069f +2353200 000e774789bf9600e46f489c1aaebfa9fa2e5c7ea50915c17c8cbd225a788f73 +2353600 00921d0864b9668fb2d89e32861527600bbfbd8c89cd7f1e8b4d848196494e4b +2354000 002a6729589cbe35528fe63b42168fa3ad42248b9168a00c6e76dac5693d3fd7 +2354400 0040c9fe0d48436c5c242d418e4d3e1785e2a06aeddff362e17e6bd83a1f4c02 +2354800 006f70ea8090d18b8b91b06d442f88170de92d51005edf94f595e0ddff4f693c +2355200 006c31fc1d68146f1190286a73e5230b922bb877704ef038631ebaa8b5a660ee +2355600 000dbf54a92779a7dc6abaf7af8572e4e8b8b2a200761e96acd003e4a9bfa5ea +2356000 000e1fe86ab564557cfde4a2aa4321b293b7a2b8ee74c595a6071a15206458e0 +2356400 004f8c72fa892a68ccba76b2a2a58d3950a650d7fe5e08132d523cbf7dc49d06 +2356800 000c345abfce35324d97821d9ddf2dfebee89c68641ef85fb2030f31b6b2934a +2357200 00586f1618612ae82a75a4dc8c4af2db3165c7ee7ada1cc51effdc81b7f2d588 +2357600 0002efcdbd777660634a77bd4b910344428c649bb169d1f2350341ffa189604f +2358000 016ce033b882949997208502e67986d4f72ee331b38186d30edbac7754f39248 +2358400 0036008ddbde2fa0047b61d34dd0dd4523191aa50ce5c3ee84f8bf3ebb08858e +2358800 00125c02d232037b34ccd5499e0b22ab52943561930948ad377693afed91982f +2359200 0044a5e6f02310b18b2abc57bbd09ec7934b3aa80df4fa8f81464266550cbc2b +2359600 00a484fa68a3ccc5dc55dda3d0c6a90ac0ab7b77fb7049fcaff519e3991a019e +2360000 0031d12534f35e54170ae9b8fedcd611c48f12a1be093d47bf1226e219beb418 +2360400 005963b47b255547135ca26224cc6a9b5257286f83bee1ff8512292b0f7f74bc +2360800 00822cc61eac54709c5874842ea3a6f2129d34c20022264147c1987247b4a96d +2361200 00359f742d9a06a76fee644305542517f6240b8fa86af388a574d2e1d5f12c59 +2361600 0021df1ce0a6971f3c6988933a3acc3ecf8fd94b251bc4a13d0a3ea7ffa8b20f +2362000 
006d2ae23c9ae386d85cf17d8a14a54ff9e5bb49a93066098f732c644760fa5a +2362400 004856c1492a51ff1c082e9ae62e102b7f3590866f38bf243b170bdf57a1944a +2362800 001a6a6eb376abfb9dcd974b1a7e8065e353479e972334fe3fba1911f6136505 +2363200 009cf8e442dbb74299b4bc4bfd83fa43ac28c8ffb7630f55efb660bc9e5812a7 +2363600 001efe1adbde8a31e70c911be35561002bb30b48dc311920d474b1f1ac4012bf +2364000 004c11faf0bb27bb84cba0912b16fba211ec71bb4b69825f9a9a343cdebb2263 +2364400 004899370613a6893207f45da046907041ce17c06e6ac6b4ea79aa02cde132de +2364800 0028a024cc2e565b0206243b17d54655529831d70bfe6cc8c640cb64af634f78 +2365200 0094b6f96828a0295e198e32341424e2854bf6a062cc2c2e0855fb35995ff6c0 +2365600 00a52a846cbeaa6534605bb8396e3139594f5160e705f582acd3bc607d63b43f +2366000 0077758232f91645e9abaa27648b2ec5e7af9d61eaace15ff5751cdcd50a03a2 +2366400 002229c39b034e7c2b1eaf1b32b2c9f50e70559a075a7ef6ad642a61898eae0b +2366800 00315ac86628c5372f133436ea85d0c17bd5f98ed8a7b50039397eaff0334b30 +2367200 0058b2290e17df289cca7f4548bb9bb77e35fe5a0c6e5e57c8fa542c23e5f568 +2367600 0053add58a099464f321b8708c39b186b8af3a75b749de0a0caa16f9fb6bc81b +2368000 00225a4916fbc660f70e45558d4a99a602fe9cce26a2d8c1bb3356717805b174 +2368400 0064f94f5ff49088aee047af2b56619a86832bfc8266f5e136c957a7a6b7f036 +2368800 005d7aced5d77767ec25ad54ff28efb5bbdbdbb3e4ec159833212cb4a130c7fe +2369200 0005b8eb5b727e8971d467db170f9338e751bc9c7d02aefa61bac5599f83be50 +2369600 0043074760a32a7eae055124859f7f447fdbb77535b31d946d4e0ef962011065 +2370000 0000b3672c7628008574f5c0c83dd37b49eb45d353d2e578d9f47d188dcfa2c2 +2370400 001dfa30609c198411049da4a13709527289f483752e1afba516065cb939b8d8 +2370800 00124174ac647f00f6656209e0e7edb87bb76f8ba32ed3f929a0c7480bacc2fa +2371200 001dd4010f636def1bab43d7b5d621ca984712c7cd579995b0d9138c5f8bbb87 +2371600 00101a398fe99963ce0ae45f67091b8ee71673ef7ce6a16d4dd010712aca7f16 +2372000 002a414b6b69758f8408326c7b79f0607d27a95ffe2c46177c9dfc6179ee67df +2372400 00135546d02b716693f9b1c7764d30c7db7b876a4095cfd7b391f4a34f5bcaba diff --git 
a/zebra-network/Cargo.toml b/zebra-network/Cargo.toml index 3c841809af4..983e70374c1 100644 --- a/zebra-network/Cargo.toml +++ b/zebra-network/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "zebra-network" -version = "1.0.0-beta.24" +version = "1.0.0-beta.25" authors = ["Zcash Foundation "] license = "MIT OR Apache-2.0" edition = "2021" diff --git a/zebra-node-services/Cargo.toml b/zebra-node-services/Cargo.toml index 942093cd06b..3505d249b0f 100644 --- a/zebra-node-services/Cargo.toml +++ b/zebra-node-services/Cargo.toml @@ -2,7 +2,7 @@ name = "zebra-node-services" authors = ["Zcash Foundation "] license = "MIT OR Apache-2.0" -version = "1.0.0-beta.24" +version = "1.0.0-beta.25" edition = "2021" repository = "https://github.com/ZcashFoundation/zebra" diff --git a/zebra-rpc/Cargo.toml b/zebra-rpc/Cargo.toml index ddcbd0da7bc..9f96681a35d 100644 --- a/zebra-rpc/Cargo.toml +++ b/zebra-rpc/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "zebra-rpc" -version = "1.0.0-beta.24" +version = "1.0.0-beta.25" authors = ["Zcash Foundation "] license = "MIT OR Apache-2.0" edition = "2021" diff --git a/zebra-script/Cargo.toml b/zebra-script/Cargo.toml index 7dd3582879f..4d9d41efaf9 100644 --- a/zebra-script/Cargo.toml +++ b/zebra-script/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "zebra-script" -version = "1.0.0-beta.24" +version = "1.0.0-beta.25" authors = ["Zcash Foundation "] license = "MIT OR Apache-2.0" edition = "2021" diff --git a/zebra-state/Cargo.toml b/zebra-state/Cargo.toml index b165671832e..5550769cace 100644 --- a/zebra-state/Cargo.toml +++ b/zebra-state/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "zebra-state" -version = "1.0.0-beta.24" +version = "1.0.0-beta.25" authors = ["Zcash Foundation "] license = "MIT OR Apache-2.0" edition = "2021" diff --git a/zebra-test/Cargo.toml b/zebra-test/Cargo.toml index 695e9b4848e..db74ee3b325 100644 --- a/zebra-test/Cargo.toml +++ b/zebra-test/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "zebra-test" -version = "1.0.0-beta.24" +version 
= "1.0.0-beta.25" authors = ["Zcash Foundation "] license = "MIT OR Apache-2.0" edition = "2021" diff --git a/zebra-utils/Cargo.toml b/zebra-utils/Cargo.toml index 6d2d3c3b074..9d78f43aaf9 100644 --- a/zebra-utils/Cargo.toml +++ b/zebra-utils/Cargo.toml @@ -2,7 +2,7 @@ name = "zebra-utils" authors = ["Zcash Foundation "] license = "MIT OR Apache-2.0" -version = "1.0.0-beta.24" +version = "1.0.0-beta.25" edition = "2021" [[bin]] diff --git a/zebrad/Cargo.toml b/zebrad/Cargo.toml index 2c7566a43f9..354cc519f35 100644 --- a/zebrad/Cargo.toml +++ b/zebrad/Cargo.toml @@ -3,7 +3,7 @@ name = "zebrad" authors = ["Zcash Foundation "] license = "MIT OR Apache-2.0" -version = "1.0.0-rc.8" +version = "1.0.0-rc.9" repository = "https://github.com/ZcashFoundation/zebra" # Settings that impact compilation diff --git a/zebrad/src/components/sync/end_of_support.rs b/zebrad/src/components/sync/end_of_support.rs index 89317c61d94..09b5caa60df 100644 --- a/zebrad/src/components/sync/end_of_support.rs +++ b/zebrad/src/components/sync/end_of_support.rs @@ -13,7 +13,7 @@ use zebra_chain::{ use crate::application::release_version; /// The estimated height that this release started to run. -pub const ESTIMATED_RELEASE_HEIGHT: u32 = 2_081_448; +pub const ESTIMATED_RELEASE_HEIGHT: u32 = 2_113_936; /// The maximum number of days after `ESTIMATED_RELEASE_HEIGHT` where a Zebra server will run /// without halting. From baa2b0128418ed383fbbcc2fe6e65250653e1fe4 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 8 Jun 2023 05:33:44 +0000 Subject: [PATCH 057/265] build(deps): bump docker/metadata-action from 4.4.0 to 4.5.0 (#6850) Bumps [docker/metadata-action](https://github.com/docker/metadata-action) from 4.4.0 to 4.5.0. 
- [Release notes](https://github.com/docker/metadata-action/releases) - [Commits](https://github.com/docker/metadata-action/compare/v4.4.0...v4.5.0) --- updated-dependencies: - dependency-name: docker/metadata-action dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/build-docker-image.yml | 2 +- .github/workflows/zcash-lightwalletd.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/build-docker-image.yml b/.github/workflows/build-docker-image.yml index 502ac48cc76..00d8d6f8751 100644 --- a/.github/workflows/build-docker-image.yml +++ b/.github/workflows/build-docker-image.yml @@ -84,7 +84,7 @@ jobs: # Automatic tag management and OCI Image Format Specification for labels - name: Docker meta id: meta - uses: docker/metadata-action@v4.4.0 + uses: docker/metadata-action@v4.5.0 with: # list of Docker images to use as base name for tags images: | diff --git a/.github/workflows/zcash-lightwalletd.yml b/.github/workflows/zcash-lightwalletd.yml index f651d5ce98e..44a67b5c3e1 100644 --- a/.github/workflows/zcash-lightwalletd.yml +++ b/.github/workflows/zcash-lightwalletd.yml @@ -75,7 +75,7 @@ jobs: # Automatic tag management and OCI Image Format Specification for labels - name: Docker meta id: meta - uses: docker/metadata-action@v4.4.0 + uses: docker/metadata-action@v4.5.0 with: # list of Docker images to use as base name for tags images: | From 6075a091de02919fcb0bd34a109744ec6d9059e0 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 8 Jun 2023 05:34:07 +0000 Subject: [PATCH 058/265] build(deps): bump docker/login-action from 2.1.0 to 2.2.0 (#6849) Bumps [docker/login-action](https://github.com/docker/login-action) from 2.1.0 to 2.2.0. 
- [Release notes](https://github.com/docker/login-action/releases) - [Commits](https://github.com/docker/login-action/compare/v2.1.0...v2.2.0) --- updated-dependencies: - dependency-name: docker/login-action dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/build-docker-image.yml | 4 ++-- .github/workflows/delete-gcp-resources.yml | 2 +- .github/workflows/zcash-lightwalletd.yml | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/build-docker-image.yml b/.github/workflows/build-docker-image.yml index 00d8d6f8751..6131b2d0afb 100644 --- a/.github/workflows/build-docker-image.yml +++ b/.github/workflows/build-docker-image.yml @@ -126,7 +126,7 @@ jobs: access_token_lifetime: 10800s - name: Login to Google Artifact Registry - uses: docker/login-action@v2.1.0 + uses: docker/login-action@v2.2.0 with: registry: us-docker.pkg.dev username: oauth2accesstoken @@ -136,7 +136,7 @@ jobs: # We only publish images to DockerHub if a release is not a pre-release # Ref: https://github.com/orgs/community/discussions/26281#discussioncomment-3251177 if: ${{ github.event_name == 'release' && !github.event.release.prerelease }} - uses: docker/login-action@v2.1.0 + uses: docker/login-action@v2.2.0 with: username: ${{ secrets.DOCKERHUB_USERNAME }} password: ${{ secrets.DOCKERHUB_TOKEN }} diff --git a/.github/workflows/delete-gcp-resources.yml b/.github/workflows/delete-gcp-resources.yml index f0b97ff1a64..958fe88dda2 100644 --- a/.github/workflows/delete-gcp-resources.yml +++ b/.github/workflows/delete-gcp-resources.yml @@ -254,7 +254,7 @@ jobs: token_format: 'access_token' - name: Login to Google Artifact Registry - uses: docker/login-action@v2.1.0 + uses: docker/login-action@v2.2.0 with: registry: us-docker.pkg.dev username: oauth2accesstoken diff --git 
a/.github/workflows/zcash-lightwalletd.yml b/.github/workflows/zcash-lightwalletd.yml index 44a67b5c3e1..d5074cf7f46 100644 --- a/.github/workflows/zcash-lightwalletd.yml +++ b/.github/workflows/zcash-lightwalletd.yml @@ -121,7 +121,7 @@ jobs: uses: google-github-actions/setup-gcloud@v1.1.1 - name: Login to Google Artifact Registry - uses: docker/login-action@v2.1.0 + uses: docker/login-action@v2.2.0 with: registry: us-docker.pkg.dev username: oauth2accesstoken From 081d2d1ec2657def9e1631a503a65bcdd4453ea7 Mon Sep 17 00:00:00 2001 From: Alfredo Garcia Date: Thu, 8 Jun 2023 02:34:23 -0300 Subject: [PATCH 059/265] release(docs): Refactor bug report template (#6858) * try a github form the user bug report template * make fixes * apply suggestions * Apply suggestions from code review Co-authored-by: teor --------- Co-authored-by: teor --- .github/ISSUE_TEMPLATE/bug_report.md | 69 -------------------------- .github/ISSUE_TEMPLATE/bug_report.yml | 71 +++++++++++++++++++++++++++ CONTRIBUTING.md | 3 +- 3 files changed, 72 insertions(+), 71 deletions(-) delete mode 100644 .github/ISSUE_TEMPLATE/bug_report.md create mode 100644 .github/ISSUE_TEMPLATE/bug_report.yml diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md deleted file mode 100644 index 2ebcafc15b7..00000000000 --- a/.github/ISSUE_TEMPLATE/bug_report.md +++ /dev/null @@ -1,69 +0,0 @@ ---- -name: Bug report -about: Create a report to help us improve -title: '' -labels: C-bug, S-needs-triage -assignees: '' - ---- - -## Description - - - -[short summary of the bug] - -### Steps to Reproduce - -I tried this: - -[behavior or code sample that causes the bug] - -```sh -copy and paste the exact commands or code here -``` - -### Expected Behaviour - -I expected to see this happen: [explanation] - -### Actual Behaviour - -Instead, this happened: [explanation] - -### Zebra Logs - - - -
- -``` -copy and paste the logs here -``` - -
- -## Environment - -### Zebra Version - - - -### Operating System - - diff --git a/.github/ISSUE_TEMPLATE/bug_report.yml b/.github/ISSUE_TEMPLATE/bug_report.yml new file mode 100644 index 00000000000..f3bf463a17b --- /dev/null +++ b/.github/ISSUE_TEMPLATE/bug_report.yml @@ -0,0 +1,71 @@ +name: Bug report +description: Create a report to help us improve +title: '[User reported bug]: ' +labels: C-bug, S-needs-triage +body: + - type: markdown + attributes: + value: | + Thanks for taking the time to fill out this bug report! + - type: textarea + id: what-happened + attributes: + label: What happened? + description: Also tell us, what did you expect to happen? + value: "I expected to see this happen: + + + Instead, this happened: + + " + validations: + required: true + - type: textarea + id: reproduce + attributes: + label: What were you doing when the issue happened? + description: Copy and paste the exact commands or code here. + placeholder: "Behavior or code sample that causes the bug" + validations: + required: false + - type: textarea + id: logs + attributes: + label: Zebra logs + description: Copy and paste the last 100 Zebra log lines or upload the full logs to https://gist.github.com/ and add a link to them here. + placeholder: "Copy and paste the logs here" + validations: + required: false + - type: input + id: zebrad-version + attributes: + label: Zebra Version + description: "For bugs in `zebrad`, run `zebrad --version`." + placeholder: "zebrad 1.0.0-placeholder" + validations: + required: false + - type: checkboxes + id: os + attributes: + label: Which operating systems does the issue happen on? + description: You may select more than one. 
+ options: + - label: Linux + - label: macOS + - label: Windows + - label: Other OS + - type: input + id: os-details + attributes: + label: OS details + description: "Linux, macOS, BSD: the output of `uname -a`; Windows: version and 32-bit or 64-bit; Other OS: name and version" + placeholder: + validations: + required: false + - type: textarea + id: anything-else + attributes: + label: Additional information + description: Is there anything else that could help us solve this issue? + validations: + required: false diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 4788d60abd9..7d8ce532628 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -14,8 +14,7 @@ how to build, run, and instrument Zebra. ## Bug Reports [bug-reports]: #bug-reports -[File an issue](https://github.com/ZcashFoundation/zebra/issues/new/choose) -on the issue tracker using the bug report template. +Please [create an issue](https://github.com/ZcashFoundation/zebra/issues/new?assignees=&labels=C-bug%2C+S-needs-triage&projects=&template=bug_report.yml&title=) on the Zebra issue tracker. 
## Pull Requests [pull-requests]: #pull-requests From d9add4a01f0a560245d573e3ddaf8dfc928f5cbc Mon Sep 17 00:00:00 2001 From: teor Date: Thu, 8 Jun 2023 15:44:30 +1000 Subject: [PATCH 060/265] change(cd): Deploy testnet instances for every main branch push and release (#6842) --- .dockerignore | 13 +- .github/workflows/build-docker-image.yml | 19 ++- .github/workflows/continous-delivery.yml | 154 +++++++++++++----- .../continous-integration-docker.yml | 11 +- .github/workflows/release-binaries.yml | 2 - .github/workflows/zcash-params.yml | 8 + docker/Dockerfile | 90 +++++----- docker/runtime-entrypoint.sh | 69 ++++++++ 8 files changed, 255 insertions(+), 111 deletions(-) create mode 100755 docker/runtime-entrypoint.sh diff --git a/.dockerignore b/.dockerignore index 12a78c0d76b..12057f20ac8 100644 --- a/.dockerignore +++ b/.dockerignore @@ -1,11 +1,11 @@ -# Before the docker CLI sends the context to the docker daemon, it looks for a file -# named .dockerignore in the root directory of the context. If this file exists, the -# CLI modifies the context to exclude files and directories that match patterns in it. +# Before the docker CLI sends the context to the docker daemon, it looks for a file +# named .dockerignore in the root directory of the context. If this file exists, the +# CLI modifies the context to exclude files and directories that match patterns in it. # -# You may want to specify which files to include in the context, rather than which -# to exclude. To achieve this, specify * as the first pattern, followed by one or +# You may want to specify which files to include in the context, rather than which +# to exclude. To achieve this, specify * as the first pattern, followed by one or # more ! exception patterns. 
-# +# # https://docs.docker.com/engine/reference/builder/#dockerignore-file # Exclude everything: @@ -21,3 +21,4 @@ !zebra-* !zebrad !docker/entrypoint.sh +!docker/runtime-entrypoint.sh diff --git a/.github/workflows/build-docker-image.yml b/.github/workflows/build-docker-image.yml index 6131b2d0afb..bcb8dc8ade7 100644 --- a/.github/workflows/build-docker-image.yml +++ b/.github/workflows/build-docker-image.yml @@ -6,9 +6,6 @@ on: network: required: false type: string - checkpoint_sync: - required: false - type: boolean image_name: required: true type: string @@ -53,6 +50,11 @@ on: tag_suffix: required: false type: string + no_cache: + description: 'Disable the Docker cache for this build' + required: false + type: boolean + default: false outputs: image_digest: @@ -158,19 +160,20 @@ jobs: RUST_LIB_BACKTRACE=${{ inputs.rust_lib_backtrace }} COLORBT_SHOW_HIDDEN=${{ inputs.colorbt_show_hidden }} ZEBRA_SKIP_IPV6_TESTS=${{ inputs.zebra_skip_ipv6_tests }} - CHECKPOINT_SYNC=${{ inputs.checkpoint_sync }} RUST_LOG=${{ inputs.rust_log }} FEATURES=${{ inputs.features }} TEST_FEATURES=${{ inputs.test_features }} RPC_PORT=${{ inputs.rpc_port }} push: true + # Don't read from the cache if the caller disabled it. + # https://docs.docker.com/engine/reference/commandline/buildx_build/#options + no-cache: ${{ inputs.no_cache }} # To improve build speeds, for each branch we push an additional image to the registry, # to be used as the caching layer, using the `max` caching mode. # - # We use multiple cache sources to confirm a cache hit, starting from a per-branch cache, - # and if there's no hit, then continue with the `main` branch. When changes are added to a PR, - # they are usually smaller than the diff between the PR and `main` branch. So this provides the - # best performance. + # We use multiple cache sources to confirm a cache hit, starting from a per-branch cache. 
+ # If there's no hit, we continue with a `main` branch cache, which helps us avoid + # rebuilding cargo-chef, most dependencies, and possibly some Zebra crates. # # The caches are tried in top-down order, the first available cache is used: # https://github.com/moby/moby/pull/26839#issuecomment-277383550 diff --git a/.github/workflows/continous-delivery.yml b/.github/workflows/continous-delivery.yml index 5a91c4f7db8..a8487dc36d7 100644 --- a/.github/workflows/continous-delivery.yml +++ b/.github/workflows/continous-delivery.yml @@ -2,9 +2,12 @@ name: CD # Ensures that only one workflow task will run at a time. Previous deployments, if # already in process, won't get cancelled. Instead, we let the first to complete -# then queue the latest pending workflow, cancelling any workflows in between +# then queue the latest pending workflow, cancelling any workflows in between. +# +# Since the different event types each use a different Managed Instance Group or instance, +# we can run different event types concurrently. 
concurrency: - group: ${{ github.workflow }}-${{ github.ref }} + group: ${{ github.workflow }}-${{ github.event_name }}-${{ github.ref }} cancel-in-progress: false on: @@ -14,10 +17,14 @@ on: default: 'Mainnet' description: 'Network to deploy: Mainnet or Testnet' required: true - checkpoint_sync: - default: 'true' - description: 'Use as many checkpoints as possible when syncing' - required: true + log_file: + description: 'Log to a file path rather than standard output' + no_cache: + description: 'Disable the Docker cache for this build' + required: false + type: boolean + default: false + push: branches: - main @@ -64,17 +71,18 @@ jobs: dockerfile_path: ./docker/Dockerfile dockerfile_target: runtime image_name: zebrad - # We need to hard-code Mainnet here, because env is not allowed in this context - network: ${{ inputs.network || 'Mainnet' }} - checkpoint_sync: true + no_cache: ${{ inputs.no_cache || false }} + # We hard-code Mainnet here, because the config is modified before running zebrad + network: 'Mainnet' rust_backtrace: '1' zebra_skip_ipv6_tests: '1' rust_log: info - # Test that Zebra works using the default config with the latest Zebra version + # Test that Zebra works using the default config with the latest Zebra version, + # and test reconfiguring the docker image for testnet. 
test-configuration-file: name: Test Zebra default Docker config file - timeout-minutes: 5 + timeout-minutes: 15 runs-on: ubuntu-latest needs: build steps: @@ -85,32 +93,81 @@ jobs: with: short-length: 7 + # Make sure Zebra can sync at least one full checkpoint on mainnet - name: Run tests using the default config run: | set -ex docker pull ${{ vars.GAR_BASE }}/zebrad@${{ needs.build.outputs.image_digest }} docker run --detach --name default-conf-tests -t ${{ vars.GAR_BASE }}/zebrad@${{ needs.build.outputs.image_digest }} - EXIT_STATUS=$(docker logs --tail all --follow default-conf-tests 2>&1 | grep -q --extended-regexp --max-count=1 -e 'estimated progress to chain tip.*BeforeOverwinter'; echo $?; ) + # show the logs, even if the job times out + docker logs --tail all --follow default-conf-tests | \ + tee --output-error=exit /dev/stderr | \ + grep --max-count=1 --extended-regexp --color=always \ + 'net.*=.*Main.*estimated progress to chain tip.*BeforeOverwinter' docker stop default-conf-tests + # get the exit status from docker + EXIT_STATUS=$( \ + docker wait default-conf-tests || \ + docker inspect --format "{{.State.ExitCode}}" default-conf-tests || \ + echo "missing container, or missing exit status for container" \ + ) docker logs default-conf-tests + echo "docker exit status: $EXIT_STATUS" + if [[ "$EXIT_STATUS" = "137" ]]; then + echo "ignoring expected signal status" + exit 0 + fi exit "$EXIT_STATUS" - # This jobs handles the deployment of a Managed Instance Group (MiG) with 2 nodes in - # the us-central1 region. 
Two different groups of MiGs are deployed one for pushes to - # the main branch and another for version releases of Zebra + # Make sure Zebra can sync the genesis block on testnet + - name: Run tests using a testnet config + run: | + set -ex + docker pull ${{ vars.GAR_BASE }}/zebrad@${{ needs.build.outputs.image_digest }} + docker run --env "NETWORK=Testnet" --detach --name testnet-conf-tests -t ${{ vars.GAR_BASE }}/zebrad@${{ needs.build.outputs.image_digest }} + # show the logs, even if the job times out + docker logs --tail all --follow testnet-conf-tests | \ + tee --output-error=exit /dev/stderr | \ + grep --max-count=1 --extended-regexp --color=always \ + 'net.*=.*Test.*estimated progress to chain tip.*Genesis' + docker stop testnet-conf-tests + # get the exit status from docker + EXIT_STATUS=$( \ + docker wait testnet-conf-tests || \ + docker inspect --format "{{.State.ExitCode}}" testnet-conf-tests || \ + echo "missing container, or missing exit status for container" \ + ) + docker logs testnet-conf-tests + echo "docker exit status: $EXIT_STATUS" + if [[ "$EXIT_STATUS" = "137" ]]; then + echo "ignoring expected signal status" + exit 0 + fi + exit "$EXIT_STATUS" + + # Deploy Managed Instance Groups (MiGs) for Mainnet and Testnet, + # with one node in the configured GCP region. # - # Once this workflow is triggered the previous MiG is replaced, on pushes to main its - # always replaced, and with releases its only replaced if the same major version is - # being deployed, otherwise a new major version is deployed + # Separate Mainnet and Testnet MiGs are deployed whenever there are: + # - pushes to the main branch, or + # - version releases of Zebra. + # + # Once this workflow is triggered: + # - by pushes to main: the MiG is always replaced, + # - by releases: the MiG is only replaced if the same major version is being deployed, + # otherwise a new major version is deployed in a new MiG. 
# # Runs: # - on every push/merge to the `main` branch # - on every release, when it's published deploy-nodes: - name: Deploy ${{ inputs.network || 'Mainnet' }} nodes + strategy: + matrix: + network: [Mainnet, Testnet] + name: Deploy ${{ matrix.network }} nodes needs: [ build, test-configuration-file, versioning ] runs-on: ubuntu-latest - timeout-minutes: 30 + timeout-minutes: 60 permissions: contents: 'read' id-token: 'write' @@ -129,12 +186,12 @@ jobs: # Makes the Zcash network name lowercase. # # Labels in GCP are required to be in lowercase, but the blockchain network - # uses sentence case, so we need to downcase ${{ inputs.network || 'Mainnet' }}. + # uses sentence case, so we need to downcase the network. # # Passes the lowercase network to subsequent steps using $NETWORK env variable. - name: Downcase network name for labels run: | - NETWORK_CAPS="${{ inputs.network || 'Mainnet' }}" + NETWORK_CAPS="${{ matrix.network }}" echo "NETWORK=${NETWORK_CAPS,,}" >> "$GITHUB_ENV" # Setup gcloud CLI @@ -151,15 +208,18 @@ jobs: # TODO we should implement the fixes from https://github.com/ZcashFoundation/zebra/pull/5670 here # but the implementation is failing as it's requiring the disk names, contrary to what is stated in the official documentation - - name: Create instance template + - name: Create instance template for ${{ matrix.network }} run: | - gcloud compute instance-templates create-with-container zebrad-${{ needs.versioning.outputs.major_version || env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }} \ + gcloud compute instance-templates create-with-container zebrad-${{ needs.versioning.outputs.major_version || env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }}-${NETWORK} \ --boot-disk-type=pd-ssd \ --image-project=cos-cloud \ --image-family=cos-stable \ + --user-output-enabled \ + --metadata google-logging-enabled=true,google-logging-use-fluentbit=true \ --container-image ${{ vars.GAR_BASE }}/zebrad@${{ needs.build.outputs.image_digest }} \ - 
--create-disk=name=zebrad-cache-${{ env.GITHUB_SHA_SHORT }},auto-delete=yes,size=300GB,type=pd-ssd \ - --container-mount-disk=mount-path="/zebrad-cache",name=zebrad-cache-${{ env.GITHUB_SHA_SHORT }} \ + --container-env "NETWORK=${{ matrix.network }},LOG_FILE=${{ vars.CD_LOG_FILE }},SENTRY_DSN=${{ vars.SENTRY_DSN }},SHORT_SHA=${{ env.GITHUB_SHA_SHORT }}" \ + --create-disk=name=zebrad-cache-${{ env.GITHUB_SHA_SHORT }}-${NETWORK},auto-delete=yes,size=300GB,type=pd-ssd \ + --container-mount-disk=mount-path="/zebrad-cache",name=zebrad-cache-${{ env.GITHUB_SHA_SHORT }}-${NETWORK} \ --machine-type ${{ vars.GCP_SMALL_MACHINE }} \ --network-interface=subnet=${{ vars.GCP_SUBNETWORK }} \ --service-account ${{ vars.GCP_DEPLOYMENTS_SA }} \ @@ -168,34 +228,34 @@ jobs: --tags zebrad # Check if our destination instance group exists already - - name: Check if instance group exists + - name: Check if ${{ matrix.network }} instance group exists id: does-group-exist continue-on-error: true run: | - gcloud compute instance-groups list | grep "zebrad-${{ needs.versioning.outputs.major_version || env.GITHUB_REF_SLUG_URL }}" | grep "${{ vars.GCP_REGION }}" + gcloud compute instance-groups list | grep "zebrad-${{ needs.versioning.outputs.major_version || env.GITHUB_REF_SLUG_URL }}-${NETWORK}" | grep "${{ vars.GCP_REGION }}" # Deploy new managed instance group using the new instance template - - name: Create managed instance group + - name: Create managed instance group for ${{ matrix.network }} if: steps.does-group-exist.outcome == 'failure' run: | gcloud compute instance-groups managed create \ - "zebrad-${{ needs.versioning.outputs.major_version || env.GITHUB_REF_SLUG_URL }}" \ - --template "zebrad-${{ needs.versioning.outputs.major_version || env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }}" \ + "zebrad-${{ needs.versioning.outputs.major_version || env.GITHUB_REF_SLUG_URL }}-${NETWORK}" \ + --template "zebrad-${{ needs.versioning.outputs.major_version || env.GITHUB_REF_SLUG_URL 
}}-${{ env.GITHUB_SHA_SHORT }}-${NETWORK}" \ --health-check zebrad-tracing-filter \ --initial-delay 30 \ --region "${{ vars.GCP_REGION }}" \ --size 1 # Rolls out update to existing group using the new instance template - - name: Update managed instance group + - name: Update managed instance group for ${{ matrix.network }} if: steps.does-group-exist.outcome == 'success' run: | gcloud compute instance-groups managed rolling-action start-update \ - "zebrad-${{ needs.versioning.outputs.major_version || env.GITHUB_REF_SLUG_URL }}" \ - --version template="zebrad-${{ needs.versioning.outputs.major_version || env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }}" \ + "zebrad-${{ needs.versioning.outputs.major_version || env.GITHUB_REF_SLUG_URL }}-${NETWORK}" \ + --version template="zebrad-${{ needs.versioning.outputs.major_version || env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }}-${NETWORK}" \ --region "${{ vars.GCP_REGION }}" - # This jobs handles the deployment of a single node (1) in the us-central1-a zone + # This jobs handles the deployment of a single node (1) in the configured GCP zone # when an instance is required to test a specific commit # # Runs: @@ -203,7 +263,7 @@ jobs: # # Note: this instances are not automatically replaced or deleted deploy-instance: - name: Deploy single instance + name: Deploy single ${{ inputs.network }} instance needs: [ build, test-configuration-file ] runs-on: ubuntu-latest timeout-minutes: 30 @@ -222,6 +282,17 @@ jobs: with: short-length: 7 + # Makes the Zcash network name lowercase. + # + # Labels in GCP are required to be in lowercase, but the blockchain network + # uses sentence case, so we need to downcase the network. + # + # Passes the lowercase network to subsequent steps using $NETWORK env variable. 
+ - name: Downcase network name for labels + run: | + NETWORK_CAPS="${{ inputs.network }}" + echo "NETWORK=${NETWORK_CAPS,,}" >> "$GITHUB_ENV" + # Setup gcloud CLI - name: Authenticate to Google Cloud id: auth @@ -235,19 +306,22 @@ jobs: uses: google-github-actions/setup-gcloud@v1.1.1 # Create instance template from container image - - name: Manual deploy of a single instance running zebrad + - name: Manual deploy of a single ${{ inputs.network }} instance running zebrad run: | - gcloud compute instances create-with-container "zebrad-${{ env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }}" \ + gcloud compute instances create-with-container "zebrad-${{ env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }}-${NETWORK}" \ --boot-disk-size 300GB \ --boot-disk-type=pd-ssd \ --image-project=cos-cloud \ --image-family=cos-stable \ + --user-output-enabled \ + --metadata google-logging-enabled=true,google-logging-use-fluentbit=true \ --container-stdin \ --container-tty \ --container-image ${{ vars.GAR_BASE }}/zebrad@${{ needs.build.outputs.image_digest }} \ + --container-env "NETWORK=${{ inputs.network }},LOG_FILE=${{ inputs.log_file || vars.CD_LOG_FILE }},SENTRY_DSN=${{ vars.SENTRY_DSN }},SHORT_SHA=${{ env.GITHUB_SHA_SHORT }}" \ --create-disk=auto-delete=yes,size=300GB,type=pd-ssd \ - --create-disk=name=zebrad-cache-${{ env.GITHUB_SHA_SHORT }},auto-delete=yes,size=300GB,type=pd-ssd \ - --container-mount-disk=mount-path='/zebrad-cache',name=zebrad-cache-${{ env.GITHUB_SHA_SHORT }} \ + --create-disk=name=zebrad-cache-${{ env.GITHUB_SHA_SHORT }}-${NETWORK},auto-delete=yes,size=300GB,type=pd-ssd \ + --container-mount-disk=mount-path='/zebrad-cache',name=zebrad-cache-${{ env.GITHUB_SHA_SHORT }}-${NETWORK} \ --machine-type ${{ vars.GCP_SMALL_MACHINE }} \ --network-interface=subnet=${{ vars.GCP_SUBNETWORK }} \ --service-account ${{ vars.GCP_DEPLOYMENTS_SA }} \ diff --git a/.github/workflows/continous-integration-docker.yml b/.github/workflows/continous-integration-docker.yml 
index b358da96976..2f3063fc66a 100644 --- a/.github/workflows/continous-integration-docker.yml +++ b/.github/workflows/continous-integration-docker.yml @@ -14,10 +14,6 @@ on: default: 'Mainnet' description: 'Network to deploy: Mainnet or Testnet' required: true - checkpoint_sync: - default: 'true' - description: 'Configures `zebrad` to use as many checkpoints as possible' - required: true regenerate-disks: type: boolean default: false @@ -33,6 +29,11 @@ on: default: false description: 'Just run a lightwalletd full sync and update tip disks' required: true + no_cache: + description: 'Disable the Docker cache for this build' + required: false + type: boolean + default: false pull_request: paths: @@ -114,8 +115,8 @@ jobs: dockerfile_path: ./docker/Dockerfile dockerfile_target: tests image_name: ${{ vars.CI_IMAGE_NAME }} + no_cache: ${{ inputs.no_cache || false }} network: ${{ inputs.network || vars.ZCASH_NETWORK }} - checkpoint_sync: true rust_backtrace: full rust_lib_backtrace: full colorbt_show_hidden: '1' diff --git a/.github/workflows/release-binaries.yml b/.github/workflows/release-binaries.yml index 8366f51d23a..319777dec22 100644 --- a/.github/workflows/release-binaries.yml +++ b/.github/workflows/release-binaries.yml @@ -27,7 +27,6 @@ jobs: dockerfile_target: runtime image_name: zebra network: Mainnet - checkpoint_sync: true rust_backtrace: '1' zebra_skip_ipv6_tests: '1' rust_log: info @@ -48,7 +47,6 @@ jobs: rpc_port: '18232' features: "sentry getblocktemplate-rpcs" test_features: "" - checkpoint_sync: true rust_backtrace: '1' zebra_skip_ipv6_tests: '1' rust_log: info diff --git a/.github/workflows/zcash-params.yml b/.github/workflows/zcash-params.yml index 4574ad1f6b8..e0b952e331a 100644 --- a/.github/workflows/zcash-params.yml +++ b/.github/workflows/zcash-params.yml @@ -9,6 +9,13 @@ concurrency: on: workflow_dispatch: + inputs: + no_cache: + description: 'Disable the Docker cache for this build' + required: false + type: boolean + default: false + push: 
branches: - 'main' @@ -30,6 +37,7 @@ jobs: dockerfile_path: ./docker/zcash-params/Dockerfile dockerfile_target: release image_name: zcash-params + no_cache: ${{ inputs.no_cache || false }} rust_backtrace: full rust_lib_backtrace: full colorbt_show_hidden: '1' diff --git a/docker/Dockerfile b/docker/Dockerfile index 68d0a95be1b..012fa674ce4 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -1,3 +1,5 @@ +# If you want to include a file in the Docker image, add it to .dockerignore. +# # We are using five stages: # - chef: installs cargo-chef # - planner: computes the recipe file @@ -75,10 +77,6 @@ ENV RUST_LOG ${RUST_LOG:-info} ARG ZEBRA_SKIP_IPV6_TESTS ENV ZEBRA_SKIP_IPV6_TESTS ${ZEBRA_SKIP_IPV6_TESTS:-1} -# Use default checkpoint sync and network values if none is provided -ARG CHECKPOINT_SYNC -ENV CHECKPOINT_SYNC ${CHECKPOINT_SYNC:-true} - # Build zebrad with these features # Keep these in sync with: # https://github.com/ZcashFoundation/zebra/blob/main/.github/workflows/build-docker-image.yml#L42 @@ -88,6 +86,7 @@ ARG TEST_FEATURES="lightwalletd-grpc-tests zebra-checkpoints" # separately from the test and production image builds. ENV ENTRYPOINT_FEATURES "$TEST_FEATURES $FEATURES" +# Use default network value if none is provided ARG NETWORK ENV NETWORK ${NETWORK:-Mainnet} @@ -134,74 +133,65 @@ COPY . . 
# Build zebra RUN cargo build --locked --release --features "${FEATURES}" --package zebrad --bin zebrad +COPY ./docker/runtime-entrypoint.sh / +RUN chmod u+x /runtime-entrypoint.sh + # This stage is only used when deploying nodes or when only the resulting zebrad binary is needed # # To save space, this step starts from scratch using debian, and only adds the resulting # binary from the `release` stage, and the Zcash Sprout & Sapling parameters from ZCash FROM debian:bullseye-slim AS runtime COPY --from=release /opt/zebrad/target/release/zebrad /usr/local/bin +COPY --from=release /runtime-entrypoint.sh / COPY --from=us-docker.pkg.dev/zealous-zebra/zebra/zcash-params /root/.zcash-params /root/.zcash-params RUN apt-get update && \ apt-get install -y --no-install-recommends \ ca-certificates -ARG CHECKPOINT_SYNC=true -ARG NETWORK=Mainnet +# Config settings + +ARG NETWORK +ENV NETWORK ${NETWORK:-Mainnet} + +# Set this to enable the RPC port ARG RPC_PORT +ENV RPC_PORT ${RPC_PORT} -# Use a configurable dir and file for the zebrad configuration file -ARG ZEBRA_CONF_DIR=/etc/zebra -ENV ZEBRA_CONF_DIR ${ZEBRA_CONF_DIR} +# Set this to log to a file, if not set, logs to standard output +ARG LOG_FILE +ENV LOG_FILE ${LOG_FILE} -ARG ZEBRA_CONF_FILE=zebrad.toml -ENV ZEBRA_CONF_FILE ${ZEBRA_CONF_FILE} +# Expose configured ports -ARG ZEBRA_CONF_PATH=${ZEBRA_CONF_DIR}/${ZEBRA_CONF_FILE} -ENV ZEBRA_CONF_PATH ${ZEBRA_CONF_PATH} +EXPOSE 8233 18233 $RPC_PORT -# Build the `zebrad.toml` before starting the container, using the arguments from build -# time, or using the default values set just above. And create the conf path and file if -# it does not exist. -# -# We disable most ports by default, so the default config is secure. -# Users have to opt-in to additional functionality by editing `zebrad.toml`. -# -# It is safe to use multiple RPC threads in Docker, because we know we are the only running -# `zebrad` or `zcashd` process in the container. 
-# -# TODO: -# - move this file creation to an entrypoint as we can use default values at runtime, -# and modify those as needed when starting the container (at runtime and not at build time) -# - make `cache_dir`, `rpc.listen_addr`, `metrics.endpoint_addr`, and `tracing.endpoint_addr` into Docker arguments -RUN mkdir -p ${ZEBRA_CONF_DIR} \ - && touch ${ZEBRA_CONF_PATH} -RUN set -ex; \ - { \ - echo "[network]"; \ - echo "network = '${NETWORK}'"; \ - echo "listen_addr = '0.0.0.0'"; \ - echo "[consensus]"; \ - echo "checkpoint_sync = ${CHECKPOINT_SYNC}"; \ - echo "[state]"; \ - echo "cache_dir = '/zebrad-cache'"; \ - echo "[rpc]"; \ - [ -n "$RPC_PORT" ] && echo "listen_addr = '0.0.0.0:${RPC_PORT}'"; \ - echo "parallel_cpu_threads = 0"; \ - echo "[metrics]"; \ - echo "#endpoint_addr = '0.0.0.0:9999'"; \ - echo "[tracing]"; \ - echo "#endpoint_addr = '0.0.0.0:3000'"; \ - } > "${ZEBRA_CONF_PATH}" +# Config location + +# Use a configurable dir and file for the zebrad configuration file +ARG ZEBRA_CONF_DIR +ENV ZEBRA_CONF_DIR ${ZEBRA_CONF_DIR:-/etc/zebra} +ARG ZEBRA_CONF_FILE +ENV ZEBRA_CONF_FILE ${ZEBRA_CONF_FILE:-zebrad.toml} -EXPOSE 8233 18233 $RPC_PORT +ARG ZEBRA_CONF_PATH +ENV ZEBRA_CONF_PATH ${ZEBRA_CONF_PATH:-$ZEBRA_CONF_DIR/$ZEBRA_CONF_FILE} + +# Other settings ARG SHORT_SHA -ENV SHORT_SHA $SHORT_SHA +ENV SHORT_SHA ${SHORT_SHA} +# Set this to send sentry reports when Zebra crashes ARG SENTRY_DSN ENV SENTRY_DSN ${SENTRY_DSN} -# TODO: remove the specified config file location and use the default expected by zebrad -CMD zebrad -c "${ZEBRA_CONF_PATH}" start +# Create a default config file based on the Docker build arguments, +# and report the available zebrad arguments. +# (--help is used as a dummy command.) 
+RUN /runtime-entrypoint.sh --help + +# Update the config file based on the Docker run variables, +# and launch zebrad with it +ENTRYPOINT [ "/runtime-entrypoint.sh" ] diff --git a/docker/runtime-entrypoint.sh b/docker/runtime-entrypoint.sh new file mode 100755 index 00000000000..a91f4ea21d7 --- /dev/null +++ b/docker/runtime-entrypoint.sh @@ -0,0 +1,69 @@ +#!/usr/bin/env bash + +# show the commands we are executing +set -x +# exit if a command fails +set -e +# exit if any command in a pipeline fails +set -o pipefail + +echo "Config variables:" +echo "NETWORK=$NETWORK" +echo "RPC_PORT=$RPC_PORT" +echo "LOG_FILE=$LOG_FILE" + +echo "Config location:" +echo "ZEBRA_CONF_DIR=$ZEBRA_CONF_DIR" +echo "ZEBRA_CONF_FILE=$ZEBRA_CONF_FILE" +echo "ZEBRA_CONF_PATH=$ZEBRA_CONF_PATH" + +echo "Other variables:" +echo "SHORT_SHA=$SHORT_SHA" +echo "SENTRY_DSN=$SENTRY_DSN" + +# Create the conf path and file if it does not exist. +mkdir -p "$ZEBRA_CONF_DIR" +touch "$ZEBRA_CONF_PATH" + +# Populate `zebrad.toml` before starting zebrad, using the environmental +# variables set by the Dockerfile. +# +# We disable most ports by default, so the default config is secure. +# Users have to opt-in to additional functionality by setting environmental variables. 
+#
+# TODO:
+# - make `cache_dir`, `metrics.endpoint_addr`, and `tracing.endpoint_addr` into Docker arguments
+# - add an $EXTRA_CONFIG or $REPLACEMENT_CONFIG environmental variable
+cat <<EOF > "$ZEBRA_CONF_PATH"
+[network]
+network = "$NETWORK"
+listen_addr = "0.0.0.0"
+
+[state]
+cache_dir = "/zebrad-cache"
+
+[metrics]
+#endpoint_addr = "0.0.0.0:9999"
+EOF
+
+if [[ -n "$RPC_PORT" ]]; then
+cat <<EOF >> "$ZEBRA_CONF_PATH"
+[rpc]
+listen_addr = "0.0.0.0:${RPC_PORT}"
+EOF
+fi
+
+if [[ -n "$LOG_FILE" ]]; then
+mkdir -p $(dirname "$LOG_FILE")
+
+cat <<EOF >> "$ZEBRA_CONF_PATH"
+[tracing]
+log_file = "${LOG_FILE}"
+#endpoint_addr = "0.0.0.0:3000"
+EOF
+fi
+
+echo "Using zebrad.toml:"
+cat "$ZEBRA_CONF_PATH"
+
+exec zebrad -c "$ZEBRA_CONF_PATH" "$@"

From 0ffa47cc1e81695e5e79a052dd79885e5283f90d Mon Sep 17 00:00:00 2001
From: teor
Date: Thu, 8 Jun 2023 20:50:18 +1000
Subject: [PATCH 061/265] Add instructions for doing mass renames easily
 (#6865)

---
 book/src/SUMMARY.md          |   3 +-
 book/src/dev/mass-renames.md | 113 +++++++++++++++++++++++++++++++++++
 2 files changed, 115 insertions(+), 1 deletion(-)
 create mode 100644 book/src/dev/mass-renames.md

diff --git a/book/src/SUMMARY.md b/book/src/SUMMARY.md
index 6840019a278..77a8a8e354d 100644
--- a/book/src/SUMMARY.md
+++ b/book/src/SUMMARY.md
@@ -36,5 +36,6 @@
   - [Network Architecture](dev/diagrams/zebra-network.md)
   - [Continuous Integration](dev/continuous-integration.md)
   - [Continuous Delivery](dev/continuous-delivery.md)
-  - [zebra-checkpoints](dev/zebra-checkpoints.md)
+  - [Generating Zebra Checkpoints](dev/zebra-checkpoints.md)
+  - [Doing Mass Renames](dev/mass-renames.md)
 - [API Reference](api.md)
diff --git a/book/src/dev/mass-renames.md b/book/src/dev/mass-renames.md
new file mode 100644
index 00000000000..cd9eda6de01
--- /dev/null
+++ b/book/src/dev/mass-renames.md
@@ -0,0 +1,113 @@
+# Doing Mass Renames in Zebra Code
+
+Sometimes we want to rename a Rust type or function, or change a log message.
+ +But our types and functions are also used in our documentation, +so the compiler can sometimes miss when their names are changed. + +Our log messages are also used in our integration tests, +so changing them can lead to unexpected test failures or hangs. + +## Universal Renames with `sed` + +You can use `sed` to rename all the instances of a name in Zebra's code, documentation, and tests: +```sh +git ls-tree --full-tree -r --name-only HEAD | \ +xargs sed -i 's/OldName/NewName/g' +``` + +Or excluding specific paths: +```sh +git ls-tree --full-tree -r --name-only HEAD | \ +grep -v 'path-to-skip' | \ +xargs sed -i 's/OldName/NewName/g' +``` + +`sed` also supports regular expressions to replace a pattern with another pattern. + +Here's how to make a PR with these replacements: +1. Run the `sed` commands +2. Run `cargo fmt --all` after doing all the replacements +3. Put the commands in the commit message and pull request, so the reviewer can check them + +Here's how to review that PR: +1. Check out two copies of the repository, one with the PR, and one without: +```sh +cd zebra +git fetch --all +# clear the checkout so we can use main elsewhere +git checkout main^ +# Use the base branch or commit for the PR, which is usually main +git worktree add ../zebra-sed main +git worktree add ../zebra-pr origin/pr-branch-name +``` + +2. Run the scripts on the repository without the PR: +```sh +cd ../zebra-sed +# run the scripts in the PR or commit message +git ls-tree --full-tree -r --name-only HEAD | \ +xargs sed -i 's/OldName/NewName/g' +cargo fmt --all +``` + +3. Automatically check that they match +```sh +cd .. +git diff zebra-sed zebra-pr +``` + +If there are no differences, then the PR can be approved. + +If there are differences, then post them as a review in the PR, +and ask the author to re-run the script on the latest `main`. 
+ +## Interactive Renames with `fastmod` + +You can use `fastmod` to rename some instances, but skip others: +```sh +fastmod --fixed-strings "OldName" "NewName" [paths to change] +``` + +`fastmod` also supports regular expressions to replace a pattern with another pattern. + +Here's how to make a PR with these replacements: +1. Run the `fastmod` commands, choosing which instances to replace +2. Run `cargo fmt --all` after doing all the replacements +3. Put the commands in the commit message and pull request, so the reviewer can check them +4. If there are a lot of renames: + - use `sed` on any directories or files that are always renamed, and put them in the first PR, + - do a cleanup using `fastmod` in the next PR. + +Here's how to review that PR: +1. Manually review each replacement (there's no shortcut) + +## Using `rustdoc` links to detect name changes + +When you're referencing a type or function in a doc comment, +use a `rustdoc` link to refer to it. + +This makes the documentation easier to navigate, +and our `rustdoc` lint will detect any typos or name changes. + +```rust +//! This is what `rustdoc` links look like: +//! - [`u32`] type or trait +//! - [`drop()`] function +//! - [`Clone::clone()`] method +//! - [`Option::None`] enum variant +//! - [`Option::Some(_)`](Option::Some) enum variant with data +//! - [`HashMap`](std::collections::HashMap) fully-qualified path +//! - [`BTreeSet`](std::collections::BTreeSet) fully-qualified path with generics +``` + +If a type isn't imported in the module or Rust prelude, +then it needs a fully-qualified path in the docs, or an unused import: +```rust +// For rustdoc +#[allow(unused_imports)] +use std::collections::LinkedList; + +//! Link to [`LinkedList`]. 
+struct Type; +``` From 319b01bb841b174e29478c478824e9d67566e3cd Mon Sep 17 00:00:00 2001 From: Deirdre Connolly Date: Thu, 8 Jun 2023 15:53:51 -0400 Subject: [PATCH 062/265] docs: Prep release checklist for stable (#6876) * Prep release checklist for stable * Update .github/PULL_REQUEST_TEMPLATE/release-checklist.md --- .github/PULL_REQUEST_TEMPLATE/release-checklist.md | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/.github/PULL_REQUEST_TEMPLATE/release-checklist.md b/.github/PULL_REQUEST_TEMPLATE/release-checklist.md index 39aee582583..884b56310bc 100644 --- a/.github/PULL_REQUEST_TEMPLATE/release-checklist.md +++ b/.github/PULL_REQUEST_TEMPLATE/release-checklist.md @@ -61,7 +61,7 @@ You can use `fastmod` to interactively find and replace versions. For example, you can do something like: ``` -fastmod --extensions rs,toml,md --fixed-strings '1.0.0-rc.0' '1.0.0-rc.1' zebrad README.md zebra-network/src/constants.rs book/src/user/docker.md +fastmod --extensions rs,toml,md --fixed-strings '1.0.0-rc.9' '1.0.0' zebrad README.md zebra-network/src/constants.rs book/src/user/docker.md fastmod --extensions rs,toml,md --fixed-strings '1.0.0-beta.15' '1.0.0-beta.16' zebra-* fastmod --extensions rs,toml,md --fixed-strings '0.2.30' '0.2.31' tower-batch tower-fallback cargo build @@ -147,7 +147,7 @@ and the updated changelog: - [ ] Make sure the PRs with the new checkpoint hashes and missed dependencies are already merged - [ ] Push the version increments, the updated changelog and the release constants into a branch - (for example: `bump-v1.0.0-rc.0` - this needs to be different to the tag name) + (for example: `bump-v1.0.0` - this needs to be different to the tag name) - [ ] Create a release PR by adding `&template=release-checklist.md` to the comparing url ([Example](https://github.com/ZcashFoundation/zebra/compare/v1.0.0-rc.0-release?expand=1&template=release-checklist.md)). 
- [ ] Add the list of deleted changelog entries as a comment to make reviewing easier. - [ ] Freeze the [`batched` queue](https://dashboard.mergify.com/github/ZcashFoundation/repo/zebra/queues) using Mergify. @@ -157,10 +157,10 @@ and the updated changelog: - [ ] Once the PR has been merged, create a new release using the draft release as a base, by clicking the Edit icon in the [draft release](https://github.com/ZcashFoundation/zebra/releases) - [ ] Set the tag name to the version tag, - for example: `v1.0.0-rc.0` + for example: `v1.0.0` - [ ] Set the release to target the `main` branch - [ ] Set the release title to `Zebra ` followed by the version tag, - for example: `Zebra 1.0.0-rc.0` + for example: `Zebra 1.0.0` - [ ] Replace the prepopulated draft changelog in the release description with the final changelog you created; starting just _after_ the title `## [Zebra ...` of the current version being released, and ending just _before_ the title of the previous release. @@ -175,8 +175,9 @@ and the updated changelog: - [ ] Wait until the [pre-release deployment machines have successfully launched](https://github.com/ZcashFoundation/zebra/actions/workflows/continous-delivery.yml) - [ ] [Publish the release to GitHub](https://github.com/ZcashFoundation/zebra/releases) by disabling 'pre-release', then clicking "Set as the latest release" - [ ] Wait until [the Docker images have been published](https://github.com/ZcashFoundation/zebra/actions/workflows/release-binaries.yml) -- [ ] Test the Docker image using `docker run --tty --interactive zfnd/zebra:1.0.0-rc.`, - and put the output in a comment on the PR +- [ ] Test the Docker image using `docker run --tty --interactive zfnd/zebra`, + and put the output in a comment on the PR. + (You can use [gcloud cloud shell](https://console.cloud.google.com/home/dashboard?cloudshell=true)) - [ ] Un-freeze the [`batched` queue](https://dashboard.mergify.com/github/ZcashFoundation/repo/zebra/queues) using Mergify. 
From 6f3e79e2349b8568e46046f02f2f90b4402bb726 Mon Sep 17 00:00:00 2001 From: Alfredo Garcia Date: Thu, 8 Jun 2023 20:42:32 -0300 Subject: [PATCH 063/265] add user support section to readme (#6873) --- README.md | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/README.md b/README.md index 9bfb4bf8ca3..893db0a1a61 100644 --- a/README.md +++ b/README.md @@ -16,6 +16,7 @@ - [Known Issues](#known-issues) - [Future Work](#future-work) - [Documentation](#documentation) +- [User support](#user-support) - [Security](#security) - [License](#license) @@ -177,6 +178,12 @@ documentation](https://doc.zebra.zfnd.org) for the external API of our crates, as well as [internal documentation](https://doc-internal.zebra.zfnd.org) for private APIs. +## User support + +For bug reports please [open a bug report ticket in the Zebra repository](https://github.com/ZcashFoundation/zebra/issues/new?assignees=&labels=C-bug%2C+S-needs-triage&projects=&template=bug_report.yml&title=%5BUser+reported+bug%5D%3A+). + +Alternatively by chat, [Join the Zcash Foundation Discord Server](https://discord.com/invite/aRgNRVwsM8) and find the #zebra-support channel. + ## Security Zebra has a [responsible disclosure policy](https://github.com/ZcashFoundation/zebra/blob/main/SECURITY.md), which we encourage security researchers to follow. From 4fbc89fc936fa1b2cc3453a3124954abb86de148 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 8 Jun 2023 23:42:52 +0000 Subject: [PATCH 064/265] build(deps): bump tj-actions/changed-files from 36.0.18 to 36.1.0 (#6874) Bumps [tj-actions/changed-files](https://github.com/tj-actions/changed-files) from 36.0.18 to 36.1.0. 
- [Release notes](https://github.com/tj-actions/changed-files/releases) - [Changelog](https://github.com/tj-actions/changed-files/blob/main/HISTORY.md) - [Commits](https://github.com/tj-actions/changed-files/compare/v36.0.18...v36.1.0) --- updated-dependencies: - dependency-name: tj-actions/changed-files dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/lint.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 1125ee01bf7..656c926374b 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -37,7 +37,7 @@ jobs: - name: Rust files id: changed-files-rust - uses: tj-actions/changed-files@v36.0.18 + uses: tj-actions/changed-files@v36.1.0 with: files: | **/*.rs @@ -49,7 +49,7 @@ jobs: - name: Workflow files id: changed-files-workflows - uses: tj-actions/changed-files@v36.0.18 + uses: tj-actions/changed-files@v36.1.0 with: files: | .github/workflows/*.yml From 92077f4db5dfc3018f78a0a3367990015c9d4c53 Mon Sep 17 00:00:00 2001 From: teor Date: Fri, 9 Jun 2023 09:43:03 +1000 Subject: [PATCH 065/265] fix(net): Avoid potential concurrency bugs in outbound handshakes (#6869) * Stop sending peer errors on the PeerSet channel, to respect send limits * Move locking out of the cralwer select!, potential deadlock or hang risk * Move report_failed() out of the CandidateSet, reducing concurrency risks * Make CandidateSet Send * Make all CandidateSet operations concurrent, previous hand/deadlock bug * Reduce the gap between handshakes and peer set updates, and exit the task on shutdown --- zebra-network/src/peer_set/candidate_set.rs | 28 +- .../src/peer_set/candidate_set/tests/prop.rs | 2 +- zebra-network/src/peer_set/initialize.rs | 402 ++++++++++++------ .../src/peer_set/initialize/tests/vectors.rs | 83 +--- 4 files 
changed, 284 insertions(+), 231 deletions(-) diff --git a/zebra-network/src/peer_set/candidate_set.rs b/zebra-network/src/peer_set/candidate_set.rs index 76006672c9a..f3126e6adde 100644 --- a/zebra-network/src/peer_set/candidate_set.rs +++ b/zebra-network/src/peer_set/candidate_set.rs @@ -125,7 +125,11 @@ mod tests; // When we add the Seed state: // * show that seed peers that transition to other never attempted // states are already in the address book -pub(crate) struct CandidateSet { +pub(crate) struct CandidateSet +where + S: Service + Send, + S::Future: Send + 'static, +{ // Correctness: the address book must be private, // so all operations are performed on a blocking thread (see #1976). address_book: Arc>, @@ -136,7 +140,7 @@ pub(crate) struct CandidateSet { impl CandidateSet where - S: Service, + S: Service + Send, S::Future: Send + 'static, { /// Uses `address_book` and `peer_service` to manage a [`CandidateSet`] of peers. @@ -180,8 +184,6 @@ where /// The handshaker sets up the peer message receiver so it also sends a /// [`Responded`] peer address update. /// - /// [`report_failed`][Self::report_failed] puts peers into the [`Failed`] state. - /// /// [`next`][Self::next] puts peers into the [`AttemptPending`] state. /// /// ## Security @@ -411,21 +413,9 @@ where Some(next_peer) } - /// Mark `addr` as a failed peer. - pub async fn report_failed(&mut self, addr: &MetaAddr) { - let addr = MetaAddr::new_errored(addr.addr, addr.services); - - // # Correctness - // - // Spawn address book accesses on a blocking thread, - // to avoid deadlocks (see #1976). - let address_book = self.address_book.clone(); - let span = Span::current(); - tokio::task::spawn_blocking(move || { - span.in_scope(|| address_book.lock().unwrap().update(addr)) - }) - .await - .expect("panic in peer failure address book update task"); + /// Returns the address book for this `CandidateSet`. 
+ pub async fn address_book(&self) -> Arc> { + self.address_book.clone() } } diff --git a/zebra-network/src/peer_set/candidate_set/tests/prop.rs b/zebra-network/src/peer_set/candidate_set/tests/prop.rs index 394e35df6c3..d77b190c67b 100644 --- a/zebra-network/src/peer_set/candidate_set/tests/prop.rs +++ b/zebra-network/src/peer_set/candidate_set/tests/prop.rs @@ -139,7 +139,7 @@ proptest! { /// - if no reconnection peer is returned at all. async fn check_candidates_rate_limiting(candidate_set: &mut CandidateSet, candidates: u32) where - S: tower::Service, + S: tower::Service + Send, S::Future: Send + 'static, { let mut now = Instant::now(); diff --git a/zebra-network/src/peer_set/initialize.rs b/zebra-network/src/peer_set/initialize.rs index 72e1b8878b3..d306475b722 100644 --- a/zebra-network/src/peer_set/initialize.rs +++ b/zebra-network/src/peer_set/initialize.rs @@ -5,6 +5,7 @@ use std::{ collections::{BTreeMap, HashSet}, + convert::Infallible, net::SocketAddr, sync::Arc, time::Duration, @@ -13,7 +14,7 @@ use std::{ use futures::{ future::{self, FutureExt}, sink::SinkExt, - stream::{FuturesUnordered, StreamExt, TryStreamExt}, + stream::{FuturesUnordered, StreamExt}, TryFutureExt, }; use rand::seq::SliceRandom; @@ -26,6 +27,7 @@ use tokio_stream::wrappers::IntervalStream; use tower::{ buffer::Buffer, discover::Change, layer::Layer, util::BoxService, Service, ServiceExt, }; +use tracing::Span; use tracing_futures::Instrument; use zebra_chain::chain_tip::ChainTip; @@ -46,11 +48,15 @@ use crate::{ #[cfg(test)] mod tests; -/// The result of an outbound peer connection attempt or inbound connection -/// handshake. +/// A successful outbound peer connection attempt or inbound connection handshake. /// -/// This result comes from the `Handshaker`. -type DiscoveredPeer = Result<(PeerSocketAddr, peer::Client), BoxError>; +/// The [`Handshake`](peer::Handshake) service returns a [`Result`]. Only successful connections +/// should be sent on the channel. 
Errors should be logged or ignored. +/// +/// We don't allow any errors in this type, because: +/// - The connection limits don't include failed connections +/// - tower::Discover interprets an error as stream termination +type DiscoveredPeer = (PeerSocketAddr, peer::Client); /// Initialize a peer set, using a network `config`, `inbound_service`, /// and `latest_chain_tip`. @@ -146,14 +152,15 @@ where // Create an mpsc channel for peer changes, // based on the maximum number of inbound and outbound peers. + // + // The connection limit does not apply to errors, + // so they need to be handled before sending to this channel. let (peerset_tx, peerset_rx) = futures::channel::mpsc::channel::(config.peerset_total_connection_limit()); - let discovered_peers = peerset_rx - // Discover interprets an error as stream termination, - // so discard any errored connections... - .filter(|result| future::ready(result.is_ok())) - .map_ok(|(address, client)| Change::Insert(address, client.into())); + let discovered_peers = peerset_rx.map(|(address, client)| { + Result::<_, Infallible>::Ok(Change::Insert(address, client.into())) + }); // Create an mpsc channel for peerset demand signaling, // based on the maximum number of outbound peers. @@ -210,6 +217,9 @@ where // because zcashd rate-limits `addr`/`addrv2` messages per connection, // and if we only have one initial peer, // we need to ensure that its `Response::Addr` is used by the crawler. + // + // TODO: this might not be needed after we added the Connection peer address cache, + // try removing it in a future release? 
info!( ?active_initial_peer_count, "sending initial request for peers" @@ -342,7 +352,7 @@ where let handshake_result = handshake_result.expect("unexpected panic in initial peer handshake"); match handshake_result { - Ok(ref change) => { + Ok(change) => { handshake_success_total += 1; debug!( ?handshake_success_total, @@ -350,6 +360,9 @@ where ?change, "an initial peer handshake succeeded" ); + + // The connection limit makes sure this send doesn't block + peerset_tx.send(change).await?; } Err((addr, ref e)) => { handshake_error_total += 1; @@ -384,10 +397,6 @@ where } } - peerset_tx - .send(handshake_result.map_err(|(_addr, e)| e)) - .await?; - // Security: Let other tasks run after each connection is processed. // // Avoids remote peers starving other Zebra tasks using initial connection successes or errors. @@ -617,7 +626,8 @@ where let handshake_result = handshake.await; if let Ok(client) = handshake_result { - let _ = peerset_tx.send(Ok((addr, client))).await; + // The connection limit makes sure this send doesn't block + let _ = peerset_tx.send((addr, client)).await; } else { debug!(?handshake_result, "error handshaking with inbound peer"); } @@ -660,20 +670,18 @@ where enum CrawlerAction { /// Drop the demand signal because there are too many pending handshakes. DemandDrop, - /// Initiate a handshake to `candidate` in response to demand. - DemandHandshake { candidate: MetaAddr }, - /// Crawl existing peers for more peers in response to demand, because there - /// are no available candidates. - DemandCrawl, + /// Initiate a handshake to the next candidate peer in response to demand. + /// + /// If there are no available candidates, crawl existing peers. + DemandHandshakeOrCrawl, /// Crawl existing peers for more peers in response to a timer `tick`. TimerCrawl { tick: Instant }, - /// Handle a successfully connected handshake `peer_set_change`. 
- HandshakeConnected { - address: PeerSocketAddr, - client: peer::Client, - }, - /// Handle a handshake failure to `failed_addr`. - HandshakeFailed { failed_addr: MetaAddr }, + /// Clear a finished handshake. + HandshakeFinished, + /// Clear a finished demand crawl (DemandHandshakeOrCrawl with no peers). + DemandCrawlFinished, + /// Clear a finished TimerCrawl. + TimerCrawlFinished, } /// Given a channel `demand_rx` that signals a need for new peers, try to find @@ -709,11 +717,11 @@ enum CrawlerAction { )] async fn crawl_and_dial( config: Config, - mut demand_tx: futures::channel::mpsc::Sender, + demand_tx: futures::channel::mpsc::Sender, mut demand_rx: futures::channel::mpsc::Receiver, - mut candidates: CandidateSet, + candidates: CandidateSet, outbound_connector: C, - mut peerset_tx: futures::channel::mpsc::Sender, + peerset_tx: futures::channel::mpsc::Sender, mut active_outbound_connections: ActiveConnectionCounter, ) -> Result<(), BoxError> where @@ -725,31 +733,30 @@ where + Send + 'static, C::Future: Send + 'static, - S: Service, + S: Service + Send + Sync + 'static, S::Future: Send + 'static, { use CrawlerAction::*; - // CORRECTNESS - // - // To avoid hangs and starvation, the crawler must: - // - spawn a separate task for each crawl and handshake, so they can make - // progress independently (and avoid deadlocking each other) - // - use the `select!` macro for all actions, because the `select` function - // is biased towards the first ready future - info!( crawl_new_peer_interval = ?config.crawl_new_peer_interval, outbound_connections = ?active_outbound_connections.update_count(), "starting the peer address crawler", ); + let address_book = candidates.address_book().await; + + // # Concurrency + // + // Allow tasks using the candidate set to be spawned, so they can run concurrently. + // Previously, Zebra has had deadlocks and long hangs caused by running dependent + // candidate set futures in the same async task. 
+ let candidates = Arc::new(futures::lock::Mutex::new(candidates)); + + // This contains both crawl and handshake tasks. let mut handshakes = FuturesUnordered::new(); // returns None when empty. - // Keeping an unresolved future in the pool means the stream - // never terminates. - // We could use StreamExt::select_next_some and StreamExt::fuse, but `fuse` - // prevents us from adding items to the stream and checking its length. + // Keeping an unresolved future in the pool means the stream never terminates. handshakes.push(future::pending().boxed()); let mut crawl_timer = tokio::time::interval(config.crawl_new_peer_interval); @@ -759,6 +766,10 @@ where let mut crawl_timer = IntervalStream::new(crawl_timer).map(|tick| TimerCrawl { tick }); + // # Concurrency + // + // To avoid hangs and starvation, the crawler must spawn a separate task for each crawl + // and handshake, so they can make progress independently (and avoid deadlocking each other). loop { metrics::gauge!( "crawler.in_flight_handshakes", @@ -769,33 +780,45 @@ where ); let crawler_action = tokio::select! { + biased; + // Check for completed handshakes first, because the rest of the app needs them. + // Pending handshakes are limited by the connection limit. next_handshake_res = handshakes.next() => next_handshake_res.expect( "handshakes never terminates, because it contains a future that never resolves" ), - next_timer = crawl_timer.next() => next_timer.expect("timers never terminate"), - // turn the demand into an action, based on the crawler's current state - _ = demand_rx.next() => { + // The timer is rate-limited + next_timer = crawl_timer.next() => Ok(next_timer.expect("timers never terminate")), + // Turn any new demand into an action, based on the crawler's current state. + // + // # Concurrency + // + // Demand is potentially unlimited, so it must go last in a biased select!. 
+ next_demand = demand_rx.next() => next_demand.ok_or("demand stream closed, is Zebra shutting down?".into()).map(|MorePeers|{ if active_outbound_connections.update_count() >= config.peerset_outbound_connection_limit() { // Too many open outbound connections or pending handshakes already DemandDrop - } else if let Some(candidate) = candidates.next().await { - // candidates.next has a short delay, and briefly holds the address - // book lock, so it shouldn't hang - DemandHandshake { candidate } } else { - DemandCrawl + DemandHandshakeOrCrawl } - } + }) }; match crawler_action { - DemandDrop => { + // Dummy actions + Ok(DemandDrop) => { // This is set to trace level because when the peerset is - // congested it can generate a lot of demand signal very - // rapidly. + // congested it can generate a lot of demand signal very rapidly. trace!("too many open connections or in-flight handshakes, dropping demand signal"); } - DemandHandshake { candidate } => { + + // Spawned tasks + Ok(DemandHandshakeOrCrawl) => { + let candidates = candidates.clone(); + let outbound_connector = outbound_connector.clone(); + let peerset_tx = peerset_tx.clone(); + let address_book = address_book.clone(); + let demand_tx = demand_tx.clone(); + // Increment the connection count before we spawn the connection. let outbound_connection_tracker = active_outbound_connections.track_connection(); debug!( @@ -803,74 +826,91 @@ where "opening an outbound peer connection" ); - // Spawn each handshake into an independent task, so it can make - // progress independently of the crawls. - let hs_join = tokio::spawn(dial( - candidate, - outbound_connector.clone(), - outbound_connection_tracker, - )) + // Spawn each handshake or crawl into an independent task, so handshakes can make + // progress while crawls are running. + let handshake_or_crawl_handle = tokio::spawn(async move { + // Try to get the next available peer for a handshake. 
+ // + // candidates.next() has a short timeout, and briefly holds the address + // book lock, so it shouldn't hang. + // + // Hold the lock for as short a time as possible. + let candidate = { candidates.lock().await.next().await }; + + if let Some(candidate) = candidate { + // we don't need to spawn here, because there's nothing running concurrently + dial( + candidate, + outbound_connector, + outbound_connection_tracker, + peerset_tx, + address_book, + demand_tx, + ) + .await?; + + Ok(HandshakeFinished) + } else { + // There weren't any peers, so try to get more peers. + debug!("demand for peers but no available candidates"); + + crawl(candidates, demand_tx).await?; + + Ok(DemandCrawlFinished) + } + }) .map(move |res| match res { Ok(crawler_action) => crawler_action, Err(e) => { - panic!("panic during handshaking with {candidate:?}: {e:?} "); + panic!("panic during handshaking: {e:?}"); } }) .in_current_span(); - handshakes.push(Box::pin(hs_join)); + handshakes.push(Box::pin(handshake_or_crawl_handle)); } - DemandCrawl => { - debug!("demand for peers but no available candidates"); - // update has timeouts, and briefly holds the address book - // lock, so it shouldn't hang - // - // TODO: refactor candidates into a buffered service, so we can - // spawn independent tasks to avoid deadlocks - let more_peers = candidates.update().await?; - - // If we got more peers, try to connect to a new peer. - // - // # Security - // - // Update attempts are rate-limited by the candidate set. - // - // We only try peers if there was actually an update. - // So if all peers have had a recent attempt, - // and there was recent update with no peers, - // the channel will drain. - // This prevents useless update attempt loops. 
- if let Some(more_peers) = more_peers { - let _ = demand_tx.try_send(more_peers); - } + Ok(TimerCrawl { tick }) => { + let candidates = candidates.clone(); + let demand_tx = demand_tx.clone(); + + let crawl_handle = tokio::spawn(async move { + debug!( + ?tick, + "crawling for more peers in response to the crawl timer" + ); + + crawl(candidates, demand_tx).await?; + + Ok(TimerCrawlFinished) + }) + .map(move |res| match res { + Ok(crawler_action) => crawler_action, + Err(e) => { + panic!("panic during TimerCrawl: {tick:?} {e:?}"); + } + }) + .in_current_span(); + + handshakes.push(Box::pin(crawl_handle)); } - TimerCrawl { tick } => { - debug!( - ?tick, - "crawling for more peers in response to the crawl timer" - ); - // TODO: spawn independent tasks to avoid deadlocks - candidates.update().await?; - // Try to connect to a new peer. - let _ = demand_tx.try_send(MorePeers); + + // Completed spawned tasks + Ok(HandshakeFinished) => { + // Already logged in dial() } - HandshakeConnected { address, client } => { - debug!(candidate.addr = ?address, "successfully dialed new peer"); - // successes are handled by an independent task, except for `candidates.update` in - // this task, which has a timeout, so they shouldn't hang - peerset_tx.send(Ok((address, client))).await?; + Ok(DemandCrawlFinished) => { + // This is set to trace level because when the peerset is + // congested it can generate a lot of demand signal very rapidly. + trace!("demand-based crawl finished"); + } + Ok(TimerCrawlFinished) => { + debug!("timer-based crawl finished"); } - HandshakeFailed { failed_addr } => { - // The connection was never opened, or it failed the handshake and was dropped. 
- - debug!(?failed_addr.addr, "marking candidate as failed"); - candidates.report_failed(&failed_addr).await; - // The demand signal that was taken out of the queue - // to attempt to connect to the failed candidate never - // turned into a connection, so add it back: - // - // Security: handshake failures are rate-limited by peer attempt timeouts. - let _ = demand_tx.try_send(MorePeers); + + // Fatal errors and shutdowns + Err(error) => { + info!(?error, "crawler task exiting due to an error"); + return Err(error); } } @@ -881,17 +921,79 @@ where } } +/// Try to get more peers using `candidates`, then queue a connection attempt using `demand_tx`. +/// If there were no new peers, the connection attempt is skipped. +#[instrument(skip(candidates, demand_tx))] +async fn crawl( + candidates: Arc>>, + mut demand_tx: futures::channel::mpsc::Sender, +) -> Result<(), BoxError> +where + S: Service + Send + Sync + 'static, + S::Future: Send + 'static, +{ + // update() has timeouts, and briefly holds the address book + // lock, so it shouldn't hang. + // Try to get new peers, holding the lock for as short a time as possible. + let result = { + let result = candidates.lock().await.update().await; + std::mem::drop(candidates); + result + }; + let more_peers = match result { + Ok(more_peers) => more_peers, + Err(e) => { + info!( + ?e, + "candidate set returned an error, is Zebra shutting down?" + ); + return Err(e); + } + }; + + // If we got more peers, try to connect to a new peer on our next loop. + // + // # Security + // + // Update attempts are rate-limited by the candidate set, + // and we only try peers if there was actually an update. + // + // So if all peers have had a recent attempt, and there was recent update + // with no peers, the channel will drain. This prevents useless update attempt + // loops. 
+ if let Some(more_peers) = more_peers { + if let Err(send_error) = demand_tx.try_send(more_peers) { + if send_error.is_disconnected() { + // Zebra is shutting down + return Err(send_error.into()); + } + } + } + + Ok(()) +} + /// Try to connect to `candidate` using `outbound_connector`. /// Uses `outbound_connection_tracker` to track the active connection count. /// -/// Returns a `HandshakeConnected` action on success, and a -/// `HandshakeFailed` action on error. -#[instrument(skip(outbound_connector, outbound_connection_tracker))] +/// On success, sends peers to `peerset_tx`. +/// On failure, marks the peer as failed in the address book, +/// then re-adds demand to `demand_tx`. +#[instrument(skip( + outbound_connector, + outbound_connection_tracker, + peerset_tx, + address_book, + demand_tx +))] async fn dial( candidate: MetaAddr, mut outbound_connector: C, outbound_connection_tracker: ConnectionTracker, -) -> CrawlerAction + mut peerset_tx: futures::channel::mpsc::Sender, + address_book: Arc>, + mut demand_tx: futures::channel::mpsc::Sender, +) -> Result<(), BoxError> where C: Service< OutboundConnectorRequest, @@ -902,7 +1004,7 @@ where + 'static, C::Future: Send + 'static, { - // CORRECTNESS + // # Correctness // // To avoid hangs, the dialer must only await: // - functions that return immediately, or @@ -911,10 +1013,7 @@ where debug!(?candidate.addr, "attempting outbound connection in response to demand"); // the connector is always ready, so this can't hang - let outbound_connector = outbound_connector - .ready() - .await - .expect("outbound connector never errors"); + let outbound_connector = outbound_connector.ready().await?; let req = OutboundConnectorRequest { addr: candidate.addr, @@ -922,24 +1021,51 @@ where }; // the handshake has timeouts, so it shouldn't hang - outbound_connector - .call(req) - .map_err(|e| (candidate, e)) - .map(Into::into) - .await -} + let handshake_result = outbound_connector.call(req).map(Into::into).await; + + match 
handshake_result { + Ok((address, client)) => { + debug!(?candidate.addr, "successfully dialed new peer"); -impl From> for CrawlerAction { - fn from(dial_result: Result<(PeerSocketAddr, peer::Client), (MetaAddr, BoxError)>) -> Self { - use CrawlerAction::*; - match dial_result { - Ok((address, client)) => HandshakeConnected { address, client }, - Err((candidate, e)) => { - debug!(?candidate.addr, ?e, "failed to connect to candidate"); - HandshakeFailed { - failed_addr: candidate, + // The connection limit makes sure this send doesn't block. + peerset_tx.send((address, client)).await?; + } + // The connection was never opened, or it failed the handshake and was dropped. + Err(error) => { + debug!(?error, ?candidate.addr, "failed to make outbound connection to peer"); + report_failed(address_book.clone(), candidate).await; + + // The demand signal that was taken out of the queue to attempt to connect to the + // failed candidate never turned into a connection, so add it back. + // + // # Security + // + // Handshake failures are rate-limited by peer attempt timeouts. + if let Err(send_error) = demand_tx.try_send(MorePeers) { + if send_error.is_disconnected() { + // Zebra is shutting down + return Err(send_error.into()); } } } } + + Ok(()) +} + +/// Mark `addr` as a failed peer in `address_book`. +#[instrument(skip(address_book))] +async fn report_failed(address_book: Arc>, addr: MetaAddr) { + let addr = MetaAddr::new_errored(addr.addr, addr.services); + + // # Correctness + // + // Spawn address book accesses on a blocking thread, + // to avoid deadlocks (see #1976). 
+ let span = Span::current(); + tokio::task::spawn_blocking(move || { + span.in_scope(|| address_book.lock().unwrap().update(addr)) + }) + .await + .expect("panic in peer failure address book update task"); } diff --git a/zebra-network/src/peer_set/initialize/tests/vectors.rs b/zebra-network/src/peer_set/initialize/tests/vectors.rs index d9fdf9a1535..f949506cdaf 100644 --- a/zebra-network/src/peer_set/initialize/tests/vectors.rs +++ b/zebra-network/src/peer_set/initialize/tests/vectors.rs @@ -459,15 +459,7 @@ async fn crawler_peer_limit_one_connect_ok_then_drop() { let peer_result = peerset_rx.try_next(); match peer_result { // A peer handshake succeeded. - Ok(Some(peer_result)) => { - assert!( - matches!(peer_result, Ok((_, _))), - "unexpected connection error: {peer_result:?}\n\ - {peer_count} previous peers succeeded", - ); - peer_count += 1; - } - + Ok(Some(_peer_change)) => peer_count += 1, // The channel is closed and there are no messages left in the channel. Ok(None) => break, // The channel is still open, but there are no messages left in the channel. @@ -521,15 +513,7 @@ async fn crawler_peer_limit_one_connect_ok_stay_open() { let peer_change_result = peerset_rx.try_next(); match peer_change_result { // A peer handshake succeeded. - Ok(Some(peer_change_result)) => { - assert!( - matches!(peer_change_result, Ok((_, _))), - "unexpected connection error: {peer_change_result:?}\n\ - {peer_change_count} previous peers succeeded", - ); - peer_change_count += 1; - } - + Ok(Some(_peer_change)) => peer_change_count += 1, // The channel is closed and there are no messages left in the channel. Ok(None) => break, // The channel is still open, but there are no messages left in the channel. @@ -631,15 +615,7 @@ async fn crawler_peer_limit_default_connect_ok_then_drop() { let peer_result = peerset_rx.try_next(); match peer_result { // A peer handshake succeeded. 
- Ok(Some(peer_result)) => { - assert!( - matches!(peer_result, Ok((_, _))), - "unexpected connection error: {peer_result:?}\n\ - {peer_count} previous peers succeeded", - ); - peer_count += 1; - } - + Ok(Some(_peer_change)) => peer_count += 1, // The channel is closed and there are no messages left in the channel. Ok(None) => break, // The channel is still open, but there are no messages left in the channel. @@ -694,15 +670,7 @@ async fn crawler_peer_limit_default_connect_ok_stay_open() { let peer_change_result = peerset_rx.try_next(); match peer_change_result { // A peer handshake succeeded. - Ok(Some(peer_change_result)) => { - assert!( - matches!(peer_change_result, Ok((_, _))), - "unexpected connection error: {peer_change_result:?}\n\ - {peer_change_count} previous peers succeeded", - ); - peer_change_count += 1; - } - + Ok(Some(_peer_change)) => peer_change_count += 1, // The channel is closed and there are no messages left in the channel. Ok(None) => break, // The channel is still open, but there are no messages left in the channel. @@ -834,15 +802,7 @@ async fn listener_peer_limit_one_handshake_ok_then_drop() { let peer_result = peerset_rx.try_next(); match peer_result { // A peer handshake succeeded. - Ok(Some(peer_result)) => { - assert!( - matches!(peer_result, Ok((_, _))), - "unexpected connection error: {peer_result:?}\n\ - {peer_count} previous peers succeeded", - ); - peer_count += 1; - } - + Ok(Some(_peer_change)) => peer_count += 1, // The channel is closed and there are no messages left in the channel. Ok(None) => break, // The channel is still open, but there are no messages left in the channel. @@ -900,15 +860,7 @@ async fn listener_peer_limit_one_handshake_ok_stay_open() { let peer_change_result = peerset_rx.try_next(); match peer_change_result { // A peer handshake succeeded. 
- Ok(Some(peer_change_result)) => { - assert!( - matches!(peer_change_result, Ok((_, _))), - "unexpected connection error: {peer_change_result:?}\n\ - {peer_change_count} previous peers succeeded", - ); - peer_change_count += 1; - } - + Ok(Some(_peer_change)) => peer_change_count += 1, // The channel is closed and there are no messages left in the channel. Ok(None) => break, // The channel is still open, but there are no messages left in the channel. @@ -1019,15 +971,7 @@ async fn listener_peer_limit_default_handshake_ok_then_drop() { let peer_result = peerset_rx.try_next(); match peer_result { // A peer handshake succeeded. - Ok(Some(peer_result)) => { - assert!( - matches!(peer_result, Ok((_, _))), - "unexpected connection error: {peer_result:?}\n\ - {peer_count} previous peers succeeded", - ); - peer_count += 1; - } - + Ok(Some(_peer_change)) => peer_count += 1, // The channel is closed and there are no messages left in the channel. Ok(None) => break, // The channel is still open, but there are no messages left in the channel. @@ -1085,15 +1029,7 @@ async fn listener_peer_limit_default_handshake_ok_stay_open() { let peer_change_result = peerset_rx.try_next(); match peer_change_result { // A peer handshake succeeded. - Ok(Some(peer_change_result)) => { - assert!( - matches!(peer_change_result, Ok((_, _))), - "unexpected connection error: {peer_change_result:?}\n\ - {peer_change_count} previous peers succeeded", - ); - peer_change_count += 1; - } - + Ok(Some(_peer_change)) => peer_change_count += 1, // The channel is closed and there are no messages left in the channel. Ok(None) => break, // The channel is still open, but there are no messages left in the channel. 
@@ -1158,7 +1094,8 @@ async fn add_initial_peers_is_rate_limited() { let elapsed = Instant::now() - before; - assert_eq!(connections.len(), PEER_COUNT); + // Errors are ignored, so we don't expect any peers here + assert_eq!(connections.len(), 0); // Make sure the rate limiting worked by checking if it took long enough assert!( elapsed From 286ebdbf8c9e40891164821852567bc86de30012 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 9 Jun 2023 01:50:38 +0000 Subject: [PATCH 066/265] build(deps): bump serde from 1.0.163 to 1.0.164 (#6875) Bumps [serde](https://github.com/serde-rs/serde) from 1.0.163 to 1.0.164. - [Release notes](https://github.com/serde-rs/serde/releases) - [Commits](https://github.com/serde-rs/serde/compare/v1.0.163...v1.0.164) --- updated-dependencies: - dependency-name: serde dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 8 ++++---- zebra-chain/Cargo.toml | 2 +- zebra-consensus/Cargo.toml | 2 +- zebra-network/Cargo.toml | 2 +- zebra-node-services/Cargo.toml | 4 ++-- zebra-rpc/Cargo.toml | 2 +- zebra-state/Cargo.toml | 2 +- zebrad/Cargo.toml | 2 +- 8 files changed, 12 insertions(+), 12 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 3c3f57a2818..87710ff552c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4023,9 +4023,9 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.163" +version = "1.0.164" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2113ab51b87a539ae008b5c6c02dc020ffa39afd2d83cffcb3f4eb2722cebec2" +checksum = "9e8c8cf938e98f769bc164923b06dce91cea1751522f46f8466461af04c9027d" dependencies = [ "serde_derive", ] @@ -4041,9 +4041,9 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.163" +version = "1.0.164" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c805777e3930c8883389c602315a24224bcc738b63905ef87cd1420353ea93e" +checksum = "d9735b638ccc51c28bf6914d90a2e9725b377144fc612c49a611fddd1b631d68" dependencies = [ "proc-macro2 1.0.59", "quote 1.0.28", diff --git a/zebra-chain/Cargo.toml b/zebra-chain/Cargo.toml index c3d855a0e07..ec2e1d070b8 100644 --- a/zebra-chain/Cargo.toml +++ b/zebra-chain/Cargo.toml @@ -80,7 +80,7 @@ tracing = "0.1.37" # Serialization hex = { version = "0.4.3", features = ["serde"] } -serde = { version = "1.0.163", features = ["serde_derive", "rc"] } +serde = { version = "1.0.164", features = ["serde_derive", "rc"] } serde_with = "3.0.0" serde-big-array = "0.5.1" diff --git a/zebra-consensus/Cargo.toml b/zebra-consensus/Cargo.toml index 789cbfc4cda..f80c5602f46 100644 --- a/zebra-consensus/Cargo.toml +++ b/zebra-consensus/Cargo.toml @@ -38,7 +38,7 @@ chrono = { version = "0.4.26", default-features = false, features = ["clock", "s displaydoc = "0.2.4" lazy_static = "1.4.0" once_cell = "1.18.0" -serde = { version = "1.0.163", features = ["serde_derive"] } +serde = { version = "1.0.164", features = ["serde_derive"] } futures = "0.3.28" futures-util = "0.3.28" diff --git a/zebra-network/Cargo.toml b/zebra-network/Cargo.toml index 983e70374c1..9c66368e365 100644 --- a/zebra-network/Cargo.toml +++ b/zebra-network/Cargo.toml @@ -38,7 +38,7 @@ pin-project = "1.1.0" rand = { version = "0.8.5", package = "rand" } rayon = "1.7.0" regex = "1.8.4" -serde = { version = "1.0.163", features = ["serde_derive"] } +serde = { version = "1.0.164", features = ["serde_derive"] } tempfile = "3.5.0" thiserror = "1.0.40" diff --git a/zebra-node-services/Cargo.toml b/zebra-node-services/Cargo.toml index 3505d249b0f..2f888bbdaf3 100644 --- a/zebra-node-services/Cargo.toml +++ b/zebra-node-services/Cargo.toml @@ -35,7 +35,7 @@ zebra-chain = { path = "../zebra-chain" } color-eyre = { version = "0.6.2", optional = true } jsonrpc-core = { version = 
"18.0.0", optional = true } reqwest = { version = "0.11.18", optional = true } -serde = { version = "1.0.163", optional = true } +serde = { version = "1.0.164", optional = true } serde_json = { version = "1.0.95", optional = true } [dev-dependencies] @@ -43,5 +43,5 @@ serde_json = { version = "1.0.95", optional = true } color-eyre = "0.6.2" jsonrpc-core = "18.0.0" reqwest = "0.11.18" -serde = "1.0.163" +serde = "1.0.164" serde_json = "1.0.95" diff --git a/zebra-rpc/Cargo.toml b/zebra-rpc/Cargo.toml index 9f96681a35d..1cca24fcf28 100644 --- a/zebra-rpc/Cargo.toml +++ b/zebra-rpc/Cargo.toml @@ -53,7 +53,7 @@ tower = "0.4.13" tracing = "0.1.37" hex = { version = "0.4.3", features = ["serde"] } -serde = { version = "1.0.163", features = ["serde_derive"] } +serde = { version = "1.0.164", features = ["serde_derive"] } # Experimental feature getblocktemplate-rpcs rand = { version = "0.8.5", package = "rand", optional = true } diff --git a/zebra-state/Cargo.toml b/zebra-state/Cargo.toml index 5550769cace..e0e268f5a52 100644 --- a/zebra-state/Cargo.toml +++ b/zebra-state/Cargo.toml @@ -47,7 +47,7 @@ regex = "1.8.4" rlimit = "0.9.1" rocksdb = { version = "0.21.0", default_features = false, features = ["lz4"] } semver = "1.0.17" -serde = { version = "1.0.163", features = ["serde_derive"] } +serde = { version = "1.0.164", features = ["serde_derive"] } tempfile = "3.5.0" thiserror = "1.0.40" diff --git a/zebrad/Cargo.toml b/zebrad/Cargo.toml index 354cc519f35..239dc1ead34 100644 --- a/zebrad/Cargo.toml +++ b/zebrad/Cargo.toml @@ -121,7 +121,7 @@ chrono = { version = "0.4.26", default-features = false, features = ["clock", "s humantime-serde = "1.1.1" indexmap = "1.9.3" lazy_static = "1.4.0" -serde = { version = "1.0.163", features = ["serde_derive"] } +serde = { version = "1.0.164", features = ["serde_derive"] } toml = "0.7.4" futures = "0.3.28" From a18f47d5f683dbcd6dd0a1e4381621525eb51f41 Mon Sep 17 00:00:00 2001 From: teor Date: Fri, 9 Jun 2023 15:07:34 +1000 Subject: [PATCH 
067/265] fix(app): Stop panicking at startup when parsing the app version - release blocker (#6888) * Fix a startup panic in app_version() * Fix a potential RPC panic in get_info() * Fix typo --- zebra-rpc/src/methods.rs | 8 +-- zebrad/src/application.rs | 119 ++++++++++++++++++++++++++++---------- 2 files changed, 91 insertions(+), 36 deletions(-) diff --git a/zebra-rpc/src/methods.rs b/zebra-rpc/src/methods.rs index 4923d0513bd..56e8cae69b8 100644 --- a/zebra-rpc/src/methods.rs +++ b/zebra-rpc/src/methods.rs @@ -374,10 +374,10 @@ where // remove everything after the `+` character if any .split('+') .next() - .expect("always at least 1 slice") - // remove the previously added `v` character at the start since it's not a part of the user agent. - .strip_prefix('v') - .expect("we are always expecting the `v` prefix"); + .expect("always at least 1 slice"); + // Remove the previously added `v` character at the start since it's not a part of the user agent. + let release_version = release_version.strip_prefix('v').unwrap_or(release_version); + let user_agent = format!("/Zebra:{release_version}/"); let response = GetInfo { diff --git a/zebrad/src/application.rs b/zebrad/src/application.rs index 4b3809593ed..6f95d6393db 100644 --- a/zebrad/src/application.rs +++ b/zebrad/src/application.rs @@ -36,45 +36,98 @@ pub static APPLICATION: AppCell = AppCell::new(); /// /// For details, see pub fn app_version() -> Version { + // CARGO_PKG_VERSION is always a valid SemVer 2.0 version. 
const CARGO_PKG_VERSION: &str = env!("CARGO_PKG_VERSION"); - let vergen_git_describe: Option<&str> = option_env!("VERGEN_GIT_DESCRIBE"); - - match vergen_git_describe { - // change the git describe format to the semver 2.0 format - Some(mut vergen_git_describe) if !vergen_git_describe.is_empty() => { - // strip the leading "v", if present - if &vergen_git_describe[0..1] == "v" { - vergen_git_describe = &vergen_git_describe[1..]; - } - // split into tag, commit count, hash - let rparts: Vec<_> = vergen_git_describe.rsplitn(3, '-').collect(); - - match rparts.as_slice() { - // assume it's a cargo package version or a git tag with no hash - [_] | [_, _] => vergen_git_describe.parse().unwrap_or_else(|_| { - panic!( - "VERGEN_GIT_DESCRIBE without a hash {vergen_git_describe:?} must be valid semver 2.0" - ) - }), - - // it's the "git describe" format, which doesn't quite match SemVer 2.0 - [hash, commit_count, tag] => { - let semver_fix = format!("{tag}+{commit_count}.{hash}"); - semver_fix.parse().unwrap_or_else(|_| - panic!("Modified VERGEN_GIT_DESCRIBE {vergen_git_describe:?} -> {rparts:?} -> {semver_fix:?} must be valid. Note: CARGO_PKG_VERSION was {CARGO_PKG_VERSION:?}.")) - } + // VERGEN_GIT_DESCRIBE should be in the format: + // - v1.0.0-rc.9-6-g319b01bb84 + // - v1.0.0-6-g319b01bb84 + // but sometimes it is just a short commit hash. See #6879 for details. + // + // Currently it is the output of `git describe --tags --dirty --match='v*.*.*'`, + // or whatever is specified in zebrad/build.rs. + const VERGEN_GIT_DESCRIBE: Option<&str> = option_env!("VERGEN_GIT_DESCRIBE"); + + // We're using the same library as cargo uses internally, so this is guaranteed. 
+ let fallback_version = CARGO_PKG_VERSION.parse().unwrap_or_else(|error| { + panic!( + "unexpected invalid CARGO_PKG_VERSION: {error:?} in {CARGO_PKG_VERSION:?}, \ + should have been checked by cargo" + ) + }); + + // The SemVer 2.0 format is: + // - 1.0.0-rc.9+6.g319b01bb84 + // - 1.0.0+6.g319b01bb84 + // + // Or as a pattern: + // - version: major`.`minor`.`patch + // - optional pre-release: `-`tag[`.`tag ...] + // - optional build: `+`tag[`.`tag ...] + // change the git describe format to the semver 2.0 format + let Some(vergen_git_describe) = VERGEN_GIT_DESCRIBE else { + return fallback_version; + }; + + // Split using "git describe" separators. + let mut vergen_git_describe = vergen_git_describe.split('-').peekable(); + + // Check the "version core" part. + let version = vergen_git_describe.next(); + let Some(mut version) = version else { + return fallback_version; + }; + + // strip the leading "v", if present. + version = version.strip_prefix('v').unwrap_or(version); + + // If the initial version is empty, just a commit hash, or otherwise invalid. + if Version::parse(version).is_err() { + return fallback_version; + } - _ => unreachable!("split is limited to 3 parts"), - } - } - _ => CARGO_PKG_VERSION.parse().unwrap_or_else(|_| { - panic!("CARGO_PKG_VERSION {CARGO_PKG_VERSION:?} must be valid semver 2.0") - }), + let mut semver = version.to_string(); + + // Check if the next part is a pre-release or build part, + // but only consume it if it is a pre-release tag. + let Some(part) = vergen_git_describe.peek() else { + // No pre-release or build. + return semver.parse().expect("just checked semver is valid"); + }; + + if part.starts_with(char::is_alphabetic) { + // It's a pre-release tag. + semver.push('-'); + semver.push_str(part); + + // Consume the pre-release tag to move on to the build tags, if any. + let _ = vergen_git_describe.next(); + } + + // Check if the next part is a build part. 
+ let Some(build) = vergen_git_describe.peek() else { + // No build tags. + return semver.parse().unwrap_or(fallback_version); + }; + + if !build.starts_with(char::is_numeric) { + // It's not a valid "commit count" build tag from "git describe". + return fallback_version; } + + // Append the rest of the build parts with the correct `+` and `.` separators. + let build_parts: Vec<_> = vergen_git_describe.collect(); + let build_parts = build_parts.join("."); + + semver.push('+'); + semver.push_str(&build_parts); + + semver.parse().unwrap_or(fallback_version) } /// The Zebra current release version. +// +// TODO: deduplicate this code with release_version in zebra_rpc::get_info() pub fn release_version() -> String { app_version() .to_string() @@ -89,6 +142,8 @@ pub fn release_version() -> String { /// This must be a valid [BIP 14] user agent. /// /// [BIP 14]: https://github.com/bitcoin/bips/blob/master/bip-0014.mediawiki +// +// TODO: deduplicate this code with the user agent in zebra_rpc::get_info() pub fn user_agent() -> String { let release_version = release_version(); format!("/Zebra:{release_version}/") From 89bf87574428408f04d263d71173517b659aa964 Mon Sep 17 00:00:00 2001 From: teor Date: Fri, 9 Jun 2023 17:41:09 +1000 Subject: [PATCH 068/265] fix(deployment): Fix log file path and log colour (#6890) * Fix CD log file path handling * Variables don't substitute * Disable color escapes in Google Cloud logs * Use correct elif syntax --- .github/workflows/continous-delivery.yml | 5 +++-- docker/Dockerfile | 6 ++++++ docker/runtime-entrypoint.sh | 19 +++++++++++++++++-- 3 files changed, 26 insertions(+), 4 deletions(-) diff --git a/.github/workflows/continous-delivery.yml b/.github/workflows/continous-delivery.yml index a8487dc36d7..7a3e2f97e97 100644 --- a/.github/workflows/continous-delivery.yml +++ b/.github/workflows/continous-delivery.yml @@ -18,6 +18,7 @@ on: description: 'Network to deploy: Mainnet or Testnet' required: true log_file: + default: '' 
description: 'Log to a file path rather than standard output' no_cache: description: 'Disable the Docker cache for this build' @@ -217,7 +218,7 @@ jobs: --user-output-enabled \ --metadata google-logging-enabled=true,google-logging-use-fluentbit=true \ --container-image ${{ vars.GAR_BASE }}/zebrad@${{ needs.build.outputs.image_digest }} \ - --container-env "NETWORK=${{ matrix.network }},LOG_FILE=${{ vars.CD_LOG_FILE }},SENTRY_DSN=${{ vars.SENTRY_DSN }},SHORT_SHA=${{ env.GITHUB_SHA_SHORT }}" \ + --container-env "NETWORK=${{ matrix.network }},LOG_FILE=${{ vars.CD_LOG_FILE }},LOG_COLOR=false,SENTRY_DSN=${{ vars.SENTRY_DSN }},SHORT_SHA=${{ env.GITHUB_SHA_SHORT }}" \ --create-disk=name=zebrad-cache-${{ env.GITHUB_SHA_SHORT }}-${NETWORK},auto-delete=yes,size=300GB,type=pd-ssd \ --container-mount-disk=mount-path="/zebrad-cache",name=zebrad-cache-${{ env.GITHUB_SHA_SHORT }}-${NETWORK} \ --machine-type ${{ vars.GCP_SMALL_MACHINE }} \ @@ -318,7 +319,7 @@ jobs: --container-stdin \ --container-tty \ --container-image ${{ vars.GAR_BASE }}/zebrad@${{ needs.build.outputs.image_digest }} \ - --container-env "NETWORK=${{ inputs.network }},LOG_FILE=${{ inputs.log_file || vars.CD_LOG_FILE }},SENTRY_DSN=${{ vars.SENTRY_DSN }},SHORT_SHA=${{ env.GITHUB_SHA_SHORT }}" \ + --container-env "NETWORK=${{ inputs.network }},LOG_FILE=${{ inputs.log_file }},LOG_COLOR=false,SENTRY_DSN=${{ vars.SENTRY_DSN }},SHORT_SHA=${{ env.GITHUB_SHA_SHORT }}" \ --create-disk=auto-delete=yes,size=300GB,type=pd-ssd \ --create-disk=name=zebrad-cache-${{ env.GITHUB_SHA_SHORT }}-${NETWORK},auto-delete=yes,size=300GB,type=pd-ssd \ --container-mount-disk=mount-path='/zebrad-cache',name=zebrad-cache-${{ env.GITHUB_SHA_SHORT }}-${NETWORK} \ diff --git a/docker/Dockerfile b/docker/Dockerfile index 012fa674ce4..f0f4ae52f3e 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -162,6 +162,12 @@ ENV RPC_PORT ${RPC_PORT} ARG LOG_FILE ENV LOG_FILE ${LOG_FILE} +# Zebra automatically detects if it is attached to a terminal, 
and uses colored output. +# Set this to 'true' to force using color even if the output is not a terminal. +# Set this to 'false' to disable using color even if the output is a terminal. +ARG LOG_COLOR +ENV LOG_COLOR ${LOG_COLOR} + # Expose configured ports EXPOSE 8233 18233 $RPC_PORT diff --git a/docker/runtime-entrypoint.sh b/docker/runtime-entrypoint.sh index a91f4ea21d7..fbac0849e20 100755 --- a/docker/runtime-entrypoint.sh +++ b/docker/runtime-entrypoint.sh @@ -53,13 +53,28 @@ listen_addr = "0.0.0.0:${RPC_PORT}" EOF fi +if [[ -n "$LOG_FILE" ]] || [[ -n "$LOG_COLOR" ]]; then +cat <> "$ZEBRA_CONF_PATH" +[tracing] +#endpoint_addr = "0.0.0.0:3000" +EOF +fi + if [[ -n "$LOG_FILE" ]]; then mkdir -p $(dirname "$LOG_FILE") cat <> "$ZEBRA_CONF_PATH" -[tracing] log_file = "${LOG_FILE}" -#endpoint_addr = "0.0.0.0:3000" +EOF +fi + +if [[ "$LOG_COLOR" = "true" ]]; then +cat <> "$ZEBRA_CONF_PATH" +force_use_color = true +EOF +elif [[ "$LOG_COLOR" = "false" ]]; then +cat <> "$ZEBRA_CONF_PATH" +use_color = false EOF fi From 954ff2ef420e5c2d74919add1f0f4e861b38a08c Mon Sep 17 00:00:00 2001 From: Deirdre Connolly Date: Fri, 9 Jun 2023 06:03:58 -0400 Subject: [PATCH 069/265] build(deps): Upgrade to ed25519-zebra 4.0.0 (#6881) * build: Upgrade to ed5519-zebra 4.0.0 * skip-tree hashbrown =0.13.2 because ed25519-zebra is 0.14.0 and a dep of metrics-exporter-prometheus is 0.13.2 * Use correct versions in deny.toml * Oops turns out we need both exceptions --------- Co-authored-by: teor --- Cargo.lock | 152 +++++++++++++++++++++++------------------ deny.toml | 6 +- tower-batch/Cargo.toml | 2 +- zebra-chain/Cargo.toml | 2 +- 4 files changed, 91 insertions(+), 71 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 87710ff552c..09d755bb46a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -35,7 +35,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "55bfb86e57d13c06e482c570826ddcddcc8f07fab916760e8911141d4fda8b62" dependencies = [ "ident_case", - "proc-macro2 
1.0.59", + "proc-macro2 1.0.60", "quote 1.0.28", "syn 1.0.109", "synstructure", @@ -118,6 +118,12 @@ dependencies = [ "memchr", ] +[[package]] +name = "allocator-api2" +version = "0.2.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c4f263788a35611fba42eb41ff811c5d0360c58b97402570312a350736e2542e" + [[package]] name = "android-tzdata" version = "0.1.1" @@ -251,7 +257,7 @@ version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" dependencies = [ - "proc-macro2 1.0.59", + "proc-macro2 1.0.60", "quote 1.0.28", "syn 2.0.18", ] @@ -262,7 +268,7 @@ version = "0.1.68" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b9ccdd8f2a161be9bd5c023df56f1b2a0bd1d83872ae53b71a84a12c9bf6e842" dependencies = [ - "proc-macro2 1.0.59", + "proc-macro2 1.0.60", "quote 1.0.28", "syn 2.0.18", ] @@ -418,7 +424,7 @@ dependencies = [ "log", "peeking_take_while", "prettyplease 0.2.6", - "proc-macro2 1.0.59", + "proc-macro2 1.0.60", "quote 1.0.28", "regex", "rustc-hash", @@ -797,7 +803,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b8cd2b2a819ad6eec39e8f1d6b53001af1e5469f8c177579cdaeb313115b825f" dependencies = [ "heck 0.4.1", - "proc-macro2 1.0.59", + "proc-macro2 1.0.60", "quote 1.0.28", "syn 2.0.18", ] @@ -1036,19 +1042,6 @@ dependencies = [ "typenum", ] -[[package]] -name = "curve25519-dalek" -version = "3.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b9fdf9972b2bd6af2d913799d9ebc165ea4d2e65878e329d9c6b372c4491b61" -dependencies = [ - "byteorder", - "digest 0.9.0", - "rand_core 0.5.1", - "subtle", - "zeroize", -] - [[package]] name = "curve25519-dalek" version = "4.0.0-rc.2" @@ -1056,6 +1049,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "03d928d978dbec61a1167414f5ec534f24bea0d7a0d24dd9b6233d3d8223e585" dependencies = [ 
"cfg-if 1.0.0", + "digest 0.10.7", "fiat-crypto", "packed_simd_2", "platforms", @@ -1083,7 +1077,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b677bcf759c79656defee3b0374aeff759122d3fc80edb0b77eeb0fd06e8fd20" dependencies = [ "codespan-reporting", - "proc-macro2 1.0.59", + "proc-macro2 1.0.60", "quote 1.0.28", "syn 2.0.18", ] @@ -1100,7 +1094,7 @@ version = "1.0.94" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2345488264226bf682893e25de0769f3360aac9957980ec49361b083ddaa5bc5" dependencies = [ - "proc-macro2 1.0.59", + "proc-macro2 1.0.60", "quote 1.0.28", "syn 2.0.18", ] @@ -1133,7 +1127,7 @@ checksum = "859d65a907b6852c9361e3185c862aae7fafd2887876799fa55f5f99dc40d610" dependencies = [ "fnv", "ident_case", - "proc-macro2 1.0.59", + "proc-macro2 1.0.60", "quote 1.0.28", "strsim 0.10.0", "syn 1.0.109", @@ -1147,7 +1141,7 @@ checksum = "ab8bfa2e259f8ee1ce5e97824a3c55ec4404a0d772ca7fa96bf19f0752a046eb" dependencies = [ "fnv", "ident_case", - "proc-macro2 1.0.59", + "proc-macro2 1.0.60", "quote 1.0.28", "strsim 0.10.0", "syn 2.0.18", @@ -1241,7 +1235,7 @@ version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "487585f4d0c6655fe74905e2504d8ad6908e4db67f744eb140876906c2f3175d" dependencies = [ - "proc-macro2 1.0.59", + "proc-macro2 1.0.60", "quote 1.0.28", "syn 2.0.18", ] @@ -1252,18 +1246,29 @@ version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "68b0cf012f1230e43cd00ebb729c6bb58707ecfa8ad08b52ef3a4ccd2697fc30" +[[package]] +name = "ed25519" +version = "2.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5fb04eee5d9d907f29e80ee6b0e78f7e2c82342c63e3580d8c4f69d9d5aad963" +dependencies = [ + "serde", + "signature", +] + [[package]] name = "ed25519-zebra" -version = "3.1.0" +version = "4.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"7c24f403d068ad0b359e577a77f92392118be3f3c927538f2bb544a5ecd828c6" +checksum = "6af5e1fb700a3c779c7a7ed25c8c0b7f193db101de3773ac46e704bcb882d772" dependencies = [ - "curve25519-dalek 3.2.0", - "hashbrown 0.12.3", + "curve25519-dalek", + "ed25519", + "hashbrown 0.14.0", "hex", "rand_core 0.6.4", "serde", - "sha2 0.9.9", + "sha2 0.10.6", "zeroize", ] @@ -1545,7 +1550,7 @@ version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72" dependencies = [ - "proc-macro2 1.0.59", + "proc-macro2 1.0.60", "quote 1.0.28", "syn 2.0.18", ] @@ -1734,9 +1739,6 @@ name = "hashbrown" version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" -dependencies = [ - "ahash 0.7.6", -] [[package]] name = "hashbrown" @@ -1747,6 +1749,16 @@ dependencies = [ "ahash 0.8.3", ] +[[package]] +name = "hashbrown" +version = "0.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2c6201b9ff9fd90a5a3bac2e56a830d0caa509576f0e503818ee82c181b3437a" +dependencies = [ + "ahash 0.8.3", + "allocator-api2", +] + [[package]] name = "hdrhistogram" version = "7.5.2" @@ -2017,7 +2029,7 @@ version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "11d7a9f6330b71fea57921c9b61c47ee6e84f72d394754eff6163ae67e7395eb" dependencies = [ - "proc-macro2 1.0.59", + "proc-macro2 1.0.60", "quote 1.0.28", "syn 1.0.109", ] @@ -2197,7 +2209,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5b939a78fa820cdfcb7ee7484466746a7377760970f6f9c6fe19f9edcc8a38d2" dependencies = [ "proc-macro-crate 0.1.5", - "proc-macro2 1.0.59", + "proc-macro2 1.0.60", "quote 1.0.28", "syn 1.0.109", ] @@ -2488,7 +2500,7 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"731f8ecebd9f3a4aa847dfe75455e4757a45da40a7793d2f0b1f9b6ed18b23f3" dependencies = [ - "proc-macro2 1.0.59", + "proc-macro2 1.0.60", "quote 1.0.28", "syn 1.0.109", ] @@ -2499,7 +2511,7 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ddece26afd34c31585c74a4db0630c376df271c285d682d1e55012197830b6df" dependencies = [ - "proc-macro2 1.0.59", + "proc-macro2 1.0.60", "quote 1.0.28", "syn 2.0.18", ] @@ -2754,7 +2766,7 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ - "proc-macro2 1.0.59", + "proc-macro2 1.0.60", "quote 1.0.28", "syn 2.0.18", ] @@ -2884,7 +2896,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "86b26a931f824dd4eca30b3e43bb4f31cd5f0d3a403c5f5ff27106b805bfde7b" dependencies = [ "proc-macro-crate 1.3.1", - "proc-macro2 1.0.59", + "proc-macro2 1.0.60", "quote 1.0.28", "syn 1.0.109", ] @@ -3013,7 +3025,7 @@ checksum = "6c435bf1076437b851ebc8edc3a18442796b30f1728ffea6262d59bbe28b077e" dependencies = [ "pest", "pest_meta", - "proc-macro2 1.0.59", + "proc-macro2 1.0.60", "quote 1.0.28", "syn 2.0.18", ] @@ -3054,7 +3066,7 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "39407670928234ebc5e6e580247dd567ad73a3578460c5990f9503df207e8f07" dependencies = [ - "proc-macro2 1.0.59", + "proc-macro2 1.0.60", "quote 1.0.28", "syn 2.0.18", ] @@ -3149,7 +3161,7 @@ version = "0.1.25" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6c8646e95016a7a6c4adea95bafa8a16baab64b583356217f2c85db4a39d9a86" dependencies = [ - "proc-macro2 1.0.59", + "proc-macro2 1.0.60", "syn 1.0.109", ] @@ -3159,7 +3171,7 @@ version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3b69d39aab54d069e7f2fe8cb970493e7834601ca2d8c65fd7bbd183578080d1" dependencies = [ - "proc-macro2 
1.0.59", + "proc-macro2 1.0.60", "syn 2.0.18", ] @@ -3200,7 +3212,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" dependencies = [ "proc-macro-error-attr", - "proc-macro2 1.0.59", + "proc-macro2 1.0.60", "quote 1.0.28", "syn 1.0.109", "version_check", @@ -3212,7 +3224,7 @@ version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" dependencies = [ - "proc-macro2 1.0.59", + "proc-macro2 1.0.60", "quote 1.0.28", "version_check", ] @@ -3228,9 +3240,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.59" +version = "1.0.60" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6aeca18b86b413c660b781aa319e4e2648a3e6f9eadc9b47e9038e6fe9f3451b" +checksum = "dec2b086b7a862cf4de201096214fa870344cf922b2b30c167badb3af3195406" dependencies = [ "unicode-ident", ] @@ -3306,7 +3318,7 @@ checksum = "e5d2d8d10f3c6ded6da8b05b5fb3b8a5082514344d56c9f871412d29b4e075b4" dependencies = [ "anyhow", "itertools", - "proc-macro2 1.0.59", + "proc-macro2 1.0.60", "quote 1.0.28", "syn 1.0.109", ] @@ -3369,7 +3381,7 @@ version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "608c156fd8e97febc07dc9c2e2c80bf74cfc6ef26893eae3daf8bc2bc94a4b7f" dependencies = [ - "proc-macro2 1.0.59", + "proc-macro2 1.0.60", "quote 1.0.28", "syn 1.0.109", ] @@ -3389,7 +3401,7 @@ version = "1.0.28" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1b9ab9c7eadfd8df19006f1cf1a4aed13540ed5cbc047010ece5826e10825488" dependencies = [ - "proc-macro2 1.0.59", + "proc-macro2 1.0.60", ] [[package]] @@ -4045,7 +4057,7 @@ version = "1.0.164" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d9735b638ccc51c28bf6914d90a2e9725b377144fc612c49a611fddd1b631d68" dependencies = [ - "proc-macro2 1.0.59", + 
"proc-macro2 1.0.60", "quote 1.0.28", "syn 2.0.18", ] @@ -4116,7 +4128,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e182d6ec6f05393cc0e5ed1bf81ad6db3a8feedf8ee515ecdd369809bcce8082" dependencies = [ "darling 0.13.4", - "proc-macro2 1.0.59", + "proc-macro2 1.0.60", "quote 1.0.28", "syn 1.0.109", ] @@ -4128,7 +4140,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "edc7d5d3932fb12ce722ee5e64dd38c504efba37567f0c402f6ca728c3b8b070" dependencies = [ "darling 0.20.1", - "proc-macro2 1.0.59", + "proc-macro2 1.0.60", "quote 1.0.28", "syn 2.0.18", ] @@ -4181,6 +4193,12 @@ dependencies = [ "libc", ] +[[package]] +name = "signature" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5e1788eed21689f9cf370582dfc467ef36ed9c707f073528ddafa8d83e3b8500" + [[package]] name = "similar" version = "2.2.1" @@ -4235,7 +4253,7 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5bdfb59103e43a0f99a346b57860d50f2138a7008d08acd964e9ac0fef3ae9a5" dependencies = [ - "proc-macro2 1.0.59", + "proc-macro2 1.0.60", "quote 1.0.28", "syn 1.0.109", ] @@ -4298,7 +4316,7 @@ checksum = "dcb5ae327f9cc13b68763b5749770cb9e048a99bd9dfdfa58d0cf05d5f64afe0" dependencies = [ "heck 0.3.3", "proc-macro-error", - "proc-macro2 1.0.59", + "proc-macro2 1.0.60", "quote 1.0.28", "syn 1.0.109", ] @@ -4326,7 +4344,7 @@ version = "1.0.109" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" dependencies = [ - "proc-macro2 1.0.59", + "proc-macro2 1.0.60", "quote 1.0.28", "unicode-ident", ] @@ -4337,7 +4355,7 @@ version = "2.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "32d41677bcbe24c20c52e7c70b0d8db04134c5d1066bf98662e2871ad200ea3e" dependencies = [ - "proc-macro2 1.0.59", + "proc-macro2 1.0.60", "quote 1.0.28", "unicode-ident", ] @@ 
-4354,7 +4372,7 @@ version = "0.12.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f36bdaa60a83aca3921b5259d5400cbf5e90fc51931376a9bd4a0eb79aa7210f" dependencies = [ - "proc-macro2 1.0.59", + "proc-macro2 1.0.60", "quote 1.0.28", "syn 1.0.109", "unicode-xid 0.2.4", @@ -4413,7 +4431,7 @@ version = "1.0.40" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f9456a42c5b0d803c8cd86e73dd7cc9edd429499f37a3550d286d5e86720569f" dependencies = [ - "proc-macro2 1.0.59", + "proc-macro2 1.0.60", "quote 1.0.28", "syn 2.0.18", ] @@ -4430,9 +4448,9 @@ dependencies = [ [[package]] name = "time" -version = "0.3.21" +version = "0.3.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f3403384eaacbca9923fa06940178ac13e4edb725486d70e8e15881d0c836cc" +checksum = "ea9e1b3cf1243ae005d9e74085d4d542f3125458f3a81af210d901dcd7411efd" dependencies = [ "itoa", "serde", @@ -4516,7 +4534,7 @@ version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" dependencies = [ - "proc-macro2 1.0.59", + "proc-macro2 1.0.60", "quote 1.0.28", "syn 2.0.18", ] @@ -4672,7 +4690,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a6fdaae4c2c638bb70fe42803a26fbd6fc6ac8c72f5c59f67ecc2a2dcabf4b07" dependencies = [ "prettyplease 0.1.25", - "proc-macro2 1.0.59", + "proc-macro2 1.0.60", "prost-build", "quote 1.0.28", "syn 1.0.109", @@ -4791,7 +4809,7 @@ version = "0.1.24" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0f57e3ca2a01450b1a921183a9c9cbfda207fd822cef4ccb00a65402cbba7a74" dependencies = [ - "proc-macro2 1.0.59", + "proc-macro2 1.0.60", "quote 1.0.28", "syn 2.0.18", ] @@ -5161,7 +5179,7 @@ dependencies = [ "bumpalo", "log", "once_cell", - "proc-macro2 1.0.59", + "proc-macro2 1.0.60", "quote 1.0.28", "syn 2.0.18", "wasm-bindgen-shared", @@ -5195,7 +5213,7 @@ 
version = "0.2.86" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e128beba882dd1eb6200e1dc92ae6c5dbaa4311aa7bb211ca035779e5efc39f8" dependencies = [ - "proc-macro2 1.0.59", + "proc-macro2 1.0.60", "quote 1.0.28", "syn 2.0.18", "wasm-bindgen-backend", @@ -5468,7 +5486,7 @@ version = "2.0.0-rc.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fabd6e16dd08033932fc3265ad4510cc2eab24656058a6dcb107ffe274abcc95" dependencies = [ - "curve25519-dalek 4.0.0-rc.2", + "curve25519-dalek", "rand_core 0.6.4", "serde", "zeroize", @@ -5990,7 +6008,7 @@ version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ - "proc-macro2 1.0.59", + "proc-macro2 1.0.60", "quote 1.0.28", "syn 2.0.18", ] diff --git a/deny.toml b/deny.toml index aa2f1a2cf9b..30c7a846928 100644 --- a/deny.toml +++ b/deny.toml @@ -55,8 +55,10 @@ skip-tree = [ # ed25519-zebra/hashbrown: https://github.com/ZcashFoundation/ed25519-zebra/pull/65 { name = "ahash", version = "=0.7.6" }, - # wait for ed25519-zebra to upgrade - { name = "curve25519-dalek", version = "=3.2.0" }, + # wait for indexmap, toml_edit, serde_json, tower to upgrade + { name = "hashbrown", version = "=0.12.3" }, + # wait for metrics-exporter-prometheus to upgrade + { name = "hashbrown", version = "=0.13.2" }, # ECC crates diff --git a/tower-batch/Cargo.toml b/tower-batch/Cargo.toml index cef74a39761..611b30e818a 100644 --- a/tower-batch/Cargo.toml +++ b/tower-batch/Cargo.toml @@ -22,7 +22,7 @@ color-eyre = "0.6.2" # Enable a feature that makes tinyvec compile much faster. 
tinyvec = { version = "1.6.0", features = ["rustc_1_55"] } -ed25519-zebra = "3.1.0" +ed25519-zebra = "4.0.0" rand = { version = "0.8.5", package = "rand" } tokio = { version = "1.28.2", features = ["full", "tracing", "test-util"] } diff --git a/zebra-chain/Cargo.toml b/zebra-chain/Cargo.toml index ec2e1d070b8..f1e24ad2b45 100644 --- a/zebra-chain/Cargo.toml +++ b/zebra-chain/Cargo.toml @@ -90,7 +90,7 @@ itertools = "0.10.5" rayon = "1.7.0" # ZF deps -ed25519-zebra = "3.1.0" +ed25519-zebra = "4.0.0" redjubjub = "0.7.0" reddsa = "0.5.0" From 0c4e1bd4ed2431d1a2db9cc902dc9037e92e30a1 Mon Sep 17 00:00:00 2001 From: teor Date: Fri, 9 Jun 2023 20:04:21 +1000 Subject: [PATCH 070/265] fix(rust): Declare the correct compatibility with older Rust versions (#6892) * Declare the correct compatibility with older Rust versions * Declare the correct supported Rust version for compiling zebrad * Tweak wording Co-authored-by: Arya --------- Co-authored-by: Arya --- README.md | 7 +++---- zebrad/Cargo.toml | 6 +++--- 2 files changed, 6 insertions(+), 7 deletions(-) diff --git a/README.md b/README.md index 893db0a1a61..87fd5092292 100644 --- a/README.md +++ b/README.md @@ -67,11 +67,10 @@ Building Zebra requires [Rust](https://www.rust-lang.org/tools/install), [pkg-config](http://pkgconf.org/), and a C++ compiler. Zebra is tested with the latest `stable` Rust version. Earlier versions are not -supported or tested. Note that Zebra's code currently uses features introduced -in Rust 1.68, or any later stable release. +supported or tested. Any Zebra release can start depending on new features in the +latest stable Rust. -Every few weeks, we release a [new Zebra -version](https://github.com/ZcashFoundation/zebra/releases). +Every few weeks, we release a [new Zebra version](https://github.com/ZcashFoundation/zebra/releases). Below are quick summaries for installing the dependencies on your machine. 
diff --git a/zebrad/Cargo.toml b/zebrad/Cargo.toml index 239dc1ead34..71d57a0a498 100644 --- a/zebrad/Cargo.toml +++ b/zebrad/Cargo.toml @@ -9,9 +9,9 @@ repository = "https://github.com/ZcashFoundation/zebra" # Settings that impact compilation edition = "2021" -# Zebra is only supported on the latest stable Rust version. Some earlier versions might work. -# Zebra's code uses features introduced in Rust 1.68, or any later stable release. -rust-version = "1.68" +# Zebra is only supported on the latest stable Rust version. See the README for details. +# Any Zebra release can break compatibility with older Rust versions. +rust-version = "1.66" # Settings that impact runtime behaviour From 3390f03be4ff63e9dca5ceef4cf681960b8bcd7d Mon Sep 17 00:00:00 2001 From: teor Date: Sat, 10 Jun 2023 01:30:37 +1000 Subject: [PATCH 071/265] Stop doing another full sync on every main branch push (#6895) --- .github/workflows/continous-delivery.yml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/continous-delivery.yml b/.github/workflows/continous-delivery.yml index 7a3e2f97e97..1359e50041b 100644 --- a/.github/workflows/continous-delivery.yml +++ b/.github/workflows/continous-delivery.yml @@ -25,10 +25,10 @@ on: required: false type: boolean default: false - - push: - branches: - - main + # Temporarily disabled to reduce network load, see #6894. 
+ #push: + # branches: + # - main release: types: - published From d67b37a7a13e68d92c43bad068cda556d0ac0c5e Mon Sep 17 00:00:00 2001 From: teor Date: Sat, 10 Jun 2023 01:58:38 +1000 Subject: [PATCH 072/265] Fix release-checklist.md (#6883) --- .../release-checklist.md | 19 +++++++------------ 1 file changed, 7 insertions(+), 12 deletions(-) diff --git a/.github/PULL_REQUEST_TEMPLATE/release-checklist.md b/.github/PULL_REQUEST_TEMPLATE/release-checklist.md index 884b56310bc..1d7a1e4f9e5 100644 --- a/.github/PULL_REQUEST_TEMPLATE/release-checklist.md +++ b/.github/PULL_REQUEST_TEMPLATE/release-checklist.md @@ -61,7 +61,7 @@ You can use `fastmod` to interactively find and replace versions. For example, you can do something like: ``` -fastmod --extensions rs,toml,md --fixed-strings '1.0.0-rc.9' '1.0.0' zebrad README.md zebra-network/src/constants.rs book/src/user/docker.md +fastmod --extensions rs,toml,md --fixed-strings '1.0.0' '1.1.0' zebrad README.md zebra-network/src/constants.rs book/src/user/docker.md fastmod --extensions rs,toml,md --fixed-strings '1.0.0-beta.15' '1.0.0-beta.16' zebra-* fastmod --extensions rs,toml,md --fixed-strings '0.2.30' '0.2.31' tower-batch tower-fallback cargo build @@ -94,7 +94,8 @@ Sometimes `dependabot` misses some dependency updates, or we accidentally turned Here's how we make sure we got everything: - [ ] Run `cargo update` on the latest `main` branch, and keep the output - [ ] If needed, update [deny.toml](https://github.com/ZcashFoundation/zebra/blob/main/book/src/dev/continuous-integration.md#fixing-duplicate-dependencies-in-check-denytoml-bans) -- [ ] Open a separate PR with the changes, and add the output of `cargo update` to that PR as a comment +- [ ] Open a separate PR with the changes +- [ ] Add the output of `cargo update` to that PR as a comment ## Change Log @@ -136,8 +137,6 @@ Needed for the end of support feature. 
Please update the following constants [in - So for example if you think the release will be tagged somewhere in the next 3 days you can add `1152 * 3` to the current tip height and use that value here.
-- [ ] `EOS_PANIC_AFTER` (optional) - Replace if you want the release to be valid for a different numbers of days into the future. The default here is 120 days. - ## Create the Release ### Create the Release PR @@ -148,7 +147,7 @@ and the updated changelog: - [ ] Make sure the PRs with the new checkpoint hashes and missed dependencies are already merged - [ ] Push the version increments, the updated changelog and the release constants into a branch (for example: `bump-v1.0.0` - this needs to be different to the tag name) -- [ ] Create a release PR by adding `&template=release-checklist.md` to the comparing url ([Example](https://github.com/ZcashFoundation/zebra/compare/v1.0.0-rc.0-release?expand=1&template=release-checklist.md)). +- [ ] Create a release PR by adding `&template=release-checklist.md` to the comparing url ([Example](https://github.com/ZcashFoundation/zebra/compare/bump-v1.0.0?expand=1&template=release-checklist.md)). - [ ] Add the list of deleted changelog entries as a comment to make reviewing easier. - [ ] Freeze the [`batched` queue](https://dashboard.mergify.com/github/ZcashFoundation/repo/zebra/queues) using Mergify. - [ ] Mark all the release PRs as `Critical` priority, so they go in the `urgent` Mergify queue. @@ -175,10 +174,9 @@ and the updated changelog: - [ ] Wait until the [pre-release deployment machines have successfully launched](https://github.com/ZcashFoundation/zebra/actions/workflows/continous-delivery.yml) - [ ] [Publish the release to GitHub](https://github.com/ZcashFoundation/zebra/releases) by disabling 'pre-release', then clicking "Set as the latest release" - [ ] Wait until [the Docker images have been published](https://github.com/ZcashFoundation/zebra/actions/workflows/release-binaries.yml) -- [ ] Test the Docker image using `docker run --tty --interactive zfnd/zebra`, +- [ ] Test the Docker image using `docker run --tty --interactive zfnd/zebra:v1.0.0`, and put the output in a comment on the PR. 
- (You can use [gcloud cloud shell](https://console.cloud.google.com/home/dashboard?cloudshell=true)) - + (You can use [gcloud cloud shell](https://console.cloud.google.com/home/dashboard?cloudshell=true)) - [ ] Un-freeze the [`batched` queue](https://dashboard.mergify.com/github/ZcashFoundation/repo/zebra/queues) using Mergify. @@ -186,9 +184,6 @@ and the updated changelog: - [ ] Post a summary of the important changes in the release in the `#arborist` and `#communications` Slack channels -If the release contains new features (`major` or `minor`), or high-priority bug fixes: -- [ ] Ask the team about doing a blog post - ## Release Failures If building or running fails after tagging: @@ -196,7 +191,7 @@ If building or running fails after tagging:
1. Fix the bug that caused the failure -2. Increment versions again, following these instructions from the start +2. Increment the patch version again, following these instructions from the start 3. Update the code and documentation with a **new** git tag 4. Update `CHANGELOG.md` with details about the fix 5. Tag a **new** release From 17d36ffc7a4efa521f76771db22824aba2f3d475 Mon Sep 17 00:00:00 2001 From: Gustavo Valverde Date: Fri, 9 Jun 2023 15:16:39 -0400 Subject: [PATCH 073/265] fix(deploy): allow the container to raise in MIGs (#6893) * fix(deploy): allow the container to raise in MIGs * fix(docker): add the `ZEBRA_CACHED_STATE_DIR` as a default `ENV` This no longer requires the env variable to be defined in other places, unless we're changing the default configuration --- .github/workflows/continous-delivery.yml | 16 +++++++++------- docker/Dockerfile | 4 ++++ docker/runtime-entrypoint.sh | 2 +- 3 files changed, 14 insertions(+), 8 deletions(-) diff --git a/.github/workflows/continous-delivery.yml b/.github/workflows/continous-delivery.yml index 1359e50041b..e33c76b9f31 100644 --- a/.github/workflows/continous-delivery.yml +++ b/.github/workflows/continous-delivery.yml @@ -212,15 +212,18 @@ jobs: - name: Create instance template for ${{ matrix.network }} run: | gcloud compute instance-templates create-with-container zebrad-${{ needs.versioning.outputs.major_version || env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }}-${NETWORK} \ + --boot-disk-size 300GB \ --boot-disk-type=pd-ssd \ --image-project=cos-cloud \ --image-family=cos-stable \ --user-output-enabled \ - --metadata google-logging-enabled=true,google-logging-use-fluentbit=true \ + --metadata google-logging-enabled=true,google-logging-use-fluentbit=true,google-monitoring-enabled=true \ + --container-stdin \ + --container-tty \ --container-image ${{ vars.GAR_BASE }}/zebrad@${{ needs.build.outputs.image_digest }} \ --container-env "NETWORK=${{ matrix.network }},LOG_FILE=${{ vars.CD_LOG_FILE 
}},LOG_COLOR=false,SENTRY_DSN=${{ vars.SENTRY_DSN }},SHORT_SHA=${{ env.GITHUB_SHA_SHORT }}" \ - --create-disk=name=zebrad-cache-${{ env.GITHUB_SHA_SHORT }}-${NETWORK},auto-delete=yes,size=300GB,type=pd-ssd \ - --container-mount-disk=mount-path="/zebrad-cache",name=zebrad-cache-${{ env.GITHUB_SHA_SHORT }}-${NETWORK} \ + --create-disk=name=zebrad-cache-${{ env.GITHUB_SHA_SHORT }}-${NETWORK},device-name=zebrad-cache-${{ env.GITHUB_SHA_SHORT }}-${NETWORK},auto-delete=yes,size=300GB,type=pd-ssd,mode=rw \ + --container-mount-disk=mount-path='/var/cache/zebrad-cache',name=zebrad-cache-${{ env.GITHUB_SHA_SHORT }}-${NETWORK},mode=rw \ --machine-type ${{ vars.GCP_SMALL_MACHINE }} \ --network-interface=subnet=${{ vars.GCP_SUBNETWORK }} \ --service-account ${{ vars.GCP_DEPLOYMENTS_SA }} \ @@ -315,14 +318,13 @@ jobs: --image-project=cos-cloud \ --image-family=cos-stable \ --user-output-enabled \ - --metadata google-logging-enabled=true,google-logging-use-fluentbit=true \ + --metadata google-logging-enabled=true,google-logging-use-fluentbit=true,google-monitoring-enabled=true \ --container-stdin \ --container-tty \ --container-image ${{ vars.GAR_BASE }}/zebrad@${{ needs.build.outputs.image_digest }} \ --container-env "NETWORK=${{ inputs.network }},LOG_FILE=${{ inputs.log_file }},LOG_COLOR=false,SENTRY_DSN=${{ vars.SENTRY_DSN }},SHORT_SHA=${{ env.GITHUB_SHA_SHORT }}" \ - --create-disk=auto-delete=yes,size=300GB,type=pd-ssd \ - --create-disk=name=zebrad-cache-${{ env.GITHUB_SHA_SHORT }}-${NETWORK},auto-delete=yes,size=300GB,type=pd-ssd \ - --container-mount-disk=mount-path='/zebrad-cache',name=zebrad-cache-${{ env.GITHUB_SHA_SHORT }}-${NETWORK} \ + --create-disk=name=zebrad-cache-${{ env.GITHUB_SHA_SHORT }}-${NETWORK},device-name=zebrad-cache-${{ env.GITHUB_SHA_SHORT }}-${NETWORK},auto-delete=yes,size=300GB,type=pd-ssd,mode=rw \ + --container-mount-disk=mount-path='/var/cache/zebrad-cache',name=zebrad-cache-${{ env.GITHUB_SHA_SHORT }}-${NETWORK},mode=rw \ --machine-type ${{ 
vars.GCP_SMALL_MACHINE }} \ --network-interface=subnet=${{ vars.GCP_SUBNETWORK }} \ --service-account ${{ vars.GCP_DEPLOYMENTS_SA }} \ diff --git a/docker/Dockerfile b/docker/Dockerfile index f0f4ae52f3e..36c6751b417 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -162,6 +162,10 @@ ENV RPC_PORT ${RPC_PORT} ARG LOG_FILE ENV LOG_FILE ${LOG_FILE} +# Set this to change the default cached state directory +ARG ZEBRA_CACHED_STATE_DIR +ENV ZEBRA_CACHED_STATE_DIR ${ZEBRA_CACHED_STATE_DIR:-/var/cache/zebrad-cache} + # Zebra automatically detects if it is attached to a terminal, and uses colored output. # Set this to 'true' to force using color even if the output is not a terminal. # Set this to 'false' to disable using color even if the output is a terminal. diff --git a/docker/runtime-entrypoint.sh b/docker/runtime-entrypoint.sh index fbac0849e20..98b1c9447d6 100755 --- a/docker/runtime-entrypoint.sh +++ b/docker/runtime-entrypoint.sh @@ -40,7 +40,7 @@ network = "$NETWORK" listen_addr = "0.0.0.0" [state] -cache_dir = "/zebrad-cache" +cache_dir = "$ZEBRA_CACHED_STATE_DIR" [metrics] #endpoint_addr = "0.0.0.0:9999" From 492132d68447f9ff6c4e8156d11bf3330a240649 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 9 Jun 2023 21:19:06 +0000 Subject: [PATCH 074/265] build(deps): bump clap from 4.3.2 to 4.3.3 (#6904) Bumps [clap](https://github.com/clap-rs/clap) from 4.3.2 to 4.3.3. - [Release notes](https://github.com/clap-rs/clap/releases) - [Changelog](https://github.com/clap-rs/clap/blob/master/CHANGELOG.md) - [Commits](https://github.com/clap-rs/clap/compare/v4.3.2...v4.3.3) --- updated-dependencies: - dependency-name: clap dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 14 +++++++------- zebrad/Cargo.toml | 2 +- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 09d755bb46a..319c15bee1e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -12,7 +12,7 @@ dependencies = [ "arc-swap", "backtrace", "canonical-path", - "clap 4.3.2", + "clap 4.3.3", "color-eyre", "fs-err", "once_cell", @@ -773,9 +773,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.3.2" +version = "4.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "401a4694d2bf92537b6867d94de48c4842089645fdcdf6c71865b175d836e9c2" +checksum = "ca8f255e4b8027970e78db75e78831229c9815fdbfa67eb1a1b777a62e24b4a0" dependencies = [ "clap_builder", "clap_derive", @@ -784,9 +784,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.3.1" +version = "4.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72394f3339a76daf211e57d4bcb374410f3965dcc606dd0e03738c7888766980" +checksum = "acd4f3c17c83b0ba34ffbc4f8bbd74f079413f747f84a6f89292f138057e36ab" dependencies = [ "anstream", "anstyle", @@ -956,7 +956,7 @@ dependencies = [ "anes", "cast", "ciborium", - "clap 4.3.2", + "clap 4.3.3", "criterion-plot", "is-terminal", "itertools", @@ -5935,7 +5935,7 @@ dependencies = [ "abscissa_core", "atty", "chrono", - "clap 4.3.2", + "clap 4.3.3", "color-eyre", "console-subscriber", "dirs", diff --git a/zebrad/Cargo.toml b/zebrad/Cargo.toml index 71d57a0a498..3bd556669a3 100644 --- a/zebrad/Cargo.toml +++ b/zebrad/Cargo.toml @@ -116,7 +116,7 @@ zebra-rpc = { path = "../zebra-rpc" } zebra-state = { path = "../zebra-state" } abscissa_core = "0.7.0" -clap = { version = "4.3.0", features = ["cargo"] } +clap = { version = "4.3.3", features = ["cargo"] } chrono = { version = "0.4.26", default-features = false, features = ["clock", "std"] } humantime-serde = "1.1.1" 
indexmap = "1.9.3" From 6549357dbcf8ca5445a1e6038a1dadaf0395bc9c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 10 Jun 2023 00:59:58 +0000 Subject: [PATCH 075/265] build(deps): bump actions/checkout from 3.5.2 to 3.5.3 (#6901) Bumps [actions/checkout](https://github.com/actions/checkout) from 3.5.2 to 3.5.3. - [Release notes](https://github.com/actions/checkout/releases) - [Changelog](https://github.com/actions/checkout/blob/main/CHANGELOG.md) - [Commits](https://github.com/actions/checkout/compare/v3.5.2...v3.5.3) --- updated-dependencies: - dependency-name: actions/checkout dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .../build-crates-individually.patch.yml | 2 +- .../workflows/build-crates-individually.yml | 4 +- .github/workflows/build-docker-image.yml | 2 +- .github/workflows/continous-delivery.yml | 4 +- .../workflows/continous-integration-os.yml | 10 ++--- .github/workflows/coverage.yml | 2 +- .github/workflows/delete-gcp-resources.yml | 4 +- .github/workflows/deploy-gcp-tests.yml | 42 +++++++++---------- .github/workflows/dockerhub-description.yml | 2 +- .github/workflows/docs.yml | 2 +- .github/workflows/find-cached-disks.yml | 2 +- .github/workflows/lint.yml | 12 +++--- .github/workflows/zcash-lightwalletd.yml | 4 +- .github/workflows/zcashd-manual-deploy.yml | 2 +- 14 files changed, 47 insertions(+), 47 deletions(-) diff --git a/.github/workflows/build-crates-individually.patch.yml b/.github/workflows/build-crates-individually.patch.yml index ef5b6f01c8f..ad957e8521e 100644 --- a/.github/workflows/build-crates-individually.patch.yml +++ b/.github/workflows/build-crates-individually.patch.yml @@ -23,7 +23,7 @@ jobs: outputs: matrix: ${{ steps.set-matrix.outputs.matrix }} steps: - - uses: actions/checkout@v3.5.2 + - uses: 
actions/checkout@v3.5.3 # Setup Rust with stable toolchain and minimal profile - name: Setup Rust diff --git a/.github/workflows/build-crates-individually.yml b/.github/workflows/build-crates-individually.yml index 1546a361c65..49db2a55866 100644 --- a/.github/workflows/build-crates-individually.yml +++ b/.github/workflows/build-crates-individually.yml @@ -50,7 +50,7 @@ jobs: outputs: matrix: ${{ steps.set-matrix.outputs.matrix }} steps: - - uses: actions/checkout@v3.5.2 + - uses: actions/checkout@v3.5.3 - uses: r7kamura/rust-problem-matchers@v1.3.0 # Setup Rust with stable toolchain and minimal profile @@ -106,7 +106,7 @@ jobs: matrix: ${{ fromJson(needs.matrix.outputs.matrix) }} steps: - - uses: actions/checkout@v3.5.2 + - uses: actions/checkout@v3.5.3 with: persist-credentials: false - uses: r7kamura/rust-problem-matchers@v1.3.0 diff --git a/.github/workflows/build-docker-image.yml b/.github/workflows/build-docker-image.yml index bcb8dc8ade7..d62fcc05e75 100644 --- a/.github/workflows/build-docker-image.yml +++ b/.github/workflows/build-docker-image.yml @@ -73,7 +73,7 @@ jobs: contents: 'read' id-token: 'write' steps: - - uses: actions/checkout@v3.5.2 + - uses: actions/checkout@v3.5.3 with: persist-credentials: false - uses: r7kamura/rust-problem-matchers@v1.3.0 diff --git a/.github/workflows/continous-delivery.yml b/.github/workflows/continous-delivery.yml index e33c76b9f31..15c8c08a615 100644 --- a/.github/workflows/continous-delivery.yml +++ b/.github/workflows/continous-delivery.yml @@ -175,7 +175,7 @@ jobs: if: ${{ !cancelled() && !failure() && ((github.event_name == 'push' && github.ref_name == 'main') || github.event_name == 'release') }} steps: - - uses: actions/checkout@v3.5.2 + - uses: actions/checkout@v3.5.3 with: persist-credentials: false @@ -277,7 +277,7 @@ jobs: if: github.event_name == 'workflow_dispatch' steps: - - uses: actions/checkout@v3.5.2 + - uses: actions/checkout@v3.5.3 with: persist-credentials: false diff --git 
a/.github/workflows/continous-integration-os.yml b/.github/workflows/continous-integration-os.yml index 8b0c04c45f3..dcd6fabeb42 100644 --- a/.github/workflows/continous-integration-os.yml +++ b/.github/workflows/continous-integration-os.yml @@ -89,7 +89,7 @@ jobs: features: " --features getblocktemplate-rpcs" steps: - - uses: actions/checkout@v3.5.2 + - uses: actions/checkout@v3.5.3 with: persist-credentials: false - uses: r7kamura/rust-problem-matchers@v1.3.0 @@ -203,7 +203,7 @@ jobs: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3.5.2 + - uses: actions/checkout@v3.5.3 with: persist-credentials: false - uses: r7kamura/rust-problem-matchers@v1.3.0 @@ -225,7 +225,7 @@ jobs: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3.5.2 + - uses: actions/checkout@v3.5.3 with: persist-credentials: false - uses: r7kamura/rust-problem-matchers@v1.3.0 @@ -269,7 +269,7 @@ jobs: continue-on-error: ${{ matrix.checks == 'advisories' }} steps: - - uses: actions/checkout@v3.5.2 + - uses: actions/checkout@v3.5.3 with: persist-credentials: false - uses: r7kamura/rust-problem-matchers@v1.3.0 @@ -288,7 +288,7 @@ jobs: steps: - name: Checkout git repository - uses: actions/checkout@v3.5.2 + uses: actions/checkout@v3.5.3 with: persist-credentials: false - uses: r7kamura/rust-problem-matchers@v1.3.0 diff --git a/.github/workflows/coverage.yml b/.github/workflows/coverage.yml index 0eba61e0868..5eecd3a6dd3 100644 --- a/.github/workflows/coverage.yml +++ b/.github/workflows/coverage.yml @@ -57,7 +57,7 @@ jobs: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3.5.2 + - uses: actions/checkout@v3.5.3 with: persist-credentials: false diff --git a/.github/workflows/delete-gcp-resources.yml b/.github/workflows/delete-gcp-resources.yml index 958fe88dda2..b92e2f94d20 100644 --- a/.github/workflows/delete-gcp-resources.yml +++ b/.github/workflows/delete-gcp-resources.yml @@ -30,7 +30,7 @@ jobs: contents: 'read' id-token: 'write' steps: - - uses: actions/checkout@v3.5.2 + 
- uses: actions/checkout@v3.5.3 with: persist-credentials: false @@ -239,7 +239,7 @@ jobs: contents: 'read' id-token: 'write' steps: - - uses: actions/checkout@v3.5.2 + - uses: actions/checkout@v3.5.3 with: persist-credentials: false diff --git a/.github/workflows/deploy-gcp-tests.yml b/.github/workflows/deploy-gcp-tests.yml index 6aaf754c34e..263f7e1e85b 100644 --- a/.github/workflows/deploy-gcp-tests.yml +++ b/.github/workflows/deploy-gcp-tests.yml @@ -109,7 +109,7 @@ jobs: contents: 'read' id-token: 'write' steps: - - uses: actions/checkout@v3.5.2 + - uses: actions/checkout@v3.5.3 with: persist-credentials: false fetch-depth: '2' @@ -204,7 +204,7 @@ jobs: contents: 'read' id-token: 'write' steps: - - uses: actions/checkout@v3.5.2 + - uses: actions/checkout@v3.5.3 with: persist-credentials: false fetch-depth: '2' @@ -270,7 +270,7 @@ jobs: contents: 'read' id-token: 'write' steps: - - uses: actions/checkout@v3.5.2 + - uses: actions/checkout@v3.5.3 with: persist-credentials: false fetch-depth: '2' @@ -448,7 +448,7 @@ jobs: contents: 'read' id-token: 'write' steps: - - uses: actions/checkout@v3.5.2 + - uses: actions/checkout@v3.5.3 with: persist-credentials: false fetch-depth: '2' @@ -584,7 +584,7 @@ jobs: contents: 'read' id-token: 'write' steps: - - uses: actions/checkout@v3.5.2 + - uses: actions/checkout@v3.5.3 with: persist-credentials: false fetch-depth: '2' @@ -656,7 +656,7 @@ jobs: contents: 'read' id-token: 'write' steps: - - uses: actions/checkout@v3.5.2 + - uses: actions/checkout@v3.5.3 with: persist-credentials: false fetch-depth: '2' @@ -723,7 +723,7 @@ jobs: contents: 'read' id-token: 'write' steps: - - uses: actions/checkout@v3.5.2 + - uses: actions/checkout@v3.5.3 with: persist-credentials: false fetch-depth: '2' @@ -792,7 +792,7 @@ jobs: contents: 'read' id-token: 'write' steps: - - uses: actions/checkout@v3.5.2 + - uses: actions/checkout@v3.5.3 with: persist-credentials: false fetch-depth: '2' @@ -861,7 +861,7 @@ jobs: contents: 'read' id-token: 
'write' steps: - - uses: actions/checkout@v3.5.2 + - uses: actions/checkout@v3.5.3 with: persist-credentials: false fetch-depth: '2' @@ -931,7 +931,7 @@ jobs: contents: 'read' id-token: 'write' steps: - - uses: actions/checkout@v3.5.2 + - uses: actions/checkout@v3.5.3 with: persist-credentials: false fetch-depth: '2' @@ -1000,7 +1000,7 @@ jobs: contents: 'read' id-token: 'write' steps: - - uses: actions/checkout@v3.5.2 + - uses: actions/checkout@v3.5.3 with: persist-credentials: false fetch-depth: '2' @@ -1069,7 +1069,7 @@ jobs: contents: 'read' id-token: 'write' steps: - - uses: actions/checkout@v3.5.2 + - uses: actions/checkout@v3.5.3 with: persist-credentials: false fetch-depth: '2' @@ -1138,7 +1138,7 @@ jobs: contents: 'read' id-token: 'write' steps: - - uses: actions/checkout@v3.5.2 + - uses: actions/checkout@v3.5.3 with: persist-credentials: false fetch-depth: '2' @@ -1207,7 +1207,7 @@ jobs: contents: 'read' id-token: 'write' steps: - - uses: actions/checkout@v3.5.2 + - uses: actions/checkout@v3.5.3 with: persist-credentials: false fetch-depth: '2' @@ -1275,7 +1275,7 @@ jobs: contents: 'read' id-token: 'write' steps: - - uses: actions/checkout@v3.5.2 + - uses: actions/checkout@v3.5.3 with: persist-credentials: false fetch-depth: '2' @@ -1343,7 +1343,7 @@ jobs: contents: 'read' id-token: 'write' steps: - - uses: actions/checkout@v3.5.2 + - uses: actions/checkout@v3.5.3 with: persist-credentials: false fetch-depth: '2' @@ -1412,7 +1412,7 @@ jobs: contents: 'read' id-token: 'write' steps: - - uses: actions/checkout@v3.5.2 + - uses: actions/checkout@v3.5.3 with: persist-credentials: false fetch-depth: '2' @@ -1478,7 +1478,7 @@ jobs: contents: 'read' id-token: 'write' steps: - - uses: actions/checkout@v3.5.2 + - uses: actions/checkout@v3.5.3 with: persist-credentials: false fetch-depth: '2' @@ -1553,7 +1553,7 @@ jobs: contents: 'read' id-token: 'write' steps: - - uses: actions/checkout@v3.5.2 + - uses: actions/checkout@v3.5.3 with: persist-credentials: false 
fetch-depth: '2' @@ -1635,7 +1635,7 @@ jobs: contents: 'read' id-token: 'write' steps: - - uses: actions/checkout@v3.5.2 + - uses: actions/checkout@v3.5.3 with: persist-credentials: false fetch-depth: '2' @@ -1839,7 +1839,7 @@ jobs: contents: 'read' id-token: 'write' steps: - - uses: actions/checkout@v3.5.2 + - uses: actions/checkout@v3.5.3 with: persist-credentials: false fetch-depth: '2' diff --git a/.github/workflows/dockerhub-description.yml b/.github/workflows/dockerhub-description.yml index fc0a58debb7..958a23bc8e8 100644 --- a/.github/workflows/dockerhub-description.yml +++ b/.github/workflows/dockerhub-description.yml @@ -17,7 +17,7 @@ jobs: dockerHubDescription: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3.5.2 + - uses: actions/checkout@v3.5.3 with: persist-credentials: false diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml index acf07c8a2ba..9c8d835f822 100644 --- a/.github/workflows/docs.yml +++ b/.github/workflows/docs.yml @@ -40,7 +40,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout the source code - uses: actions/checkout@v3.5.2 + uses: actions/checkout@v3.5.3 with: persist-credentials: false diff --git a/.github/workflows/find-cached-disks.yml b/.github/workflows/find-cached-disks.yml index d92d176726e..7c07259ac82 100644 --- a/.github/workflows/find-cached-disks.yml +++ b/.github/workflows/find-cached-disks.yml @@ -30,7 +30,7 @@ jobs: contents: 'read' id-token: 'write' steps: - - uses: actions/checkout@v3.5.2 + - uses: actions/checkout@v3.5.3 with: persist-credentials: false fetch-depth: 0 diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 656c926374b..ac3be34d7c3 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -30,7 +30,7 @@ jobs: rust: ${{ steps.changed-files-rust.outputs.any_changed == 'true' }} workflows: ${{ steps.changed-files-workflows.outputs.any_changed == 'true' }} steps: - - uses: actions/checkout@v3.5.2 + - uses: actions/checkout@v3.5.3 with: 
persist-credentials: false fetch-depth: 0 @@ -62,7 +62,7 @@ jobs: if: ${{ needs.changed-files.outputs.rust == 'true' }} steps: - - uses: actions/checkout@v3.5.2 + - uses: actions/checkout@v3.5.3 with: persist-credentials: false @@ -112,7 +112,7 @@ jobs: if: ${{ needs.changed-files.outputs.rust == 'true' }} steps: - - uses: actions/checkout@v3.5.2 + - uses: actions/checkout@v3.5.3 with: persist-credentials: false - uses: r7kamura/rust-problem-matchers@v1.3.0 @@ -151,7 +151,7 @@ jobs: RUSTDOCFLAGS: -D warnings -A rustdoc::private_intra_doc_links steps: - - uses: actions/checkout@v3.5.2 + - uses: actions/checkout@v3.5.3 with: persist-credentials: false - uses: r7kamura/rust-problem-matchers@v1.3.0 @@ -177,7 +177,7 @@ jobs: needs: changed-files if: ${{ needs.changed-files.outputs.workflows == 'true' }} steps: - - uses: actions/checkout@v3.5.2 + - uses: actions/checkout@v3.5.3 - uses: reviewdog/action-actionlint@v1.37.0 with: level: warning @@ -187,7 +187,7 @@ jobs: runs-on: ubuntu-latest needs: changed-files steps: - - uses: actions/checkout@v3.5.2 + - uses: actions/checkout@v3.5.3 - uses: plettich/action-codespell@master with: github_token: ${{ secrets.github_token }} diff --git a/.github/workflows/zcash-lightwalletd.yml b/.github/workflows/zcash-lightwalletd.yml index d5074cf7f46..d85bcb24f2d 100644 --- a/.github/workflows/zcash-lightwalletd.yml +++ b/.github/workflows/zcash-lightwalletd.yml @@ -56,13 +56,13 @@ jobs: id-token: 'write' steps: - - uses: actions/checkout@v3.5.2 + - uses: actions/checkout@v3.5.3 with: repository: adityapk00/lightwalletd ref: 'master' persist-credentials: false - - uses: actions/checkout@v3.5.2 + - uses: actions/checkout@v3.5.3 with: path: zebra persist-credentials: false diff --git a/.github/workflows/zcashd-manual-deploy.yml b/.github/workflows/zcashd-manual-deploy.yml index e2d6eb004ee..142708504d1 100644 --- a/.github/workflows/zcashd-manual-deploy.yml +++ b/.github/workflows/zcashd-manual-deploy.yml @@ -22,7 +22,7 @@ jobs: id-token: 
'write' steps: - - uses: actions/checkout@v3.5.2 + - uses: actions/checkout@v3.5.3 with: persist-credentials: false From 2e379811c3a8f4dd74c8e0e735738dcbe52d4288 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 10 Jun 2023 01:00:21 +0000 Subject: [PATCH 076/265] build(deps): bump docker/build-push-action from 4.0.0 to 4.1.0 (#6902) Bumps [docker/build-push-action](https://github.com/docker/build-push-action) from 4.0.0 to 4.1.0. - [Release notes](https://github.com/docker/build-push-action/releases) - [Commits](https://github.com/docker/build-push-action/compare/v4.0.0...v4.1.0) --- updated-dependencies: - dependency-name: docker/build-push-action dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/build-docker-image.yml | 2 +- .github/workflows/zcash-lightwalletd.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/build-docker-image.yml b/.github/workflows/build-docker-image.yml index d62fcc05e75..8eeed4214aa 100644 --- a/.github/workflows/build-docker-image.yml +++ b/.github/workflows/build-docker-image.yml @@ -146,7 +146,7 @@ jobs: # Build and push image to Google Artifact Registry, and possibly DockerHub - name: Build & push id: docker_build - uses: docker/build-push-action@v4.0.0 + uses: docker/build-push-action@v4.1.0 with: target: ${{ inputs.dockerfile_target }} context: . 
diff --git a/.github/workflows/zcash-lightwalletd.yml b/.github/workflows/zcash-lightwalletd.yml index d85bcb24f2d..50adc90dc32 100644 --- a/.github/workflows/zcash-lightwalletd.yml +++ b/.github/workflows/zcash-lightwalletd.yml @@ -130,7 +130,7 @@ jobs: # Build and push image to Google Artifact Registry - name: Build & push id: docker_build - uses: docker/build-push-action@v4.0.0 + uses: docker/build-push-action@v4.1.0 with: target: build context: . From 8bf2c4be3f05401a670684e66b10be3af5286ad6 Mon Sep 17 00:00:00 2001 From: Deirdre Connolly Date: Sun, 11 Jun 2023 23:59:10 -0400 Subject: [PATCH 077/265] change: Rename tower-batch to tower-batch-control (#6907) --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- book/src/dev/overview.md | 2 +- book/src/dev/rfcs/0011-async-rust-in-zebra.md | 4 ++-- {tower-batch => tower-batch-control}/Cargo.toml | 2 +- {tower-batch => tower-batch-control}/src/error.rs | 0 {tower-batch => tower-batch-control}/src/future.rs | 0 {tower-batch => tower-batch-control}/src/layer.rs | 0 {tower-batch => tower-batch-control}/src/lib.rs | 0 {tower-batch => tower-batch-control}/src/message.rs | 0 {tower-batch => tower-batch-control}/src/service.rs | 0 {tower-batch => tower-batch-control}/src/worker.rs | 0 {tower-batch => tower-batch-control}/tests/ed25519.rs | 2 +- {tower-batch => tower-batch-control}/tests/worker.rs | 2 +- zebra-consensus/Cargo.toml | 2 +- zebra-consensus/src/primitives/ed25519.rs | 2 +- zebra-consensus/src/primitives/ed25519/tests.rs | 2 +- zebra-consensus/src/primitives/groth16.rs | 2 +- zebra-consensus/src/primitives/halo2.rs | 2 +- zebra-consensus/src/primitives/redjubjub.rs | 2 +- zebra-consensus/src/primitives/redjubjub/tests.rs | 2 +- zebra-consensus/src/primitives/redpallas.rs | 2 +- zebra-consensus/src/primitives/redpallas/tests.rs | 2 +- 23 files changed, 18 insertions(+), 18 deletions(-) rename {tower-batch => tower-batch-control}/Cargo.toml (97%) rename {tower-batch => tower-batch-control}/src/error.rs (100%) rename 
{tower-batch => tower-batch-control}/src/future.rs (100%) rename {tower-batch => tower-batch-control}/src/layer.rs (100%) rename {tower-batch => tower-batch-control}/src/lib.rs (100%) rename {tower-batch => tower-batch-control}/src/message.rs (100%) rename {tower-batch => tower-batch-control}/src/service.rs (100%) rename {tower-batch => tower-batch-control}/src/worker.rs (100%) rename {tower-batch => tower-batch-control}/tests/ed25519.rs (99%) rename {tower-batch => tower-batch-control}/tests/worker.rs (98%) diff --git a/Cargo.lock b/Cargo.lock index 319c15bee1e..7be30ebc81c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4718,7 +4718,7 @@ dependencies = [ ] [[package]] -name = "tower-batch" +name = "tower-batch-control" version = "0.2.40" dependencies = [ "color-eyre", @@ -5728,7 +5728,7 @@ dependencies = [ "tinyvec", "tokio", "tower", - "tower-batch", + "tower-batch-control", "tower-fallback", "tracing", "tracing-error", diff --git a/Cargo.toml b/Cargo.toml index d77eb87b45a..e6ffaefb9db 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -10,7 +10,7 @@ members = [ "zebra-node-services", "zebra-test", "zebra-utils", - "tower-batch", + "tower-batch-control", "tower-fallback", ] diff --git a/book/src/dev/overview.md b/book/src/dev/overview.md index b6508acfb88..f5e5f82afdc 100644 --- a/book/src/dev/overview.md +++ b/book/src/dev/overview.md @@ -123,7 +123,7 @@ into several components: of blocks and transactions: all consensus rules that can be checked independently of the chain state, such as verification of signatures, proofs, and scripts. Internally, the library - uses [`tower-batch`](https://doc.zebra.zfnd.org/tower_batch/index.html) to + uses [`tower-batch-control`](https://doc.zebra.zfnd.org/tower_batch_control/index.html) to perform automatic, transparent batch processing of contemporaneous verification requests. 
diff --git a/book/src/dev/rfcs/0011-async-rust-in-zebra.md b/book/src/dev/rfcs/0011-async-rust-in-zebra.md index b3c1212032c..000bb2cbd5a 100644 --- a/book/src/dev/rfcs/0011-async-rust-in-zebra.md +++ b/book/src/dev/rfcs/0011-async-rust-in-zebra.md @@ -14,7 +14,7 @@ with the [tokio](https://docs.rs/tokio/) executor. At a higher level, Zebra also uses [`tower::Service`s](https://docs.rs/tower/0.4.1/tower/trait.Service.html), [`tower::Buffer`s](https://docs.rs/tower/0.4.1/tower/buffer/struct.Buffer.html), -and our own [`tower-batch`](https://github.com/ZcashFoundation/zebra/tree/main/tower-batch) +and our own [`tower-batch-control`](https://github.com/ZcashFoundation/zebra/tree/main/tower-batch-control) implementation. # Motivation @@ -737,7 +737,7 @@ particularly important for code that modifies Zebra's highly concurrent crates: - `zebra-network` - `zebra-state` - `zebra-consensus` -- `tower-batch` +- `tower-batch-control` - `tower-fallback` ## Monitoring Async Code diff --git a/tower-batch/Cargo.toml b/tower-batch-control/Cargo.toml similarity index 97% rename from tower-batch/Cargo.toml rename to tower-batch-control/Cargo.toml index 611b30e818a..8d1b66c1f4d 100644 --- a/tower-batch/Cargo.toml +++ b/tower-batch-control/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "tower-batch" +name = "tower-batch-control" version = "0.2.40" authors = ["Zcash Foundation "] license = "MIT" diff --git a/tower-batch/src/error.rs b/tower-batch-control/src/error.rs similarity index 100% rename from tower-batch/src/error.rs rename to tower-batch-control/src/error.rs diff --git a/tower-batch/src/future.rs b/tower-batch-control/src/future.rs similarity index 100% rename from tower-batch/src/future.rs rename to tower-batch-control/src/future.rs diff --git a/tower-batch/src/layer.rs b/tower-batch-control/src/layer.rs similarity index 100% rename from tower-batch/src/layer.rs rename to tower-batch-control/src/layer.rs diff --git a/tower-batch/src/lib.rs b/tower-batch-control/src/lib.rs 
similarity index 100% rename from tower-batch/src/lib.rs rename to tower-batch-control/src/lib.rs diff --git a/tower-batch/src/message.rs b/tower-batch-control/src/message.rs similarity index 100% rename from tower-batch/src/message.rs rename to tower-batch-control/src/message.rs diff --git a/tower-batch/src/service.rs b/tower-batch-control/src/service.rs similarity index 100% rename from tower-batch/src/service.rs rename to tower-batch-control/src/service.rs diff --git a/tower-batch/src/worker.rs b/tower-batch-control/src/worker.rs similarity index 100% rename from tower-batch/src/worker.rs rename to tower-batch-control/src/worker.rs diff --git a/tower-batch/tests/ed25519.rs b/tower-batch-control/tests/ed25519.rs similarity index 99% rename from tower-batch/tests/ed25519.rs rename to tower-batch-control/tests/ed25519.rs index c45e196d2f1..773b1e3e017 100644 --- a/tower-batch/tests/ed25519.rs +++ b/tower-batch-control/tests/ed25519.rs @@ -7,7 +7,7 @@ use ed25519_zebra::*; use futures::stream::{FuturesOrdered, StreamExt}; use rand::thread_rng; use tower::{Service, ServiceExt}; -use tower_batch::Batch; +use tower_batch_control::Batch; use tower_fallback::Fallback; // ============ service impl ============ diff --git a/tower-batch/tests/worker.rs b/tower-batch-control/tests/worker.rs similarity index 98% rename from tower-batch/tests/worker.rs rename to tower-batch-control/tests/worker.rs index 640af1fa2a9..1e7a18b79f9 100644 --- a/tower-batch/tests/worker.rs +++ b/tower-batch-control/tests/worker.rs @@ -4,7 +4,7 @@ use std::time::Duration; use tokio_test::{assert_pending, assert_ready, assert_ready_err, task}; use tower::{Service, ServiceExt}; -use tower_batch::{error, Batch}; +use tower_batch_control::{error, Batch}; use tower_test::mock; #[tokio::test] diff --git a/zebra-consensus/Cargo.toml b/zebra-consensus/Cargo.toml index f80c5602f46..70b279a2d7a 100644 --- a/zebra-consensus/Cargo.toml +++ b/zebra-consensus/Cargo.toml @@ -54,7 +54,7 @@ orchard = "0.4.0" 
zcash_proofs = { version = "0.11.0", features = ["local-prover", "multicore", "download-params"] } tower-fallback = { path = "../tower-fallback/" } -tower-batch = { path = "../tower-batch/" } +tower-batch-control = { path = "../tower-batch-control/" } zebra-script = { path = "../zebra-script" } zebra-state = { path = "../zebra-state" } diff --git a/zebra-consensus/src/primitives/ed25519.rs b/zebra-consensus/src/primitives/ed25519.rs index 22b7f76613c..49bb6c4ac1d 100644 --- a/zebra-consensus/src/primitives/ed25519.rs +++ b/zebra-consensus/src/primitives/ed25519.rs @@ -14,7 +14,7 @@ use rand::thread_rng; use rayon::prelude::*; use tokio::sync::watch; use tower::{util::ServiceFn, Service}; -use tower_batch::{Batch, BatchControl}; +use tower_batch_control::{Batch, BatchControl}; use tower_fallback::Fallback; use zebra_chain::primitives::ed25519::{batch, *}; diff --git a/zebra-consensus/src/primitives/ed25519/tests.rs b/zebra-consensus/src/primitives/ed25519/tests.rs index 4c13a5d6fe1..0847ed08202 100644 --- a/zebra-consensus/src/primitives/ed25519/tests.rs +++ b/zebra-consensus/src/primitives/ed25519/tests.rs @@ -5,7 +5,7 @@ use std::time::Duration; use color_eyre::eyre::{eyre, Result}; use futures::stream::{FuturesUnordered, StreamExt}; use tower::ServiceExt; -use tower_batch::Batch; +use tower_batch_control::Batch; use crate::primitives::ed25519::*; diff --git a/zebra-consensus/src/primitives/groth16.rs b/zebra-consensus/src/primitives/groth16.rs index 29e325fa0eb..0013c048b15 100644 --- a/zebra-consensus/src/primitives/groth16.rs +++ b/zebra-consensus/src/primitives/groth16.rs @@ -22,7 +22,7 @@ use rayon::prelude::*; use tokio::sync::watch; use tower::{util::ServiceFn, Service}; -use tower_batch::{Batch, BatchControl}; +use tower_batch_control::{Batch, BatchControl}; use tower_fallback::{BoxedError, Fallback}; use zebra_chain::{ diff --git a/zebra-consensus/src/primitives/halo2.rs b/zebra-consensus/src/primitives/halo2.rs index a4c0d47c911..b747b4b0cf0 100644 --- 
a/zebra-consensus/src/primitives/halo2.rs +++ b/zebra-consensus/src/primitives/halo2.rs @@ -17,7 +17,7 @@ use rayon::prelude::*; use thiserror::Error; use tokio::sync::watch; use tower::{util::ServiceFn, Service}; -use tower_batch::{Batch, BatchControl}; +use tower_batch_control::{Batch, BatchControl}; use tower_fallback::Fallback; #[cfg(test)] diff --git a/zebra-consensus/src/primitives/redjubjub.rs b/zebra-consensus/src/primitives/redjubjub.rs index 1f11e6625d1..b7f65a2c176 100644 --- a/zebra-consensus/src/primitives/redjubjub.rs +++ b/zebra-consensus/src/primitives/redjubjub.rs @@ -14,7 +14,7 @@ use rand::thread_rng; use rayon::prelude::*; use tokio::sync::watch; use tower::{util::ServiceFn, Service}; -use tower_batch::{Batch, BatchControl}; +use tower_batch_control::{Batch, BatchControl}; use tower_fallback::Fallback; use zebra_chain::primitives::redjubjub::{batch, *}; diff --git a/zebra-consensus/src/primitives/redjubjub/tests.rs b/zebra-consensus/src/primitives/redjubjub/tests.rs index 8c29e318d65..eb32a1db898 100644 --- a/zebra-consensus/src/primitives/redjubjub/tests.rs +++ b/zebra-consensus/src/primitives/redjubjub/tests.rs @@ -7,7 +7,7 @@ use std::time::Duration; use color_eyre::eyre::{eyre, Result}; use futures::stream::{FuturesUnordered, StreamExt}; use tower::ServiceExt; -use tower_batch::Batch; +use tower_batch_control::Batch; async fn sign_and_verify(mut verifier: V, n: usize) -> Result<(), V::Error> where diff --git a/zebra-consensus/src/primitives/redpallas.rs b/zebra-consensus/src/primitives/redpallas.rs index 26f40b61ad9..77b6b08bc9d 100644 --- a/zebra-consensus/src/primitives/redpallas.rs +++ b/zebra-consensus/src/primitives/redpallas.rs @@ -14,7 +14,7 @@ use rand::thread_rng; use rayon::prelude::*; use tokio::sync::watch; use tower::{util::ServiceFn, Service}; -use tower_batch::{Batch, BatchControl}; +use tower_batch_control::{Batch, BatchControl}; use tower_fallback::Fallback; use zebra_chain::primitives::reddsa::{batch, orchard, Error}; diff 
--git a/zebra-consensus/src/primitives/redpallas/tests.rs b/zebra-consensus/src/primitives/redpallas/tests.rs index 2a49b9a1dff..6ae0717d627 100644 --- a/zebra-consensus/src/primitives/redpallas/tests.rs +++ b/zebra-consensus/src/primitives/redpallas/tests.rs @@ -7,7 +7,7 @@ use std::time::Duration; use color_eyre::eyre::{eyre, Result}; use futures::stream::{FuturesUnordered, StreamExt}; use tower::ServiceExt; -use tower_batch::Batch; +use tower_batch_control::Batch; use zebra_chain::primitives::reddsa::{ orchard::{Binding, SpendAuth}, From b10df282093032de209c9ac2e0c5559dd09a5593 Mon Sep 17 00:00:00 2001 From: Deirdre Connolly Date: Mon, 12 Jun 2023 02:31:49 -0400 Subject: [PATCH 078/265] cargo update (#6878) --- Cargo.lock | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 7be30ebc81c..27a0c18406e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4551,9 +4551,9 @@ dependencies = [ [[package]] name = "tokio-rustls" -version = "0.24.0" +version = "0.24.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e0d409377ff5b1e3ca6437aa86c1eb7d40c134bfec254e44c830defa92669db5" +checksum = "c28327cf380ac148141087fbfb9de9d7bd4e84ab5d2c28fbc911d753de8a7081" dependencies = [ "rustls 0.21.1", "tokio", From c058f77a342d5513420f50542593c11e76731a87 Mon Sep 17 00:00:00 2001 From: Marek Date: Tue, 13 Jun 2023 00:10:09 +0200 Subject: [PATCH 079/265] Increase the the number of test cases (#6921) --- zebra-consensus/src/transaction/tests.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/zebra-consensus/src/transaction/tests.rs b/zebra-consensus/src/transaction/tests.rs index ea5bdc2c494..aad207836a6 100644 --- a/zebra-consensus/src/transaction/tests.rs +++ b/zebra-consensus/src/transaction/tests.rs @@ -2153,7 +2153,7 @@ async fn v4_with_joinsplit_is_rejected_for_modification( }) .await; - if result == expected_error || i >= 10 { + if result == expected_error || i >= 100 { break result; } From 
16ee87a19f837802254e5f2d10dbb9c9137790f6 Mon Sep 17 00:00:00 2001 From: Gustavo Valverde Date: Mon, 12 Jun 2023 21:27:22 -0400 Subject: [PATCH 080/265] ci(tests): run network syncs on a weekly schedule (#6920) * ci(tests): just run the full sync weekly This change allows to run the Zebra full sync test every Friday at mid-day UTC. * fix: also run Testnet & LWD full sync only on schedule * chore: remove not needed file * chore: fix wording and comments Co-authored-by: teor --------- Co-authored-by: teor --- .../continous-integration-docker.yml | 32 ++++++++++--------- 1 file changed, 17 insertions(+), 15 deletions(-) diff --git a/.github/workflows/continous-integration-docker.yml b/.github/workflows/continous-integration-docker.yml index 2f3063fc66a..7c9da72ddc1 100644 --- a/.github/workflows/continous-integration-docker.yml +++ b/.github/workflows/continous-integration-docker.yml @@ -8,6 +8,12 @@ concurrency: cancel-in-progress: true on: + schedule: + # Run this job every Friday at mid-day UTC + # This is limited to the Zebra and lightwalletd Full Sync jobs + # TODO: we should move this behavior to a separate workflow + - cron: '0 12 * * 5' + workflow_dispatch: inputs: network: @@ -80,6 +86,9 @@ on: - '.github/workflows/find-cached-disks.yml' jobs: + # to also run a job on Mergify head branches, + # add `|| (github.event_name == 'push' && startsWith(github.head_ref, 'mergify/merge-queue/'))`: + # https://docs.github.com/en/actions/using-workflows/events-that-trigger-workflows#running-your-workflow-based-on-the-head-or-base-branch-of-a-pull-request-1 # Check if the cached state disks used by the tests are available for the default network. # @@ -359,7 +368,7 @@ jobs: # This test always runs on mainnet. 
# # Runs: - # - after every PR is merged to `main` + # - on schedule, as defined at the top of the workflow # - on every PR update, but only if the state version in constants.rs has no cached disk # - in manual workflow runs, when run-full-sync is 'true' and network is 'Mainnet' # @@ -368,10 +377,7 @@ jobs: name: Zebra tip needs: [ build, get-available-disks ] uses: ./.github/workflows/deploy-gcp-tests.yml - # to also run on Mergify head branches, - # add `|| (github.event_name == 'push' && startsWith(github.head_ref, 'mergify/merge-queue/'))`: - # https://docs.github.com/en/actions/using-workflows/events-that-trigger-workflows#running-your-workflow-based-on-the-head-or-base-branch-of-a-pull-request-1 - if: ${{ (github.event_name == 'push' && github.ref_name == 'main') || !fromJSON(needs.get-available-disks.outputs.zebra_tip_disk) || (github.event.inputs.run-full-sync == 'true' && github.event.inputs.network == 'Mainnet') }} + if: ${{ github.event_name == 'schedule' || !fromJSON(needs.get-available-disks.outputs.zebra_tip_disk) || (github.event.inputs.run-full-sync == 'true' && github.event.inputs.network == 'Mainnet') }} with: app_name: zebrad test_id: full-sync-to-tip @@ -473,7 +479,7 @@ jobs: # This job always runs on testnet, regardless of any inputs or variable settings. 
# # Runs: - # - after every PR is merged to `main` + # - on schedule, as defined at the top of the workflow # - on every PR update, but only if the state version in constants.rs has no cached disk # - in manual workflow runs, when run-full-sync is 'true' and network is 'Testnet' # @@ -482,10 +488,7 @@ jobs: name: Zebra tip on testnet needs: [ build, get-available-disks-testnet ] uses: ./.github/workflows/deploy-gcp-tests.yml - # to also run on Mergify head branches, - # add `|| (github.event_name == 'push' && startsWith(github.head_ref, 'mergify/merge-queue/'))`: - # https://docs.github.com/en/actions/using-workflows/events-that-trigger-workflows#running-your-workflow-based-on-the-head-or-base-branch-of-a-pull-request-1 - if: ${{ (github.event_name == 'push' && github.ref_name == 'main') || !fromJSON(needs.get-available-disks-testnet.outputs.zebra_tip_disk) || (github.event.inputs.run-full-sync == 'true' && github.event.inputs.network == 'Testnet') }} + if: ${{ github.event_name == 'schedule' || !fromJSON(needs.get-available-disks-testnet.outputs.zebra_tip_disk) || (github.event.inputs.run-full-sync == 'true' && github.event.inputs.network == 'Testnet') }} with: app_name: zebrad test_id: full-sync-to-tip-testnet @@ -552,7 +555,9 @@ jobs: # Test full sync of lightwalletd with a Zebra tip state # # Runs: - # - after every PR is merged to `main` + # - on schedule, as defined at the top of the workflow + # - on every PR update, but only if the state version in constants.rs has no cached disk + # - in manual workflow runs, when run-lwd-sync is 'true' and network is 'Mainnet' (the network is required by the test-full-sync job) # # If the state version has changed, waits for the new cached state to be created. # Otherwise, if the state rebuild was skipped, runs immediately after the build job. 
@@ -560,10 +565,7 @@ jobs: name: lightwalletd tip needs: [ test-full-sync, get-available-disks ] uses: ./.github/workflows/deploy-gcp-tests.yml - # to also run on Mergify head branches, - # add `|| (github.event_name == 'push' && startsWith(github.head_ref, 'mergify/merge-queue/'))`: - # https://docs.github.com/en/actions/using-workflows/events-that-trigger-workflows#running-your-workflow-based-on-the-head-or-base-branch-of-a-pull-request-1 - if: ${{ !cancelled() && !failure() && (fromJSON(needs.get-available-disks.outputs.zebra_tip_disk) || needs.test-full-sync.result == 'success') && ((github.event_name == 'push' && github.ref_name == 'main') || !fromJSON(needs.get-available-disks.outputs.lwd_tip_disk) || github.event.inputs.run-lwd-sync == 'true' ) }} + if: ${{ !cancelled() && !failure() && (fromJSON(needs.get-available-disks.outputs.zebra_tip_disk) || needs.test-full-sync.result == 'success') && (github.event_name == 'schedule' || !fromJSON(needs.get-available-disks.outputs.lwd_tip_disk) || github.event.inputs.run-lwd-sync == 'true' ) }} with: app_name: lightwalletd test_id: lwd-full-sync From 329dd71dfeafbd5734624ea89da81445454956be Mon Sep 17 00:00:00 2001 From: Deirdre Connolly Date: Tue, 13 Jun 2023 01:39:42 -0400 Subject: [PATCH 081/265] change(checkpoints): Update checkpoints for 1.0.0 (#6884) * Update mainnet checkpoints * Update testnet checkpoints * Update mainnet checkpoints on commit 2e37981 * Update testnet checkpoints as of commit 2e37981 --- .../src/checkpoint/main-checkpoints.txt | 16 ++++++++++++++++ .../src/checkpoint/test-checkpoints.txt | 18 ++++++++++++++++++ 2 files changed, 34 insertions(+) diff --git a/zebra-consensus/src/checkpoint/main-checkpoints.txt b/zebra-consensus/src/checkpoint/main-checkpoints.txt index 84e2b99561d..79b60a72f81 100644 --- a/zebra-consensus/src/checkpoint/main-checkpoints.txt +++ b/zebra-consensus/src/checkpoint/main-checkpoints.txt @@ -10873,3 +10873,19 @@ 2111216 
000000000046f9457ce05ad5c9d87a7d6de5f0d526290f2f71dc4feb723d579c 2111616 000000000122596e9a9897ed9130aeff0ec850e078ef65f623e5626414b7f0c9 2112016 00000000011c14231f9405b2c4dddb7049677f85197af9059f0fb09ed8868d3f +2112416 0000000001b569f523b60e13d853da0a0db6e49859ba8abdca4cabdf03f01a5c +2112816 0000000000ae445d0053cf26a0cfd85202f606b7ef032c6faacf017f0e4f965d +2113216 0000000000dbd1b82b8c5156be270db65bf3ae45130b5a6f1874914f15041d20 +2113616 0000000000b45acbdc3ed6703ce8e31479907e43f1cccaebe9e651b0a3d0058e +2114016 00000000010e8dab09722bb9bc75abe949b72492158b528a2db60bc09c247c3a +2114416 000000000075e1dca1b3775083de165418f3aae99405e3df3e2826ab1e4e609c +2114816 0000000000f700ef27222c61e255cbc44b867b59a157b930b6c6d502c87a872d +2115216 000000000038d59f2b7571905bca2bf9ca67564f5dc90ae9b53859431108e5a5 +2115616 0000000000a2ac4c4c3270c9c57236407fe4d74053a940e701fcd9c4e3b8d1a3 +2116016 00000000019282be257a52518559a9c66cc3963b1c45c2dcfc5d3a84e8d3b9cc +2116416 0000000001826392c47f07bf16cece3ddef30bbe434e5514f7baa05615ae5a82 +2116816 000000000039630d200d3ff912e5e745eb5e994d14dbd69c405c7e2eeba7e9cb +2117216 0000000000b722d83cc8568b94bb9765509c45302a48f7f9251262854e816137 +2117616 0000000000b91c891557df28d4173766562cc455b3b5ab27e83c9a03958bbc14 +2118016 00000000013eb4b6e1cd5b9f19ad032670cad97fd4837b1dd7e876358ff8752a +2118416 0000000000a5e8d0c81b4fb1036d94ac7d16d192bd068258d3aa07fe903b8736 diff --git a/zebra-consensus/src/checkpoint/test-checkpoints.txt b/zebra-consensus/src/checkpoint/test-checkpoints.txt index a95bdcc74c5..e26e49c6347 100644 --- a/zebra-consensus/src/checkpoint/test-checkpoints.txt +++ b/zebra-consensus/src/checkpoint/test-checkpoints.txt @@ -5930,3 +5930,21 @@ 2371600 00101a398fe99963ce0ae45f67091b8ee71673ef7ce6a16d4dd010712aca7f16 2372000 002a414b6b69758f8408326c7b79f0607d27a95ffe2c46177c9dfc6179ee67df 2372400 00135546d02b716693f9b1c7764d30c7db7b876a4095cfd7b391f4a34f5bcaba +2372800 0007010345aa4f5cb2d4ac761d1c5b9a82ab7749aff5cbb8e881a2701fc88b11 +2373200 
00180bcbc032ea60438ed1e1251ff5cbd8f728347758ed177ab56d4a8ccc7cf5 +2373600 004f6e9f158296590d25f38c46ab5fb7af52c681c2cf122c9caa1cdb651b5187 +2374000 001ad81a27ce25859c4cbe28da98b6b1f298aa460e842ebb868b6d5721cbde06 +2374400 00e5c9d7cd641388f23714dd3fc1eceb929968b908c4411fb78c3bd9ee036d61 +2374800 000bc5e9a8ee1b0ff85efcb1118386c88184c83001ac99bd0b30841bb3b0187e +2375200 002961f9c0ac851246c2a796b7569410c0b0ac0eac8c0873b7c65c511f0523ea +2375600 00a0b4d01e3bfaad57be6469ee23cd58f6fb19ed34d87358a1c8479db5ea59c4 +2376000 00071275ff1a42fac46138b1a1b5997cec976bde326d3dbe016d457e5294906b +2376400 002e111e59b3bebaf40f9038ba97327b92ace389eea95f8a5d439f97d8e43a2d +2376800 001cd77884be5224bb6fb047be86fedc29b771e31e51801b30a10a10716e10ab +2377200 00129deaddbe60261a544be644022841468741e94a0a2ae298ef1b8dde19af8f +2377600 001fe8a1f4cb96cfee6a7485c7eee913170b82b5cc6a8b44864c6fed186e9339 +2378000 001a299d0587852d9718ee4c696827d96b4168be9865805222cb515f14fbdbae +2378400 000f60d57bc9c19d171f04662ee5e06a1dca91f091c8c1f10e4acf827cb9548c +2378800 00192766442c4ecade78c79653645c5fb06cc99e26a900a5b1139dfbf470d6d0 +2379200 004391bd427bde706e2754f5a5f84024536b8d27d763c37b5ecb9261bef359b9 +2379600 0000a22e46cc27732a84126a2de66dfbe0bd31a8fcfbd314b773cdbb0624ab99 From 62d946d139ceca993ffcf1e5f449e1c118977a06 Mon Sep 17 00:00:00 2001 From: Deirdre Connolly Date: Tue, 13 Jun 2023 04:46:01 -0400 Subject: [PATCH 082/265] change(crates): Prepare for publishing crates to crates.io (#6908) * change(crates): Publish tower-batch-control * Cargo fields for zebra-test * Cargo fields for tower-fallback * Cargo fields for zebra-chain * Cargo fields for zebra-consensus * Cargo fields for zebra-network * Cargo fields for zebra-node-services * Cargo fields for zebra-rpc * Cargo fields for zebra-script * Cargo fields for zebra-state * Cargo fields for zebrad * Cargo fields for zebra-test * Include zebra-test version and path for zebra-chain * Include zebra-chain version and path for zebra-node-services * Include 
zebra-chain version and path for zebra-script * Include zebra-chain and zebra-test versions and paths for zebra-state * Include zebra-* versions and paths for zebra-consensus * Include zebra-* and tower-fallback versions and paths for tower-batch-control * Include tower-* versions and paths for zebra-consensus * Fixed tower-fallback license string * Remove versions for dev-dependencise * Update zebra-rpc/Cargo.toml Co-authored-by: teor * tower-batch-control license MUST remain MIT only * Align zebra-network with upstream tower's MIT license * Update zebra-test/Cargo.toml Co-authored-by: teor * Update zebra-utils/Cargo.toml Co-authored-by: teor * fix(legal): Use correct licenses for tower-batch-control and zebra-network (#6927) * Don't modify whitespace * Resolve rustdoc and cargo-release warnings * Add missing zebra internal dependency versions * Workaround missing zebra-utils test dependency in zebrad test-only feature * Fix missing zebra internal dependency versions * Add README, home page, and search terms to every crate * Fix README publish warnings * Change tower crates to beta versions * Add zebra-utils to the list of allowed unused dependencies * Update tower-batch-control/Cargo.toml * Update tower-fallback/Cargo.toml --------- Co-authored-by: teor --- .../workflows/continous-integration-os.yml | 2 +- Cargo.lock | 4 +-- tower-batch-control/Cargo.toml | 20 ++++++++++++-- tower-batch-control/LICENSE | 26 +++++++++++++++++++ tower-batch-control/src/lib.rs | 8 ++++++ tower-fallback/Cargo.toml | 14 ++++++++-- zebra-chain/Cargo.toml | 11 ++++++-- zebra-consensus/Cargo.toml | 21 ++++++++++----- zebra-network/Cargo.toml | 21 ++++++++++++--- zebra-network/LICENSE | 26 +++++++++++++++++++ zebra-network/src/peer_set/set.rs | 6 ++--- zebra-node-services/Cargo.toml | 14 +++++++--- zebra-rpc/Cargo.toml | 21 ++++++++++----- zebra-script/Cargo.toml | 11 ++++++-- zebra-state/Cargo.toml | 13 ++++++++-- zebra-test/Cargo.toml | 9 ++++++- zebra-utils/Cargo.toml | 22 
+++++++++++----- zebrad/Cargo.toml | 24 ++++++++++++----- zebrad/README.md | 14 ---------- 19 files changed, 223 insertions(+), 64 deletions(-) create mode 100644 tower-batch-control/LICENSE create mode 100644 zebra-network/LICENSE delete mode 100644 zebrad/README.md diff --git a/.github/workflows/continous-integration-os.yml b/.github/workflows/continous-integration-os.yml index dcd6fabeb42..c7adf92b7ab 100644 --- a/.github/workflows/continous-integration-os.yml +++ b/.github/workflows/continous-integration-os.yml @@ -311,7 +311,7 @@ jobs: cargo machete --skip-target-dir || true echo "-- unused dependencies are below this line, full output is above --" if (cargo machete --skip-target-dir 2>/dev/null || true) | \ - grep -v -e gumdrop -e humantime-serde -e tinyvec -e "found the following" -e Cargo.toml -e Done; then + grep -v -e gumdrop -e humantime-serde -e tinyvec -e zebra-utils -e "found the following" -e Cargo.toml -e Done; then echo "New unused dependencies were found, please remove them!" exit 1 else diff --git a/Cargo.lock b/Cargo.lock index 27a0c18406e..84323d0187a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4719,7 +4719,7 @@ dependencies = [ [[package]] name = "tower-batch-control" -version = "0.2.40" +version = "0.2.41-beta.1" dependencies = [ "color-eyre", "ed25519-zebra", @@ -4743,7 +4743,7 @@ dependencies = [ [[package]] name = "tower-fallback" -version = "0.2.40" +version = "0.2.41-beta.1" dependencies = [ "futures-core", "pin-project", diff --git a/tower-batch-control/Cargo.toml b/tower-batch-control/Cargo.toml index 8d1b66c1f4d..d8d202acdf4 100644 --- a/tower-batch-control/Cargo.toml +++ b/tower-batch-control/Cargo.toml @@ -1,10 +1,26 @@ [package] name = "tower-batch-control" -version = "0.2.40" -authors = ["Zcash Foundation "] +version = "0.2.41-beta.1" +authors = ["Zcash Foundation ", "Tower Maintainers "] +description = "Tower middleware for batch request processing" +# # Legal +# +# This licence is deliberately different to the rest of Zebra. 
+# +# This code was modified from a 2019 version of: +# https://github.com/tower-rs/tower/tree/master/tower/src/buffer license = "MIT" +repository = "https://github.com/ZcashFoundation/zebra" edition = "2021" +# TODO: decide if we want to use the Zebra readme and home page +#readme = "../README.md" +#homepage = "https://zfnd.org/zebra/" +# crates.io is limited to 5 keywords and categories +keywords = ["tower", "batch"] +# Must be one of +categories = ["algorithms", "asynchronous"] + [dependencies] futures = "0.3.28" futures-core = "0.3.28" diff --git a/tower-batch-control/LICENSE b/tower-batch-control/LICENSE new file mode 100644 index 00000000000..9862976a6ce --- /dev/null +++ b/tower-batch-control/LICENSE @@ -0,0 +1,26 @@ +Copyright (c) 2019-2023 Zcash Foundation +Copyright (c) 2019 Tower Contributors + +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. 
diff --git a/tower-batch-control/src/lib.rs b/tower-batch-control/src/lib.rs index 855b1a962b0..2628a09562e 100644 --- a/tower-batch-control/src/lib.rs +++ b/tower-batch-control/src/lib.rs @@ -84,6 +84,14 @@ //! a `Service`. The wrapped service does not need to implement any batch //! control logic, as it will receive explicit [`Flush`](BatchControl::Flush) //! requests from the wrapper. +//! +//! ## Implementation History +//! +//! The `tower-batch-control` code was modified from a 2019 version of: +//! +//! +//! A modified fork of this crate is available on crates.io as `tower-batch`. +//! It is focused on batching disk writes. pub mod error; pub mod future; diff --git a/tower-fallback/Cargo.toml b/tower-fallback/Cargo.toml index de3c4d7ae6c..af4aa8d1079 100644 --- a/tower-fallback/Cargo.toml +++ b/tower-fallback/Cargo.toml @@ -1,10 +1,20 @@ [package] name = "tower-fallback" -version = "0.2.40" +version = "0.2.41-beta.1" authors = ["Zcash Foundation "] -license = "MIT" +description = "A Tower service combinator that sends requests to a first service, then retries processing on a second fallback service if the first service errors." 
+license = "MIT OR Apache-2.0" +repository = "https://github.com/ZcashFoundation/zebra" edition = "2021" +# TODO: decide if we want to use the Zebra readme and home page +#readme = "../README.md" +#homepage = "https://zfnd.org/zebra/" +# crates.io is limited to 5 keywords and categories +keywords = ["tower", "batch"] +# Must be one of +categories = ["algorithms", "asynchronous"] + [dependencies] pin-project = "1.1.0" tower = "0.4.13" diff --git a/zebra-chain/Cargo.toml b/zebra-chain/Cargo.toml index f1e24ad2b45..5495b1a356b 100644 --- a/zebra-chain/Cargo.toml +++ b/zebra-chain/Cargo.toml @@ -2,10 +2,17 @@ name = "zebra-chain" version = "1.0.0-beta.25" authors = ["Zcash Foundation "] +description = "Core Zcash data structures" license = "MIT OR Apache-2.0" +repository = "https://github.com/ZcashFoundation/zebra" edition = "2021" -# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html +readme = "../README.md" +homepage = "https://zfnd.org/zebra/" +# crates.io is limited to 5 keywords and categories +keywords = ["zebra", "zcash"] +# Must be one of +categories = ["asynchronous", "cryptography::cryptocurrencies", "encoding"] [features] default = [] @@ -109,7 +116,7 @@ rand_chacha = { version = "0.3.1", optional = true } tokio = { version = "1.28.2", features = ["tracing"], optional = true } -zebra-test = { path = "../zebra-test/", optional = true } +zebra-test = { path = "../zebra-test/", version = "1.0.0-beta.25", optional = true } [dev-dependencies] # Benchmarks diff --git a/zebra-consensus/Cargo.toml b/zebra-consensus/Cargo.toml index 70b279a2d7a..3c0e108fa77 100644 --- a/zebra-consensus/Cargo.toml +++ b/zebra-consensus/Cargo.toml @@ -2,9 +2,18 @@ name = "zebra-consensus" version = "1.0.0-beta.25" authors = ["Zcash Foundation "] +description = "Implementation of Zcash consensus checks" license = "MIT OR Apache-2.0" +repository = "https://github.com/ZcashFoundation/zebra" edition = "2021" +readme = "../README.md" +homepage = 
"https://zfnd.org/zebra/" +# crates.io is limited to 5 keywords and categories +keywords = ["zebra", "zcash"] +# Must be one of +categories = ["asynchronous", "cryptography::cryptocurrencies"] + [features] default = [] @@ -53,13 +62,13 @@ orchard = "0.4.0" zcash_proofs = { version = "0.11.0", features = ["local-prover", "multicore", "download-params"] } -tower-fallback = { path = "../tower-fallback/" } -tower-batch-control = { path = "../tower-batch-control/" } +tower-fallback = { path = "../tower-fallback/", version = "0.2.41-beta.1" } +tower-batch-control = { path = "../tower-batch-control/", version = "0.2.41-beta.1" } -zebra-script = { path = "../zebra-script" } -zebra-state = { path = "../zebra-state" } -zebra-node-services = { path = "../zebra-node-services" } -zebra-chain = { path = "../zebra-chain" } +zebra-script = { path = "../zebra-script", version = "1.0.0-beta.25" } +zebra-state = { path = "../zebra-state", version = "1.0.0-beta.25" } +zebra-node-services = { path = "../zebra-node-services", version = "1.0.0-beta.25" } +zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.25" } # prod feature progress-bar howudoin = { version = "0.1.2", optional = true } diff --git a/zebra-network/Cargo.toml b/zebra-network/Cargo.toml index 9c66368e365..f1b5429c235 100644 --- a/zebra-network/Cargo.toml +++ b/zebra-network/Cargo.toml @@ -1,11 +1,24 @@ [package] name = "zebra-network" version = "1.0.0-beta.25" -authors = ["Zcash Foundation "] -license = "MIT OR Apache-2.0" +authors = ["Zcash Foundation ", "Tower Maintainers "] +description = "Networking code for Zebra" +# # Legal +# +# This licence is deliberately different to the rest of Zebra. 
+# +# zebra-network/src/peer_set/set.rs was modified from a 2019 version of: +# https://github.com/tower-rs/tower/tree/master/tower/src/balance/p2c/service.rs +license = "MIT" +repository = "https://github.com/ZcashFoundation/zebra" edition = "2021" -# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html +readme = "../README.md" +homepage = "https://zfnd.org/zebra/" +# crates.io is limited to 5 keywords and categories +keywords = ["zebra", "zcash"] +# Must be one of +categories = ["asynchronous", "cryptography::cryptocurrencies", "encoding", "network-programming"] [features] default = [] @@ -65,7 +78,7 @@ howudoin = { version = "0.1.2", optional = true } proptest = { version = "1.2.0", optional = true } proptest-derive = { version = "0.3.0", optional = true } -zebra-chain = { path = "../zebra-chain" } +zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.25" } [dev-dependencies] proptest = "1.2.0" diff --git a/zebra-network/LICENSE b/zebra-network/LICENSE new file mode 100644 index 00000000000..9862976a6ce --- /dev/null +++ b/zebra-network/LICENSE @@ -0,0 +1,26 @@ +Copyright (c) 2019-2023 Zcash Foundation +Copyright (c) 2019 Tower Contributors + +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. diff --git a/zebra-network/src/peer_set/set.rs b/zebra-network/src/peer_set/set.rs index 8611ef7c633..b3ff2a92df4 100644 --- a/zebra-network/src/peer_set/set.rs +++ b/zebra-network/src/peer_set/set.rs @@ -2,8 +2,8 @@ //! //! # Implementation //! -//! The [`PeerSet`] implementation is adapted from the one in the [Tower Balance][tower-balance] crate. -//! As described in that crate's documentation, it: +//! The [`PeerSet`] implementation is adapted from the one in [tower::Balance][tower-balance]. +//! As described in Tower's documentation, it: //! //! > Distributes requests across inner services using the [Power of Two Choices][p2c]. //! > @@ -40,7 +40,7 @@ //! //! [finagle]: https://twitter.github.io/finagle/guide/Clients.html#power-of-two-choices-p2c-least-loaded //! [p2c]: http://www.eecs.harvard.edu/~michaelm/postscripts/handbook2001.pdf -//! [tower-balance]: https://crates.io/crates/tower-balance +//! [tower-balance]: https://github.com/tower-rs/tower/tree/master/tower/src/balance //! //! # Behavior During Network Upgrades //! 
diff --git a/zebra-node-services/Cargo.toml b/zebra-node-services/Cargo.toml index 2f888bbdaf3..04639549ad2 100644 --- a/zebra-node-services/Cargo.toml +++ b/zebra-node-services/Cargo.toml @@ -1,10 +1,18 @@ [package] name = "zebra-node-services" +version = "1.0.0-beta.25" authors = ["Zcash Foundation "] +description = "The interfaces of some Zebra node services" license = "MIT OR Apache-2.0" -version = "1.0.0-beta.25" -edition = "2021" repository = "https://github.com/ZcashFoundation/zebra" +edition = "2021" + +readme = "../README.md" +homepage = "https://zfnd.org/zebra/" +# crates.io is limited to 5 keywords and categories +keywords = ["zebra", "zcash"] +# Must be one of +categories = ["asynchronous", "cryptography::cryptocurrencies", "network-programming"] [features] default = [] @@ -27,7 +35,7 @@ rpc-client = [ ] [dependencies] -zebra-chain = { path = "../zebra-chain" } +zebra-chain = { path = "../zebra-chain" , version = "1.0.0-beta.25"} # Optional dependencies diff --git a/zebra-rpc/Cargo.toml b/zebra-rpc/Cargo.toml index 1cca24fcf28..db597d6b29d 100644 --- a/zebra-rpc/Cargo.toml +++ b/zebra-rpc/Cargo.toml @@ -2,10 +2,17 @@ name = "zebra-rpc" version = "1.0.0-beta.25" authors = ["Zcash Foundation "] +description = "A Zebra JSON Remote Procedure Call (JSON-RPC) interface" license = "MIT OR Apache-2.0" +repository = "https://github.com/ZcashFoundation/zebra" edition = "2021" -# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html +readme = "../README.md" +homepage = "https://zfnd.org/zebra/" +# crates.io is limited to 5 keywords and categories +keywords = ["zebra", "zcash"] +# Must be one of +categories = ["asynchronous", "cryptography::cryptocurrencies", "encoding", "network-programming"] [features] default = [] @@ -63,12 +70,12 @@ zcash_address = { version = "0.2.1", optional = true } # Test-only feature proptest-impl proptest = { version = "1.2.0", optional = true } -zebra-chain = { path = "../zebra-chain", features = 
["json-conversion"] } -zebra-consensus = { path = "../zebra-consensus" } -zebra-network = { path = "../zebra-network" } -zebra-node-services = { path = "../zebra-node-services" } -zebra-script = { path = "../zebra-script" } -zebra-state = { path = "../zebra-state" } +zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.25", features = ["json-conversion"] } +zebra-consensus = { path = "../zebra-consensus", version = "1.0.0-beta.25" } +zebra-network = { path = "../zebra-network", version = "1.0.0-beta.25" } +zebra-node-services = { path = "../zebra-node-services", version = "1.0.0-beta.25" } +zebra-script = { path = "../zebra-script", version = "1.0.0-beta.25" } +zebra-state = { path = "../zebra-state", version = "1.0.0-beta.25" } [dev-dependencies] insta = { version = "1.29.0", features = ["redactions", "json", "ron"] } diff --git a/zebra-script/Cargo.toml b/zebra-script/Cargo.toml index 4d9d41efaf9..618f51b934e 100644 --- a/zebra-script/Cargo.toml +++ b/zebra-script/Cargo.toml @@ -2,15 +2,22 @@ name = "zebra-script" version = "1.0.0-beta.25" authors = ["Zcash Foundation "] +description = "Zebra script verification wrapping zcashd's zcash_script library" license = "MIT OR Apache-2.0" +repository = "https://github.com/ZcashFoundation/zebra" edition = "2021" -# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html +readme = "../README.md" +homepage = "https://zfnd.org/zebra/" +# crates.io is limited to 5 keywords and categories +keywords = ["zebra", "zcash"] +# Must be one of +categories = ["api-bindings", "cryptography::cryptocurrencies"] [dependencies] zcash_script = "0.1.12" -zebra-chain = { path = "../zebra-chain" } +zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.25" } thiserror = "1.0.40" displaydoc = "0.2.4" diff --git a/zebra-state/Cargo.toml b/zebra-state/Cargo.toml index e0e268f5a52..20df4da55ae 100644 --- a/zebra-state/Cargo.toml +++ b/zebra-state/Cargo.toml @@ -2,9 +2,18 @@ name = 
"zebra-state" version = "1.0.0-beta.25" authors = ["Zcash Foundation "] +description = "State contextual verification and storage code for Zebra" license = "MIT OR Apache-2.0" +repository = "https://github.com/ZcashFoundation/zebra" edition = "2021" +readme = "../README.md" +homepage = "https://zfnd.org/zebra/" +# crates.io is limited to 5 keywords and categories +keywords = ["zebra", "zcash"] +# Must be one of +categories = ["asynchronous", "caching", "cryptography::cryptocurrencies"] + [features] # Production features that activate extra dependencies, or extra features in dependencies @@ -60,13 +69,13 @@ tracing = "0.1.37" elasticsearch = { version = "8.5.0-alpha.1", package = "elasticsearch", optional = true } serde_json = { version = "1.0.96", package = "serde_json", optional = true } -zebra-chain = { path = "../zebra-chain" } +zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.25" } # prod feature progress-bar howudoin = { version = "0.1.2", optional = true } # test feature proptest-impl -zebra-test = { path = "../zebra-test/", optional = true } +zebra-test = { path = "../zebra-test/", version = "1.0.0-beta.25", optional = true } proptest = { version = "1.2.0", optional = true } proptest-derive = { version = "0.3.0", optional = true } diff --git a/zebra-test/Cargo.toml b/zebra-test/Cargo.toml index db74ee3b325..649a3835bbc 100644 --- a/zebra-test/Cargo.toml +++ b/zebra-test/Cargo.toml @@ -2,10 +2,17 @@ name = "zebra-test" version = "1.0.0-beta.25" authors = ["Zcash Foundation "] +description = "Test harnesses and test vectors for Zebra" license = "MIT OR Apache-2.0" +repository = "https://github.com/ZcashFoundation/zebra" edition = "2021" -# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html +readme = "../README.md" +homepage = "https://zfnd.org/zebra/" +# crates.io is limited to 5 keywords and categories +keywords = ["zebra", "zcash"] +# Must be one of +categories = ["command-line-utilities", 
"cryptography::cryptocurrencies"] [dependencies] hex = "0.4.3" diff --git a/zebra-utils/Cargo.toml b/zebra-utils/Cargo.toml index 9d78f43aaf9..99c80d62dec 100644 --- a/zebra-utils/Cargo.toml +++ b/zebra-utils/Cargo.toml @@ -1,10 +1,20 @@ [package] name = "zebra-utils" +version = "1.0.0-beta.25" authors = ["Zcash Foundation "] +description = "Developer tools for Zebra maintenance and testing" license = "MIT OR Apache-2.0" -version = "1.0.0-beta.25" +repository = "https://github.com/ZcashFoundation/zebra" edition = "2021" +# zebra-utils has a separate README file +readme = "README.md" +homepage = "https://zfnd.org/zebra/" +# crates.io is limited to 5 keywords and categories +keywords = ["zebra", "zcash"] +# Must be one of +categories = ["command-line-utilities", "cryptography::cryptocurrencies"] + [[bin]] name = "zebra-checkpoints" # this setting is required for Zebra's Docker build caches @@ -60,8 +70,11 @@ tracing-error = "0.2.0" tracing-subscriber = "0.3.17" thiserror = "1.0.40" -zebra-node-services = { path = "../zebra-node-services" } -zebra-chain = { path = "../zebra-chain" } +zebra-node-services = { path = "../zebra-node-services", version = "1.0.0-beta.25" } +zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.25" } + +# These crates are needed for the block-template-to-proposal binary +zebra-rpc = { path = "../zebra-rpc", version = "1.0.0-beta.25", optional = true } # These crates are needed for the zebra-checkpoints binary itertools = { version = "0.10.5", optional = true } @@ -72,6 +85,3 @@ reqwest = { version = "0.11.18", optional = true } # These crates are needed for the zebra-checkpoints and search-issue-refs binaries tokio = { version = "1.28.2", features = ["full"], optional = true } - -# These crates are needed for the block-template-to-proposal binary -zebra-rpc = { path = "../zebra-rpc", optional = true } diff --git a/zebrad/Cargo.toml b/zebrad/Cargo.toml index 3bd556669a3..6316d5996ec 100644 --- a/zebrad/Cargo.toml +++ 
b/zebrad/Cargo.toml @@ -1,9 +1,10 @@ [package] # Crate metadata name = "zebrad" +version = "1.0.0-rc.9" authors = ["Zcash Foundation "] +description = "The Zcash Foundation's independent, consensus-compatible implementation of a Zcash node" license = "MIT OR Apache-2.0" -version = "1.0.0-rc.9" repository = "https://github.com/ZcashFoundation/zebra" # Settings that impact compilation @@ -19,6 +20,13 @@ rust-version = "1.66" # when run in the workspace directory default-run = "zebrad" +readme = "../README.md" +homepage = "https://zfnd.org/zebra/" +# crates.io is limited to 5 keywords and categories +keywords = ["zebra", "zcash"] +# Must be one of +categories = ["command-line-utilities", "cryptography::cryptocurrencies"] + [features] # In release builds, don't compile debug logging code, to improve performance. default = ["release_max_level_info"] @@ -108,12 +116,14 @@ test_sync_past_mandatory_checkpoint_mainnet = [] test_sync_past_mandatory_checkpoint_testnet = [] [dependencies] -zebra-chain = { path = "../zebra-chain" } -zebra-consensus = { path = "../zebra-consensus" } -zebra-network = { path = "../zebra-network" } -zebra-node-services = { path = "../zebra-node-services" } -zebra-rpc = { path = "../zebra-rpc" } -zebra-state = { path = "../zebra-state" } +zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.25" } +zebra-consensus = { path = "../zebra-consensus", version = "1.0.0-beta.25" } +zebra-network = { path = "../zebra-network", version = "1.0.0-beta.25" } +zebra-node-services = { path = "../zebra-node-services", version = "1.0.0-beta.25" } +zebra-rpc = { path = "../zebra-rpc", version = "1.0.0-beta.25" } +zebra-state = { path = "../zebra-state", version = "1.0.0-beta.25" } +# Required for crates.io publishing, but it's only used in tests +zebra-utils = { path = "../zebra-utils", version = "1.0.0-beta.25", optional = true } abscissa_core = "0.7.0" clap = { version = "4.3.3", features = ["cargo"] } diff --git a/zebrad/README.md b/zebrad/README.md 
deleted file mode 100644 index 4dee5cbb2d0..00000000000 --- a/zebrad/README.md +++ /dev/null @@ -1,14 +0,0 @@ -# Zebrad - -Zebrad is an application. - -## Getting Started - -This application is authored using [Abscissa], a Rust application framework. - -For more information, see: - -[Documentation] - -[Abscissa]: https://github.com/iqlusioninc/abscissa -[Documentation]: https://docs.rs/abscissa_core/ From 7acc04c9f4afab0f0017f161b3d15ca9b93632da Mon Sep 17 00:00:00 2001 From: teor Date: Tue, 13 Jun 2023 22:31:34 +1000 Subject: [PATCH 083/265] Disable scheduled testnet full sync by default (#6930) --- .github/workflows/continous-integration-docker.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/continous-integration-docker.yml b/.github/workflows/continous-integration-docker.yml index 7c9da72ddc1..40d2313a894 100644 --- a/.github/workflows/continous-integration-docker.yml +++ b/.github/workflows/continous-integration-docker.yml @@ -488,7 +488,7 @@ jobs: name: Zebra tip on testnet needs: [ build, get-available-disks-testnet ] uses: ./.github/workflows/deploy-gcp-tests.yml - if: ${{ github.event_name == 'schedule' || !fromJSON(needs.get-available-disks-testnet.outputs.zebra_tip_disk) || (github.event.inputs.run-full-sync == 'true' && github.event.inputs.network == 'Testnet') }} + if: ${{ (github.event_name == 'schedule' && vars.SCHEDULE_TESTNET_FULL_SYNC == 'true') || !fromJSON(needs.get-available-disks-testnet.outputs.zebra_tip_disk) || (github.event.inputs.run-full-sync == 'true' && github.event.inputs.network == 'Testnet') }} with: app_name: zebrad test_id: full-sync-to-tip-testnet From d2a29b655257e3ac947199098ccc85af50cca92f Mon Sep 17 00:00:00 2001 From: Deirdre Connolly Date: Tue, 13 Jun 2023 16:53:15 -0400 Subject: [PATCH 084/265] change(docs): Add publishing crates as part of release checklist (#6909) * change(docs): Add instructions for publishing crates as part of release checklist * Update release-checklist.md * 
Update .github/PULL_REQUEST_TEMPLATE/release-checklist.md Co-authored-by: teor * Update .github/PULL_REQUEST_TEMPLATE/release-checklist.md Co-authored-by: teor * Rewrite the release checklist using `cargo release` * Minimise diff * Re-order testing, minimise diff * Add checkbox to checkpoints instruction * Tag/promote release, then publish crates, then publish docker images --------- Co-authored-by: teor --- .../release-checklist.md | 174 ++++++++---------- 1 file changed, 75 insertions(+), 99 deletions(-) diff --git a/.github/PULL_REQUEST_TEMPLATE/release-checklist.md b/.github/PULL_REQUEST_TEMPLATE/release-checklist.md index 1d7a1e4f9e5..2017e9d1113 100644 --- a/.github/PULL_REQUEST_TEMPLATE/release-checklist.md +++ b/.github/PULL_REQUEST_TEMPLATE/release-checklist.md @@ -7,96 +7,77 @@ assignees: '' --- -## Versioning +# Prepare for the Release -### How to Increment Versions +These release steps can be done a week before the release, in separate PRs. +They can be skipped for urgent releases. -Zebra follows [semantic versioning](https://semver.org). Semantic versions look like: MAJOR`.`MINOR`.`PATCH[`-`TAG`.`PRE-RELEASE] +## Checkpoints -The [draft `zebrad` changelog](https://github.com/ZcashFoundation/zebra/releases) will have an automatic version bump. This version is based on [the labels on the PRs in the release](https://github.com/ZcashFoundation/zebra/blob/main/.github/release-drafter.yml). +For performance and security, we want to update the Zebra checkpoints in every release. +- [ ] You can copy the latest checkpoints from CI by following [the zebra-checkpoints README](https://github.com/ZcashFoundation/zebra/blob/main/zebra-utils/README.md#zebra-checkpoints). -Check that the automatic `zebrad` version increment matches the changes in the release: +## Missed Dependency Updates -
+Sometimes `dependabot` misses some dependency updates, or we accidentally turned them off. -If we're releasing a mainnet network upgrade, it is a `major` release: -1. Increment the `major` version of _*all*_ the Zebra crates. -2. Increment the `patch` version of the tower crates. +Here's how we make sure we got everything: +- [ ] Run `cargo update` on the latest `main` branch, and keep the output +- [ ] If needed, update [deny.toml](https://github.com/ZcashFoundation/zebra/blob/main/book/src/dev/continuous-integration.md#fixing-duplicate-dependencies-in-check-denytoml-bans) +- [ ] Open a separate PR with the changes +- [ ] Add the output of `cargo update` to that PR as a comment -If we're not releasing a mainnet network upgrade, check for features, major changes, deprecations, and removals. If this release has any, it is a `minor` release: -1. Increment the `minor` version of `zebrad`. -2. Increment the `pre-release` version of the other crates. -3. Increment the `patch` version of the tower crates. -Otherwise, it is a `patch` release: -1. Increment the `patch` version of `zebrad`. -2. Increment the `pre-release` version of the other crates. -3. Increment the `patch` version of the tower crates. +# Make Release Changes -Zebra's Rust API is not stable or supported, so we keep all the crates on the same beta `pre-release` version. +These release steps can be done a few days before the release, in the same PR: +- [ ] Make sure the PRs with the new checkpoint hashes and missed dependencies are already merged -
+## Versioning -### Version Locations +### How to Increment Versions -Once you know which versions you want to increment, you can find them in the: +Zebra follows [semantic versioning](https://semver.org). Semantic versions look like: MAJOR.MINOR.PATCH[-TAG.PRE-RELEASE] -zebrad (rc): -- [ ] zebrad `Cargo.toml` -- [ ] `README.md` -- [ ] `book/src/user/docker.md` +Choose a release level for `zebrad` based on the changes in the release that users will see: +- mainnet network upgrades are `major` releases +- new features, large changes, deprecations, and removals are `minor` releases +- otherwise, it is a `patch` release -crates (beta): -- [ ] zebra-* `Cargo.toml`s +Zebra's Rust API doesn't have any support or stability guarantees, so we keep all the `zebra-*` and `tower-*` crates on a beta `pre-release` version. -tower (patch): -- [ ] tower-* `Cargo.toml`s +### Update Crate Versions -auto-generated: -- [ ] `Cargo.lock`: run `cargo build` after updating all the `Cargo.toml`s +
-#### Version Tooling +If you're publishing crates for the first time: -You can use `fastmod` to interactively find and replace versions. +- [ ] Install `cargo-release`: `cargo install cargo-release` +- [ ] Make sure you are an owner of the crate or [a member of the Zebra crates.io `owners` group on GitHub](https://github.com/orgs/ZcashFoundation/teams/owners) -For example, you can do something like: -``` -fastmod --extensions rs,toml,md --fixed-strings '1.0.0' '1.1.0' zebrad README.md zebra-network/src/constants.rs book/src/user/docker.md -fastmod --extensions rs,toml,md --fixed-strings '1.0.0-beta.15' '1.0.0-beta.16' zebra-* -fastmod --extensions rs,toml,md --fixed-strings '0.2.30' '0.2.31' tower-batch tower-fallback -cargo build -``` +
-If you use `fastmod`, don't update versions in `CHANGELOG.md` or `zebra-dependencies-for-audit.md`. +- [ ] Update crate versions and do a release dry-run: + - [ ] `cargo release version --verbose --workspace --exclude zebrad beta` + - [ ] `cargo release version --verbose --package zebrad [ major | minor | patch ]` + - [ ] `cargo release publish --verbose --workspace --dry-run` +- [ ] Commit the version changes to your release PR branch using `git`: `cargo release commit --verbose --workspace` ## README +README updates can be skipped for urgent releases. + Update the README to: -- [ ] Remove any "Known Issues" that have been fixed +- [ ] Remove any "Known Issues" that have been fixed since the last release. - [ ] Update the "Build and Run Instructions" with any new dependencies. Check for changes in the `Dockerfile` since the last tag: `git diff docker/Dockerfile`. - [ ] If Zebra has started using newer Rust language features or standard library APIs, update the known working Rust version in the README, book, and `Cargo.toml`s You can use a command like: ```sh - fastmod --fixed-strings '1.58' '1.65' +fastmod --fixed-strings '1.58' '1.65' ``` -## Checkpoints - -For performance and security, we want to update the Zebra checkpoints in every release. -You can copy the latest checkpoints from CI by following [the zebra-checkpoints README](https://github.com/ZcashFoundation/zebra/blob/main/zebra-utils/README.md#zebra-checkpoints). - -## Missed Dependency Updates - -Sometimes `dependabot` misses some dependency updates, or we accidentally turned them off. 
- -Here's how we make sure we got everything: -- [ ] Run `cargo update` on the latest `main` branch, and keep the output -- [ ] If needed, update [deny.toml](https://github.com/ZcashFoundation/zebra/blob/main/book/src/dev/continuous-integration.md#fixing-duplicate-dependencies-in-check-denytoml-bans) -- [ ] Open a separate PR with the changes -- [ ] Add the output of `cargo update` to that PR as a comment - ## Change Log **Important**: Any merge into `main` deletes any edits to the draft changelog. @@ -106,55 +87,43 @@ We use [the Release Drafter workflow](https://github.com/marketplace/actions/rel To create the final change log: - [ ] Copy the **latest** draft changelog into `CHANGELOG.md` (there can be multiple draft releases) -- [ ] Delete any trivial changes. Keep the list of those, to include in the PR +- [ ] Delete any trivial changes + - [ ] Put the list of deleted changelog entries in a PR comment to make reviewing easier - [ ] Combine duplicate changes -- [ ] Edit change descriptions so they are consistent, and make sense to non-developers +- [ ] Edit change descriptions so they will make sense to Zebra users - [ ] Check the category for each change - Prefer the "Fix" category if you're not sure -
- -#### Change Categories +## Update End of Support -From "Keep a Changelog": -* `Added` for new features. -* `Changed` for changes in existing functionality. -* `Deprecated` for soon-to-be removed features. -* `Removed` for now removed features. -* `Fixed` for any bug fixes. -* `Security` in case of vulnerabilities. +The end of support height is calculated from the current blockchain height: +- [ ] Find where the Zcash blockchain tip is now by using a [Zcash explorer](https://zcashblockexplorer.com/blocks) or other tool. +- [ ] Replace `ESTIMATED_RELEASE_HEIGHT` in [`end_of_support.rs`](https://github.com/ZcashFoundation/zebra/blob/main/zebrad/src/components/sync/end_of_support.rs) with the height you estimate the release will be tagged. -
- -## Release support constants +
-Needed for the end of support feature. Please update the following constants [in this file](https://github.com/ZcashFoundation/zebra/blob/main/zebrad/src/components/sync/end_of_support.rs): +Optional: calculate the release tagging height -- [ ] `ESTIMATED_RELEASE_HEIGHT` (required) - Replace with the estimated height you estimate the release will be tagged. -
- - Find where the Zcash blockchain tip is now by using a [Zcash explorer](https://zcashblockexplorer.com/blocks) or other tool. - - Consider there are aprox `1152` blocks per day (with the current Zcash `75` seconds spacing). - - So for example if you think the release will be tagged somewhere in the next 3 days you can add `1152 * 3` to the current tip height and use that value here. -
+- Add `1152` blocks for each day until the release +- For example, if the release is in 3 days, add `1152 * 3` to the current Mainnet block height -## Create the Release +
### Create the Release PR -After you have the version increments, the updated checkpoints, any missed dependency updates, -and the updated changelog: - -- [ ] Make sure the PRs with the new checkpoint hashes and missed dependencies are already merged -- [ ] Push the version increments, the updated changelog and the release constants into a branch - (for example: `bump-v1.0.0` - this needs to be different to the tag name) +- [ ] Push the version increments, the updated changelog, and the release constants into a branch, + for example: `bump-v1.0.0` - this needs to be different to the tag name - [ ] Create a release PR by adding `&template=release-checklist.md` to the comparing url ([Example](https://github.com/ZcashFoundation/zebra/compare/bump-v1.0.0?expand=1&template=release-checklist.md)). - - [ ] Add the list of deleted changelog entries as a comment to make reviewing easier. - [ ] Freeze the [`batched` queue](https://dashboard.mergify.com/github/ZcashFoundation/repo/zebra/queues) using Mergify. - [ ] Mark all the release PRs as `Critical` priority, so they go in the `urgent` Mergify queue. 
-### Create the Release -- [ ] Once the PR has been merged, create a new release using the draft release as a base, by clicking the Edit icon in the [draft release](https://github.com/ZcashFoundation/zebra/releases) +# Release Zebra + +## Create the GitHub Pre-Release + +- [ ] Wait for all the release PRs to be merged +- [ ] Create a new release using the draft release as a base, by clicking the Edit icon in the [draft release](https://github.com/ZcashFoundation/zebra/releases) - [ ] Set the tag name to the version tag, for example: `v1.0.0` - [ ] Set the release to target the `main` branch @@ -167,23 +136,30 @@ and the updated changelog: - [ ] Publish the pre-release to GitHub using "Publish Release" - [ ] Delete all the [draft releases from the list of releases](https://github.com/ZcashFoundation/zebra/releases) -## Binary Testing +## Test the Pre-Release - [ ] Wait until the [Docker binaries have been built on `main`](https://github.com/ZcashFoundation/zebra/actions/workflows/continous-integration-docker.yml), and the quick tests have passed. (You can ignore the full sync and `lightwalletd` tests, because they take about a day to run.) 
- [ ] Wait until the [pre-release deployment machines have successfully launched](https://github.com/ZcashFoundation/zebra/actions/workflows/continous-delivery.yml) + +## Publish Release + - [ ] [Publish the release to GitHub](https://github.com/ZcashFoundation/zebra/releases) by disabling 'pre-release', then clicking "Set as the latest release" + +## Publish Crates + +- [ ] Run `cargo login` +- [ ] Publish the crates to crates.io: `cargo release publish --verbose --workspace --execute` +- [ ] Check that Zebra can be installed from `crates.io`: + `cargo install --force --version 1.0.0 zebrad && ~/.cargo/bin/zebrad` + +## Publish Docker Images - [ ] Wait until [the Docker images have been published](https://github.com/ZcashFoundation/zebra/actions/workflows/release-binaries.yml) - [ ] Test the Docker image using `docker run --tty --interactive zfnd/zebra:v1.0.0`, and put the output in a comment on the PR. (You can use [gcloud cloud shell](https://console.cloud.google.com/home/dashboard?cloudshell=true)) - [ ] Un-freeze the [`batched` queue](https://dashboard.mergify.com/github/ZcashFoundation/repo/zebra/queues) using Mergify. - -## Telling Zebra Users - -- [ ] Post a summary of the important changes in the release in the `#arborist` and `#communications` Slack channels - ## Release Failures If building or running fails after tagging: @@ -191,9 +167,9 @@ If building or running fails after tagging:
1. Fix the bug that caused the failure -2. Increment the patch version again, following these instructions from the start -3. Update the code and documentation with a **new** git tag +2. Start a new `patch` release +3. Skip the **Release Preparation**, and start at the **Release Changes** step 4. Update `CHANGELOG.md` with details about the fix -5. Tag a **new** release +5. Follow the release checklist for the new Zebra version
From 26313fa9fe94dcfacb84bb2e5f22677c1414fb99 Mon Sep 17 00:00:00 2001 From: Pili Guerra Date: Wed, 14 Jun 2023 00:16:56 +0100 Subject: [PATCH 085/265] Keep links within the zebra book whenever possible instead of going out to github (#6938) --- README.md | 2 +- book/src/user/install.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 87fd5092292..e816b1557ad 100644 --- a/README.md +++ b/README.md @@ -58,7 +58,7 @@ This command will run our latest release, and sync it to the tip: docker run zfnd/zebra:1.0.0-rc.9 ``` -For more information, read our [Docker documentation](book/src/user/docker.md). +For more information, read our [Docker documentation](https://zebra.zfnd.org/user/docker.html). ### Building Zebra diff --git a/book/src/user/install.md b/book/src/user/install.md index 6f0394839d6..9f9826f6412 100644 --- a/book/src/user/install.md +++ b/book/src/user/install.md @@ -1,6 +1,6 @@ # Installing Zebra -Follow the [Docker or compilation instructions in the README](https://github.com/ZcashFoundation/zebra#getting-started). +Follow the [Docker or compilation instructions](https://zebra.zfnd.org/index.html#getting-started). 
#### ARM From fe859bd6a1abe4678a7d29b25c021463cc22b857 Mon Sep 17 00:00:00 2001 From: Deirdre Connolly Date: Wed, 14 Jun 2023 00:35:45 -0400 Subject: [PATCH 086/265] =?UTF-8?q?=E2=9C=A8=20Release=201.0.0=20=E2=9C=A8?= =?UTF-8?q?=20(#6877)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Include zebra-* and tower-fallback versions and paths for tower-batch-control * Remove versions for dev-dependencise * fix(legal): Use correct licenses for tower-batch-control and zebra-network (#6927) * Don't modify whitespace * Resolve rustdoc and cargo-release warnings * Bump semver * Re-up Cargo.lock * Update README.md Co-authored-by: teor * Update book/src/user/docker.md Co-authored-by: teor * Update 'Future Work' section post stable release * Add release notes to CHANGELOG.md * Update CHANGELOG.md Co-authored-by: teor * Remove Docker container change from the changelog Co-authored-by: teor * Point at the appropriate zebra-* crate versions * Update ESTIMATED_RELEASE_HEIGHT to 2,121,200 * Add CHANGELOG summary of the 1.0.0 release * Update CHANGELOG.md * Update CHANGELOG.md Co-authored-by: teor * Bump tower-* beta.* versions and use them --------- Co-authored-by: teor Co-authored-by: Pili Guerra --- CHANGELOG.md | 29 ++++++++++++++++++++ Cargo.lock | 24 ++++++++-------- README.md | 6 ++-- book/src/user/docker.md | 4 +-- tower-batch-control/Cargo.toml | 2 +- tower-fallback/Cargo.toml | 2 +- zebra-chain/Cargo.toml | 4 +-- zebra-consensus/Cargo.toml | 14 +++++----- zebra-network/Cargo.toml | 4 +-- zebra-node-services/Cargo.toml | 4 +-- zebra-rpc/Cargo.toml | 14 +++++----- zebra-script/Cargo.toml | 4 +-- zebra-state/Cargo.toml | 6 ++-- zebra-test/Cargo.toml | 2 +- zebra-utils/Cargo.toml | 8 +++--- zebrad/Cargo.toml | 16 +++++------ zebrad/src/components/sync/end_of_support.rs | 2 +- 17 files changed, 86 insertions(+), 59 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index fb74c251214..94680a1c7a1 100644 --- a/CHANGELOG.md +++ 
b/CHANGELOG.md @@ -6,6 +6,35 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org). +## [Zebra 1.0.0](https://github.com/ZcashFoundation/zebra/releases/tag/v1.0.0) - 2023-06-14 + +This is our 1.0.0 stable release. + +This release also fixes a panic at startup when parsing the app version, [publishes `zebrad` to crates.io](https://crates.io/crates/zebrad), and [publishes to Docker Hub under the `latest` tag](https://hub.docker.com/r/zfnd/zebra/tags). + +Please report bugs to [the Zebra GitHub repository](https://github.com/ZcashFoundation/zebra/issues/new?assignees=&labels=C-bug%2C+S-needs-triage&projects=&template=bug_report.yml&title=) + +### Security + +- Avoid potential concurrency bugs in outbound handshakes ([#6869](https://github.com/ZcashFoundation/zebra/pull/6869)) + +### Changed + +- Publish to [crates.io](https://crates.io/crates/zebrad) ([#6908](https://github.com/ZcashFoundation/zebra/pull/6908)) +- Rename tower-batch to tower-batch-control ([#6907](https://github.com/ZcashFoundation/zebra/pull/6907)) +- Upgrade to ed25519-zebra 4.0.0 ([#6881](https://github.com/ZcashFoundation/zebra/pull/6881)) + +### Fixed + +- Stop panicking at startup when parsing the app version ([#6888](https://github.com/ZcashFoundation/zebra/pull/6888)) +- Avoid a race condition in testing modified JoinSplits ([#6921](https://github.com/ZcashFoundation/zebra/pull/6921)) + +### Contributors + +Thank you to everyone who contributed to this release, we couldn't make Zebra without you: +@dconnolly, @gustavovalverde, @oxarbitrage, @teor2345 and @upbqdn + + ## [Zebra 1.0.0-rc.9](https://github.com/ZcashFoundation/zebra/releases/tag/v1.0.0-rc.9) - 2023-06-07 This release continues to address audit findings.
It fixes multiple network protocol and RPC bugs, diff --git a/Cargo.lock b/Cargo.lock index 84323d0187a..db9bd5f2143 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4719,7 +4719,7 @@ dependencies = [ [[package]] name = "tower-batch-control" -version = "0.2.41-beta.1" +version = "0.2.41-beta.2" dependencies = [ "color-eyre", "ed25519-zebra", @@ -4743,7 +4743,7 @@ dependencies = [ [[package]] name = "tower-fallback" -version = "0.2.41-beta.1" +version = "0.2.41-beta.2" dependencies = [ "futures-core", "pin-project", @@ -5639,7 +5639,7 @@ dependencies = [ [[package]] name = "zebra-chain" -version = "1.0.0-beta.25" +version = "1.0.0-beta.26" dependencies = [ "bitflags 2.3.1", "bitflags-serde-legacy", @@ -5699,7 +5699,7 @@ dependencies = [ [[package]] name = "zebra-consensus" -version = "1.0.0-beta.25" +version = "1.0.0-beta.26" dependencies = [ "bellman", "blake2b_simd", @@ -5744,7 +5744,7 @@ dependencies = [ [[package]] name = "zebra-network" -version = "1.0.0-beta.25" +version = "1.0.0-beta.26" dependencies = [ "bitflags 2.3.1", "byteorder", @@ -5784,7 +5784,7 @@ dependencies = [ [[package]] name = "zebra-node-services" -version = "1.0.0-beta.25" +version = "1.0.0-beta.26" dependencies = [ "color-eyre", "jsonrpc-core", @@ -5796,7 +5796,7 @@ dependencies = [ [[package]] name = "zebra-rpc" -version = "1.0.0-beta.25" +version = "1.0.0-beta.26" dependencies = [ "chrono", "futures", @@ -5828,7 +5828,7 @@ dependencies = [ [[package]] name = "zebra-script" -version = "1.0.0-beta.25" +version = "1.0.0-beta.26" dependencies = [ "displaydoc", "hex", @@ -5841,7 +5841,7 @@ dependencies = [ [[package]] name = "zebra-state" -version = "1.0.0-beta.25" +version = "1.0.0-beta.26" dependencies = [ "bincode", "chrono", @@ -5882,7 +5882,7 @@ dependencies = [ [[package]] name = "zebra-test" -version = "1.0.0-beta.25" +version = "1.0.0-beta.26" dependencies = [ "color-eyre", "futures", @@ -5909,7 +5909,7 @@ dependencies = [ [[package]] name = "zebra-utils" -version = "1.0.0-beta.25" +version 
= "1.0.0-beta.26" dependencies = [ "color-eyre", "hex", @@ -5930,7 +5930,7 @@ dependencies = [ [[package]] name = "zebrad" -version = "1.0.0-rc.9" +version = "1.0.0" dependencies = [ "abscissa_core", "atty", diff --git a/README.md b/README.md index e816b1557ad..9c4ae188603 100644 --- a/README.md +++ b/README.md @@ -55,7 +55,7 @@ section in the Zebra book for system requirements. This command will run our latest release, and sync it to the tip: ```sh -docker run zfnd/zebra:1.0.0-rc.9 +docker run zfnd/zebra:latest ``` For more information, read our [Docker documentation](https://zebra.zfnd.org/user/docker.html). @@ -101,7 +101,7 @@ Note that the package `clang` includes `libclang` as well as the C++ compiler. Once the dependencies are in place, you can build Zebra ```sh -cargo install --locked --git https://github.com/ZcashFoundation/zebra --tag v1.0.0-rc.9 zebrad +cargo install --locked --git https://github.com/ZcashFoundation/zebra --tag v1.0.0 zebrad ``` You can start Zebra by @@ -164,8 +164,6 @@ There are a few bugs in Zebra that we're still working on fixing: ## Future Work -The Zebra team is currently working towards an audited stable release. - We will continue to add new features as part of future network upgrades, and in response to community feedback. ## Documentation diff --git a/book/src/user/docker.md b/book/src/user/docker.md index a47bf2e9b64..e53c51aa6c3 100644 --- a/book/src/user/docker.md +++ b/book/src/user/docker.md @@ -11,13 +11,13 @@ You can deploy Zebra for a daily use with the images available in [Docker Hub](h ### Ready to use image ```shell -docker run --detach zfnd/zebra:1.0.0-rc.9 +docker run --detach zfnd/zebra:latest ``` ### Build it locally ```shell -git clone --depth 1 --branch v1.0.0-rc.9 https://github.com/ZcashFoundation/zebra.git +git clone --depth 1 --branch v1.0.0 https://github.com/ZcashFoundation/zebra.git docker build --file docker/Dockerfile --target runtime --tag zebra:local . 
docker run --detach zebra:local ``` diff --git a/tower-batch-control/Cargo.toml b/tower-batch-control/Cargo.toml index d8d202acdf4..12b4c265d99 100644 --- a/tower-batch-control/Cargo.toml +++ b/tower-batch-control/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "tower-batch-control" -version = "0.2.41-beta.1" +version = "0.2.41-beta.2" authors = ["Zcash Foundation ", "Tower Maintainers "] description = "Tower middleware for batch request processing" # # Legal diff --git a/tower-fallback/Cargo.toml b/tower-fallback/Cargo.toml index af4aa8d1079..fd18e859eee 100644 --- a/tower-fallback/Cargo.toml +++ b/tower-fallback/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "tower-fallback" -version = "0.2.41-beta.1" +version = "0.2.41-beta.2" authors = ["Zcash Foundation "] description = "A Tower service combinator that sends requests to a first service, then retries processing on a second fallback service if the first service errors." license = "MIT OR Apache-2.0" diff --git a/zebra-chain/Cargo.toml b/zebra-chain/Cargo.toml index 5495b1a356b..a3de6003887 100644 --- a/zebra-chain/Cargo.toml +++ b/zebra-chain/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "zebra-chain" -version = "1.0.0-beta.25" +version = "1.0.0-beta.26" authors = ["Zcash Foundation "] description = "Core Zcash data structures" license = "MIT OR Apache-2.0" @@ -116,7 +116,7 @@ rand_chacha = { version = "0.3.1", optional = true } tokio = { version = "1.28.2", features = ["tracing"], optional = true } -zebra-test = { path = "../zebra-test/", version = "1.0.0-beta.25", optional = true } +zebra-test = { path = "../zebra-test/", version = "1.0.0-beta.26", optional = true } [dev-dependencies] # Benchmarks diff --git a/zebra-consensus/Cargo.toml b/zebra-consensus/Cargo.toml index 3c0e108fa77..524bf70b701 100644 --- a/zebra-consensus/Cargo.toml +++ b/zebra-consensus/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "zebra-consensus" -version = "1.0.0-beta.25" +version = "1.0.0-beta.26" authors = ["Zcash Foundation "] description = 
"Implementation of Zcash consensus checks" license = "MIT OR Apache-2.0" @@ -62,13 +62,13 @@ orchard = "0.4.0" zcash_proofs = { version = "0.11.0", features = ["local-prover", "multicore", "download-params"] } -tower-fallback = { path = "../tower-fallback/", version = "0.2.41-beta.1" } -tower-batch-control = { path = "../tower-batch-control/", version = "0.2.41-beta.1" } +tower-fallback = { path = "../tower-fallback/", version = "0.2.41-beta.2" } +tower-batch-control = { path = "../tower-batch-control/", version = "0.2.41-beta.2" } -zebra-script = { path = "../zebra-script", version = "1.0.0-beta.25" } -zebra-state = { path = "../zebra-state", version = "1.0.0-beta.25" } -zebra-node-services = { path = "../zebra-node-services", version = "1.0.0-beta.25" } -zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.25" } +zebra-script = { path = "../zebra-script", version = "1.0.0-beta.26" } +zebra-state = { path = "../zebra-state", version = "1.0.0-beta.26" } +zebra-node-services = { path = "../zebra-node-services", version = "1.0.0-beta.26" } +zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.26" } # prod feature progress-bar howudoin = { version = "0.1.2", optional = true } diff --git a/zebra-network/Cargo.toml b/zebra-network/Cargo.toml index f1b5429c235..339086a9c2f 100644 --- a/zebra-network/Cargo.toml +++ b/zebra-network/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "zebra-network" -version = "1.0.0-beta.25" +version = "1.0.0-beta.26" authors = ["Zcash Foundation ", "Tower Maintainers "] description = "Networking code for Zebra" # # Legal @@ -78,7 +78,7 @@ howudoin = { version = "0.1.2", optional = true } proptest = { version = "1.2.0", optional = true } proptest-derive = { version = "0.3.0", optional = true } -zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.25" } +zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.26" } [dev-dependencies] proptest = "1.2.0" diff --git a/zebra-node-services/Cargo.toml 
b/zebra-node-services/Cargo.toml index 04639549ad2..61c671adcbb 100644 --- a/zebra-node-services/Cargo.toml +++ b/zebra-node-services/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "zebra-node-services" -version = "1.0.0-beta.25" +version = "1.0.0-beta.26" authors = ["Zcash Foundation "] description = "The interfaces of some Zebra node services" license = "MIT OR Apache-2.0" @@ -35,7 +35,7 @@ rpc-client = [ ] [dependencies] -zebra-chain = { path = "../zebra-chain" , version = "1.0.0-beta.25"} +zebra-chain = { path = "../zebra-chain" , version = "1.0.0-beta.26"} # Optional dependencies diff --git a/zebra-rpc/Cargo.toml b/zebra-rpc/Cargo.toml index db597d6b29d..84add04be7a 100644 --- a/zebra-rpc/Cargo.toml +++ b/zebra-rpc/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "zebra-rpc" -version = "1.0.0-beta.25" +version = "1.0.0-beta.26" authors = ["Zcash Foundation "] description = "A Zebra JSON Remote Procedure Call (JSON-RPC) interface" license = "MIT OR Apache-2.0" @@ -70,12 +70,12 @@ zcash_address = { version = "0.2.1", optional = true } # Test-only feature proptest-impl proptest = { version = "1.2.0", optional = true } -zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.25", features = ["json-conversion"] } -zebra-consensus = { path = "../zebra-consensus", version = "1.0.0-beta.25" } -zebra-network = { path = "../zebra-network", version = "1.0.0-beta.25" } -zebra-node-services = { path = "../zebra-node-services", version = "1.0.0-beta.25" } -zebra-script = { path = "../zebra-script", version = "1.0.0-beta.25" } -zebra-state = { path = "../zebra-state", version = "1.0.0-beta.25" } +zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.26", features = ["json-conversion"] } +zebra-consensus = { path = "../zebra-consensus", version = "1.0.0-beta.26" } +zebra-network = { path = "../zebra-network", version = "1.0.0-beta.26" } +zebra-node-services = { path = "../zebra-node-services", version = "1.0.0-beta.26" } +zebra-script = { path = "../zebra-script", 
version = "1.0.0-beta.26" } +zebra-state = { path = "../zebra-state", version = "1.0.0-beta.26" } [dev-dependencies] insta = { version = "1.29.0", features = ["redactions", "json", "ron"] } diff --git a/zebra-script/Cargo.toml b/zebra-script/Cargo.toml index 618f51b934e..697dd1a0d11 100644 --- a/zebra-script/Cargo.toml +++ b/zebra-script/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "zebra-script" -version = "1.0.0-beta.25" +version = "1.0.0-beta.26" authors = ["Zcash Foundation "] description = "Zebra script verification wrapping zcashd's zcash_script library" license = "MIT OR Apache-2.0" @@ -17,7 +17,7 @@ categories = ["api-bindings", "cryptography::cryptocurrencies"] [dependencies] zcash_script = "0.1.12" -zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.25" } +zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.26" } thiserror = "1.0.40" displaydoc = "0.2.4" diff --git a/zebra-state/Cargo.toml b/zebra-state/Cargo.toml index 20df4da55ae..78c0c16d891 100644 --- a/zebra-state/Cargo.toml +++ b/zebra-state/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "zebra-state" -version = "1.0.0-beta.25" +version = "1.0.0-beta.26" authors = ["Zcash Foundation "] description = "State contextual verification and storage code for Zebra" license = "MIT OR Apache-2.0" @@ -69,13 +69,13 @@ tracing = "0.1.37" elasticsearch = { version = "8.5.0-alpha.1", package = "elasticsearch", optional = true } serde_json = { version = "1.0.96", package = "serde_json", optional = true } -zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.25" } +zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.26" } # prod feature progress-bar howudoin = { version = "0.1.2", optional = true } # test feature proptest-impl -zebra-test = { path = "../zebra-test/", version = "1.0.0-beta.25", optional = true } +zebra-test = { path = "../zebra-test/", version = "1.0.0-beta.26", optional = true } proptest = { version = "1.2.0", optional = true } proptest-derive = { version = 
"0.3.0", optional = true } diff --git a/zebra-test/Cargo.toml b/zebra-test/Cargo.toml index 649a3835bbc..d6edb5c5aa0 100644 --- a/zebra-test/Cargo.toml +++ b/zebra-test/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "zebra-test" -version = "1.0.0-beta.25" +version = "1.0.0-beta.26" authors = ["Zcash Foundation "] description = "Test harnesses and test vectors for Zebra" license = "MIT OR Apache-2.0" diff --git a/zebra-utils/Cargo.toml b/zebra-utils/Cargo.toml index 99c80d62dec..cc65e650c3b 100644 --- a/zebra-utils/Cargo.toml +++ b/zebra-utils/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "zebra-utils" -version = "1.0.0-beta.25" +version = "1.0.0-beta.26" authors = ["Zcash Foundation "] description = "Developer tools for Zebra maintenance and testing" license = "MIT OR Apache-2.0" @@ -70,11 +70,11 @@ tracing-error = "0.2.0" tracing-subscriber = "0.3.17" thiserror = "1.0.40" -zebra-node-services = { path = "../zebra-node-services", version = "1.0.0-beta.25" } -zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.25" } +zebra-node-services = { path = "../zebra-node-services", version = "1.0.0-beta.26" } +zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.26" } # These crates are needed for the block-template-to-proposal binary -zebra-rpc = { path = "../zebra-rpc", version = "1.0.0-beta.25", optional = true } +zebra-rpc = { path = "../zebra-rpc", version = "1.0.0-beta.26", optional = true } # These crates are needed for the zebra-checkpoints binary itertools = { version = "0.10.5", optional = true } diff --git a/zebrad/Cargo.toml b/zebrad/Cargo.toml index 6316d5996ec..7eb30a2c744 100644 --- a/zebrad/Cargo.toml +++ b/zebrad/Cargo.toml @@ -1,7 +1,7 @@ [package] # Crate metadata name = "zebrad" -version = "1.0.0-rc.9" +version = "1.0.0" authors = ["Zcash Foundation "] description = "The Zcash Foundation's independent, consensus-compatible implementation of a Zcash node" license = "MIT OR Apache-2.0" @@ -116,14 +116,14 @@ 
test_sync_past_mandatory_checkpoint_mainnet = [] test_sync_past_mandatory_checkpoint_testnet = [] [dependencies] -zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.25" } -zebra-consensus = { path = "../zebra-consensus", version = "1.0.0-beta.25" } -zebra-network = { path = "../zebra-network", version = "1.0.0-beta.25" } -zebra-node-services = { path = "../zebra-node-services", version = "1.0.0-beta.25" } -zebra-rpc = { path = "../zebra-rpc", version = "1.0.0-beta.25" } -zebra-state = { path = "../zebra-state", version = "1.0.0-beta.25" } +zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.26" } +zebra-consensus = { path = "../zebra-consensus", version = "1.0.0-beta.26" } +zebra-network = { path = "../zebra-network", version = "1.0.0-beta.26" } +zebra-node-services = { path = "../zebra-node-services", version = "1.0.0-beta.26" } +zebra-rpc = { path = "../zebra-rpc", version = "1.0.0-beta.26" } +zebra-state = { path = "../zebra-state", version = "1.0.0-beta.26" } # Required for crates.io publishing, but it's only used in tests -zebra-utils = { path = "../zebra-utils", version = "1.0.0-beta.25", optional = true } +zebra-utils = { path = "../zebra-utils", version = "1.0.0-beta.26", optional = true } abscissa_core = "0.7.0" clap = { version = "4.3.3", features = ["cargo"] } diff --git a/zebrad/src/components/sync/end_of_support.rs b/zebrad/src/components/sync/end_of_support.rs index 09b5caa60df..9e4cb2aedeb 100644 --- a/zebrad/src/components/sync/end_of_support.rs +++ b/zebrad/src/components/sync/end_of_support.rs @@ -13,7 +13,7 @@ use zebra_chain::{ use crate::application::release_version; /// The estimated height that this release started to run. -pub const ESTIMATED_RELEASE_HEIGHT: u32 = 2_113_936; +pub const ESTIMATED_RELEASE_HEIGHT: u32 = 2_121_200; /// The maximum number of days after `ESTIMATED_RELEASE_HEIGHT` where a Zebra server will run /// without halting. 
From 17bd7884ea2bad663f950b77598a071a804a2ceb Mon Sep 17 00:00:00 2001 From: Alfredo Garcia Date: Wed, 14 Jun 2023 16:01:39 -0300 Subject: [PATCH 087/265] fix(build): add elasticsearch feature to block serialize (#6709) * add elasticsearch feature to block serialize * fix for elastic feature * add zebra-chain elasticsearch dep to zebrad --- zebra-chain/Cargo.toml | 3 +++ zebra-chain/src/block.rs | 5 ++++- zebra-chain/src/transaction.rs | 5 ++++- zebra-chain/src/transparent.rs | 21 ++++++++++++++++----- zebra-state/Cargo.toml | 1 + zebrad/Cargo.toml | 1 + 6 files changed, 29 insertions(+), 7 deletions(-) diff --git a/zebra-chain/Cargo.toml b/zebra-chain/Cargo.toml index a3de6003887..34f4dd57aef 100644 --- a/zebra-chain/Cargo.toml +++ b/zebra-chain/Cargo.toml @@ -29,6 +29,9 @@ getblocktemplate-rpcs = [ "zcash_address", ] +# Experimental elasticsearch support +elasticsearch = [] + # Test-only features proptest-impl = [ diff --git a/zebra-chain/src/block.rs b/zebra-chain/src/block.rs index 3ae0988194a..d472e930c7f 100644 --- a/zebra-chain/src/block.rs +++ b/zebra-chain/src/block.rs @@ -43,7 +43,10 @@ pub use arbitrary::LedgerState; /// A Zcash block, containing a header and a list of transactions. #[derive(Clone, Debug, Eq, PartialEq)] -#[cfg_attr(any(test, feature = "proptest-impl"), derive(Serialize))] +#[cfg_attr( + any(test, feature = "proptest-impl", feature = "elasticsearch"), + derive(Serialize) +)] pub struct Block { /// The block header, containing block metadata. pub header: Arc
, diff --git a/zebra-chain/src/transaction.rs b/zebra-chain/src/transaction.rs index 0b2c25583d2..583ca9681e8 100644 --- a/zebra-chain/src/transaction.rs +++ b/zebra-chain/src/transaction.rs @@ -63,7 +63,10 @@ use crate::{ /// internally by different enum variants. Because we checkpoint on Canopy /// activation, we do not validate any pre-Sapling transaction types. #[derive(Clone, Debug, PartialEq, Eq)] -#[cfg_attr(any(test, feature = "proptest-impl"), derive(Serialize))] +#[cfg_attr( + any(test, feature = "proptest-impl", feature = "elasticsearch"), + derive(Serialize) +)] pub enum Transaction { /// A fully transparent transaction (`version = 1`). V1 { diff --git a/zebra-chain/src/transparent.rs b/zebra-chain/src/transparent.rs index afea036afb6..7982468288c 100644 --- a/zebra-chain/src/transparent.rs +++ b/zebra-chain/src/transparent.rs @@ -66,7 +66,10 @@ pub const EXTRA_ZEBRA_COINBASE_DATA: &str = "z\u{1F993}"; // // TODO: rename to ExtraCoinbaseData, because height is also part of the coinbase data? #[derive(Clone, Eq, PartialEq)] -#[cfg_attr(any(test, feature = "proptest-impl"), derive(Serialize))] +#[cfg_attr( + any(test, feature = "proptest-impl", feature = "elasticsearch"), + derive(Serialize) +)] pub struct CoinbaseData( /// Invariant: this vec, together with the coinbase height, must be less than /// 100 bytes. We enforce this by only constructing CoinbaseData fields by @@ -110,7 +113,11 @@ impl std::fmt::Debug for CoinbaseData { /// /// A particular transaction output reference. #[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)] -#[cfg_attr(any(test, feature = "proptest-impl"), derive(Arbitrary, Serialize))] +#[cfg_attr(any(test, feature = "proptest-impl"), derive(Arbitrary))] +#[cfg_attr( + any(test, feature = "proptest-impl", feature = "elasticsearch"), + derive(Serialize) +)] pub struct OutPoint { /// References the transaction that contains the UTXO being spent. /// @@ -145,7 +152,10 @@ impl OutPoint { /// A transparent input to a transaction. 
#[derive(Clone, Debug, Eq, PartialEq)] -#[cfg_attr(any(test, feature = "proptest-impl"), derive(Serialize))] +#[cfg_attr( + any(test, feature = "proptest-impl", feature = "elasticsearch"), + derive(Serialize) +)] pub enum Input { /// A reference to an output of a previous transaction. PrevOut { @@ -383,9 +393,10 @@ impl Input { /// that spends my UTXO and sends 1 ZEC to you and 1 ZEC back to me /// (just like receiving change). #[derive(Clone, Debug, Eq, PartialEq, Hash)] +#[cfg_attr(any(test, feature = "proptest-impl"), derive(Arbitrary, Deserialize))] #[cfg_attr( - any(test, feature = "proptest-impl"), - derive(Arbitrary, Serialize, Deserialize) + any(test, feature = "proptest-impl", feature = "elasticsearch"), + derive(Serialize) )] pub struct Output { /// Transaction value. diff --git a/zebra-state/Cargo.toml b/zebra-state/Cargo.toml index 78c0c16d891..41c71c58449 100644 --- a/zebra-state/Cargo.toml +++ b/zebra-state/Cargo.toml @@ -39,6 +39,7 @@ proptest-impl = [ elasticsearch = [ "dep:elasticsearch", "dep:serde_json", + "zebra-chain/elasticsearch", ] [dependencies] diff --git a/zebrad/Cargo.toml b/zebrad/Cargo.toml index 7eb30a2c744..bd00bb75d9b 100644 --- a/zebrad/Cargo.toml +++ b/zebrad/Cargo.toml @@ -44,6 +44,7 @@ getblocktemplate-rpcs = [ elasticsearch = [ "zebra-state/elasticsearch", + "zebra-chain/elasticsearch", ] sentry = ["dep:sentry"] From 219d47227076268fbaa9da0edfc5aa074a0c25f3 Mon Sep 17 00:00:00 2001 From: Alfredo Garcia Date: Wed, 14 Jun 2023 16:02:55 -0300 Subject: [PATCH 088/265] fix(compatibility): Replace or add RPC content type header when applicable (#6885) * ignore client supplied content-type header and use json always * rename method * add one more check to test * Add missing proptest-impl dependency from zebrad to zebra-rpc * change to replace only specific content type * remove cargo mods * refactor `insert_or_replace_content_type_header` * add security comments * allow variants of text/plain ocntent_type --------- Co-authored-by: 
teor --- zebra-node-services/src/rpc_client.rs | 38 +++++++++ .../src/server/http_request_compatibility.rs | 49 +++++++++-- zebrad/tests/acceptance.rs | 82 +++++++++++++++++++ 3 files changed, 161 insertions(+), 8 deletions(-) diff --git a/zebra-node-services/src/rpc_client.rs b/zebra-node-services/src/rpc_client.rs index e214af7350e..350b373aa72 100644 --- a/zebra-node-services/src/rpc_client.rs +++ b/zebra-node-services/src/rpc_client.rs @@ -43,6 +43,44 @@ impl RpcRequestClient { .await } + /// Builds rpc request with a variable `content-type`. + pub async fn call_with_content_type( + &self, + method: impl AsRef, + params: impl AsRef, + content_type: String, + ) -> reqwest::Result { + let method = method.as_ref(); + let params = params.as_ref(); + + self.client + .post(format!("http://{}", &self.rpc_address)) + .body(format!( + r#"{{"jsonrpc": "2.0", "method": "{method}", "params": {params}, "id":123 }}"# + )) + .header("Content-Type", content_type) + .send() + .await + } + + /// Builds rpc request with no content type. 
+ pub async fn call_with_no_content_type( + &self, + method: impl AsRef, + params: impl AsRef, + ) -> reqwest::Result { + let method = method.as_ref(); + let params = params.as_ref(); + + self.client + .post(format!("http://{}", &self.rpc_address)) + .body(format!( + r#"{{"jsonrpc": "2.0", "method": "{method}", "params": {params}, "id":123 }}"# + )) + .send() + .await + } + /// Builds rpc request and gets text from response pub async fn text_from_call( &self, diff --git a/zebra-rpc/src/server/http_request_compatibility.rs b/zebra-rpc/src/server/http_request_compatibility.rs index 63f445e917b..99e604843fb 100644 --- a/zebra-rpc/src/server/http_request_compatibility.rs +++ b/zebra-rpc/src/server/http_request_compatibility.rs @@ -43,8 +43,8 @@ impl RequestMiddleware for FixHttpRequestMiddleware { ) -> jsonrpc_http_server::RequestMiddlewareAction { tracing::trace!(?request, "original HTTP request"); - // Fix the request headers - FixHttpRequestMiddleware::add_missing_content_type_header(request.headers_mut()); + // Fix the request headers if needed and we can do so. + FixHttpRequestMiddleware::insert_or_replace_content_type_header(request.headers_mut()); // Fix the request body let request = request.map(|body| { @@ -103,11 +103,44 @@ impl FixHttpRequestMiddleware { .replace(", \"jsonrpc\": \"1.0\"", "") } - /// If the `content-type` HTTP header is not present, - /// add an `application/json` content type header. - pub fn add_missing_content_type_header(headers: &mut hyper::header::HeaderMap) { - headers - .entry(hyper::header::CONTENT_TYPE) - .or_insert(hyper::header::HeaderValue::from_static("application/json")); + /// Insert or replace client supplied `content-type` HTTP header to `application/json` in the following cases: + /// + /// - no `content-type` supplied. 
+ /// - supplied `content-type` start with `text/plain`, for example: + /// - `text/plain` + /// - `text/plain;` + /// - `text/plain; charset=utf-8` + /// + /// `application/json` is the only `content-type` accepted by the Zebra rpc endpoint: + /// + /// + /// + /// # Security + /// + /// - `content-type` headers exist so that applications know they are speaking the correct protocol with the correct format. + /// We can be a bit flexible, but there are some types (such as binary) we shouldn't allow. + /// In particular, the "application/x-www-form-urlencoded" header should be rejected, so browser forms can't be used to attack + /// a local RPC port. See "The Role of Routers in the CSRF Attack" in + /// + /// - Checking all the headers is secure, but only because hyper has custom code that just reads the first content-type header. + /// + pub fn insert_or_replace_content_type_header(headers: &mut hyper::header::HeaderMap) { + if !headers.contains_key(hyper::header::CONTENT_TYPE) + || headers + .get(hyper::header::CONTENT_TYPE) + .filter(|value| { + value + .to_str() + .ok() + .unwrap_or_default() + .starts_with("text/plain") + }) + .is_some() + { + headers.insert( + hyper::header::CONTENT_TYPE, + hyper::header::HeaderValue::from_static("application/json"), + ); + } } } diff --git a/zebrad/tests/acceptance.rs b/zebrad/tests/acceptance.rs index c9953fba1e2..57062d59877 100644 --- a/zebrad/tests/acceptance.rs +++ b/zebrad/tests/acceptance.rs @@ -1467,6 +1467,88 @@ async fn rpc_endpoint(parallel_cpu_threads: bool) -> Result<()> { Ok(()) } +/// Test that the JSON-RPC endpoint responds to requests with different content types. +/// +/// This test ensures that the curl examples of zcashd rpc methods will also work in Zebra. 
+/// +/// https://zcash.github.io/rpc/getblockchaininfo.html +#[tokio::test] +async fn rpc_endpoint_client_content_type() -> Result<()> { + let _init_guard = zebra_test::init(); + if zebra_test::net::zebra_skip_network_tests() { + return Ok(()); + } + + // Write a configuration that has RPC listen_addr set + // [Note on port conflict](#Note on port conflict) + let mut config = random_known_rpc_port_config(true)?; + + let dir = testdir()?.with_config(&mut config)?; + let mut child = dir.spawn_child(args!["start"])?; + + // Wait until port is open. + child.expect_stdout_line_matches( + format!("Opened RPC endpoint at {}", config.rpc.listen_addr.unwrap()).as_str(), + )?; + + // Create an http client + let client = RpcRequestClient::new(config.rpc.listen_addr.unwrap()); + + // Call to `getinfo` RPC method with a no content type. + let res = client + .call_with_no_content_type("getinfo", "[]".to_string()) + .await?; + + // Zebra will insert valid `application/json` content type and succeed. + assert!(res.status().is_success()); + + // Call to `getinfo` RPC method with a `text/plain`. + let res = client + .call_with_content_type("getinfo", "[]".to_string(), "text/plain".to_string()) + .await?; + + // Zebra will replace to the valid `application/json` content type and succeed. + assert!(res.status().is_success()); + + // Call to `getinfo` RPC method with a `text/plain` content type as the zcashd rpc docs. + let res = client + .call_with_content_type("getinfo", "[]".to_string(), "text/plain;".to_string()) + .await?; + + // Zebra will replace to the valid `application/json` content type and succeed. + assert!(res.status().is_success()); + + // Call to `getinfo` RPC method with a `text/plain; other string` content type. + let res = client + .call_with_content_type( + "getinfo", + "[]".to_string(), + "text/plain; other string".to_string(), + ) + .await?; + + // Zebra will replace to the valid `application/json` content type and succeed. 
+ assert!(res.status().is_success()); + + // Call to `getinfo` RPC method with a valid `application/json` content type. + let res = client + .call_with_content_type("getinfo", "[]".to_string(), "application/json".to_string()) + .await?; + + // Zebra will not replace valid content type and succeed. + assert!(res.status().is_success()); + + // Call to `getinfo` RPC method with invalid string as content type. + let res = client + .call_with_content_type("getinfo", "[]".to_string(), "whatever".to_string()) + .await?; + + // Zebra will not replace unrecognized content type and fail. + assert!(res.status().is_client_error()); + + Ok(()) +} + /// Test that Zebra's non-blocking logger works, by creating lots of debug output, but not reading the logs. /// Then make sure Zebra drops excess log lines. (Previously, it would block waiting for logs to be read.) /// From dde790e92a94068a4e92abbe9db369674fea17ad Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 14 Jun 2023 19:04:06 +0000 Subject: [PATCH 089/265] build(deps): bump log from 0.4.18 to 0.4.19 (#6919) Bumps [log](https://github.com/rust-lang/log) from 0.4.18 to 0.4.19. - [Release notes](https://github.com/rust-lang/log/releases) - [Changelog](https://github.com/rust-lang/log/blob/master/CHANGELOG.md) - [Commits](https://github.com/rust-lang/log/compare/0.4.18...0.4.19) --- updated-dependencies: - dependency-name: log dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- zebrad/Cargo.toml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index db9bd5f2143..02bbb1f5a47 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2377,9 +2377,9 @@ dependencies = [ [[package]] name = "log" -version = "0.4.18" +version = "0.4.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "518ef76f2f87365916b142844c16d8fefd85039bc5699050210a7778ee1cd1de" +checksum = "b06a4cde4c0f271a446782e3eff8de789548ce57dbc8eca9292c27f4a42004b4" [[package]] name = "lz4-sys" diff --git a/zebrad/Cargo.toml b/zebrad/Cargo.toml index bd00bb75d9b..e5a4f91f080 100644 --- a/zebrad/Cargo.toml +++ b/zebrad/Cargo.toml @@ -182,7 +182,7 @@ metrics-exporter-prometheus = { version = "0.12.0", default-features = false, fe # # zebrad uses tracing for logging, # we only use `log` to set and print the static log levels in transitive dependencies -log = "0.4.18" +log = "0.4.19" # prod feature progress-bar howudoin = { version = "0.1.2", features = ["term-line"], optional = true } From 9f0e18282cce73e203b3a888f3cbf6412ba8c2b8 Mon Sep 17 00:00:00 2001 From: Arya Date: Wed, 14 Jun 2023 15:05:11 -0400 Subject: [PATCH 090/265] fix(logs): Avoid grouping logs for separate tasks (#6923) * Removes span.enter() in async code * Restores `accept_span` and uses in_scope() instead of enter() * Update zebra-network/src/peer_set/initialize.rs --- zebra-network/src/peer_set/initialize.rs | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/zebra-network/src/peer_set/initialize.rs b/zebra-network/src/peer_set/initialize.rs index d306475b722..98b32648809 100644 --- a/zebra-network/src/peer_set/initialize.rs +++ b/zebra-network/src/peer_set/initialize.rs @@ -607,6 +607,13 @@ where let _guard = accept_span.enter(); debug!("got incoming connection"); + + // # Correctness + // + // Holding the drop 
guard returned by Span::enter across .await points will + // result in incorrect traces if it yields. + // + // This await is okay because the handshaker's `poll_ready` method always returns Ready. handshaker.ready().await?; // TODO: distinguish between proxied listeners and direct listeners let handshaker_span = info_span!("listen_handshaker", peer = ?connected_addr); @@ -638,6 +645,9 @@ where handshakes.push(Box::pin(handshake_task)); } + // We need to drop the guard before yielding. + std::mem::drop(_guard); + // Rate-limit inbound connection handshakes. // But sleep longer after a successful connection, // so we can clear out failed connections at a higher rate. From 4323097794d78db40a616b19a200ac04eb6c028b Mon Sep 17 00:00:00 2001 From: Alfredo Garcia Date: Wed, 14 Jun 2023 16:06:08 -0300 Subject: [PATCH 091/265] remove redundant configuration feature (#6929) --- zebra-chain/src/block/merkle.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/zebra-chain/src/block/merkle.rs b/zebra-chain/src/block/merkle.rs index 4e6dd98919a..9f1ef0d45f6 100644 --- a/zebra-chain/src/block/merkle.rs +++ b/zebra-chain/src/block/merkle.rs @@ -9,7 +9,7 @@ use crate::{ transaction::{self, Transaction, UnminedTx, UnminedTxId, VerifiedUnminedTx}, }; -#[cfg(any(any(test, feature = "proptest-impl"), feature = "proptest-impl"))] +#[cfg(any(test, feature = "proptest-impl"))] use proptest_derive::Arbitrary; /// The root of the Bitcoin-inherited transaction Merkle tree, binding the From fd78556000df6d0f206417c5d9b489d5bbfb8f01 Mon Sep 17 00:00:00 2001 From: Conrado Gouvea Date: Wed, 14 Jun 2023 16:07:02 -0300 Subject: [PATCH 092/265] fix: prevent progress bar from panicking using workaround (#6940) * fix: prevent progress bar from panicking using workaround * remove more progress bar lengths * re-add set_len() calls commented out; restore one that shouldn't cause panics * Update zebra-state/src/service/non_finalized_state.rs Co-authored-by: teor --------- Co-authored-by: 
teor --- zebra-network/src/address_book_updater.rs | 14 ++++---- zebra-network/src/peer_set/limit.rs | 4 +-- .../src/service/non_finalized_state.rs | 14 ++++---- zebrad/src/components/mempool.rs | 36 +++++++------------ 4 files changed, 27 insertions(+), 41 deletions(-) diff --git a/zebra-network/src/address_book_updater.rs b/zebra-network/src/address_book_updater.rs index d839ebbb1af..59d02155c97 100644 --- a/zebra-network/src/address_book_updater.rs +++ b/zebra-network/src/address_book_updater.rs @@ -98,19 +98,17 @@ impl AddressBookUpdater { let address_info = *address_info.borrow_and_update(); address_bar - .set_pos(u64::try_from(address_info.num_addresses).expect("fits in u64")) - .set_len(u64::try_from(address_info.address_limit).expect("fits in u64")); + .set_pos(u64::try_from(address_info.num_addresses).expect("fits in u64")); + // .set_len(u64::try_from(address_info.address_limit).expect("fits in u64")); let never_attempted = address_info.never_attempted_alternate + address_info.never_attempted_gossiped; - never_bar - .set_pos(u64::try_from(never_attempted).expect("fits in u64")) - .set_len(u64::try_from(address_info.address_limit).expect("fits in u64")); + never_bar.set_pos(u64::try_from(never_attempted).expect("fits in u64")); + // .set_len(u64::try_from(address_info.address_limit).expect("fits in u64")); - failed_bar - .set_pos(u64::try_from(address_info.failed).expect("fits in u64")) - .set_len(u64::try_from(address_info.address_limit).expect("fits in u64")); + failed_bar.set_pos(u64::try_from(address_info.failed).expect("fits in u64")); + // .set_len(u64::try_from(address_info.address_limit).expect("fits in u64")); } } diff --git a/zebra-network/src/peer_set/limit.rs b/zebra-network/src/peer_set/limit.rs index 5b5f4a61273..e23c4c4c06a 100644 --- a/zebra-network/src/peer_set/limit.rs +++ b/zebra-network/src/peer_set/limit.rs @@ -115,8 +115,8 @@ impl ActiveConnectionCounter { #[cfg(feature = "progress-bar")] self.connection_bar - 
.set_pos(u64::try_from(self.count).expect("fits in u64")) - .set_len(u64::try_from(self.limit).expect("fits in u64")); + .set_pos(u64::try_from(self.count).expect("fits in u64")); + // .set_len(u64::try_from(self.limit).expect("fits in u64")); self.count } diff --git a/zebra-state/src/service/non_finalized_state.rs b/zebra-state/src/service/non_finalized_state.rs index ffc6c41b897..bdcb1c10eb2 100644 --- a/zebra-state/src/service/non_finalized_state.rs +++ b/zebra-state/src/service/non_finalized_state.rs @@ -662,9 +662,8 @@ impl NonFinalizedState { .best_chain() .map(|chain| chain.non_finalized_root_height().0 - 1); - chain_count_bar - .set_pos(u64::try_from(self.chain_count()).expect("fits in u64")) - .set_len(u64::try_from(MAX_NON_FINALIZED_CHAIN_FORKS).expect("fits in u64")); + chain_count_bar.set_pos(u64::try_from(self.chain_count()).expect("fits in u64")); + // .set_len(u64::try_from(MAX_NON_FINALIZED_CHAIN_FORKS).expect("fits in u64")); if let Some(finalized_tip_height) = finalized_tip_height { chain_count_bar.desc(format!("Finalized Root {finalized_tip_height}")); @@ -701,10 +700,11 @@ impl NonFinalizedState { // - the chain this bar was previously assigned to might have changed position. chain_length_bar .label(format!("Fork {fork_height}")) - .set_pos(u64::try_from(chain.len()).expect("fits in u64")) - .set_len(u64::from( - zebra_chain::transparent::MIN_TRANSPARENT_COINBASE_MATURITY, - )); + .set_pos(u64::try_from(chain.len()).expect("fits in u64")); + // TODO: should this be MAX_BLOCK_REORG_HEIGHT? 
+ // .set_len(u64::from( + // zebra_chain::transparent::MIN_TRANSPARENT_COINBASE_MATURITY, + // )); // display work as bits let mut desc = format!( diff --git a/zebrad/src/components/mempool.rs b/zebrad/src/components/mempool.rs index 7415dffe03c..ceb73ad8eec 100644 --- a/zebrad/src/components/mempool.rs +++ b/zebrad/src/components/mempool.rs @@ -411,41 +411,29 @@ impl Mempool { || self.transaction_cost_bar.is_none() || self.rejected_count_bar.is_none()) { - let max_transaction_count = self.config.tx_cost_limit + let _max_transaction_count = self.config.tx_cost_limit / zebra_chain::transaction::MEMPOOL_TRANSACTION_COST_THRESHOLD; - self.queued_count_bar = Some( - howudoin::new() - .label("Mempool Queue") - .set_pos(0u64) - .set_len( - u64::try_from(downloads::MAX_INBOUND_CONCURRENCY).expect("fits in u64"), - ), - ); + self.queued_count_bar = Some(*howudoin::new().label("Mempool Queue").set_pos(0u64)); + // .set_len( + // u64::try_from(downloads::MAX_INBOUND_CONCURRENCY).expect("fits in u64"), + // ), - self.transaction_count_bar = Some( - howudoin::new() - .label("Mempool Txs") - .set_pos(0u64) - .set_len(max_transaction_count), - ); + self.transaction_count_bar = Some(*howudoin::new().label("Mempool Txs").set_pos(0u64)); + // .set_len(max_transaction_count), self.transaction_cost_bar = Some( howudoin::new() .label("Mempool Cost") .set_pos(0u64) - .set_len(self.config.tx_cost_limit) + // .set_len(self.config.tx_cost_limit) .fmt_as_bytes(true), ); - self.rejected_count_bar = Some( - howudoin::new() - .label("Mempool Rejects") - .set_pos(0u64) - .set_len( - u64::try_from(storage::MAX_EVICTION_MEMORY_ENTRIES).expect("fits in u64"), - ), - ); + self.rejected_count_bar = Some(*howudoin::new().label("Mempool Rejects").set_pos(0u64)); + // .set_len( + // u64::try_from(storage::MAX_EVICTION_MEMORY_ENTRIES).expect("fits in u64"), + // ), } // Update if the mempool has ever been active From 3e24b1d21c23f8b99e087d9ae8d1eb3fae09086e Mon Sep 17 00:00:00 2001 From: 
"dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 14 Jun 2023 19:07:50 +0000 Subject: [PATCH 093/265] build(deps): bump docker/metadata-action from 4.5.0 to 4.6.0 (#6941) Bumps [docker/metadata-action](https://github.com/docker/metadata-action) from 4.5.0 to 4.6.0. - [Release notes](https://github.com/docker/metadata-action/releases) - [Commits](https://github.com/docker/metadata-action/compare/v4.5.0...v4.6.0) --- updated-dependencies: - dependency-name: docker/metadata-action dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/build-docker-image.yml | 2 +- .github/workflows/zcash-lightwalletd.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/build-docker-image.yml b/.github/workflows/build-docker-image.yml index 8eeed4214aa..fda6d1d68bf 100644 --- a/.github/workflows/build-docker-image.yml +++ b/.github/workflows/build-docker-image.yml @@ -86,7 +86,7 @@ jobs: # Automatic tag management and OCI Image Format Specification for labels - name: Docker meta id: meta - uses: docker/metadata-action@v4.5.0 + uses: docker/metadata-action@v4.6.0 with: # list of Docker images to use as base name for tags images: | diff --git a/.github/workflows/zcash-lightwalletd.yml b/.github/workflows/zcash-lightwalletd.yml index 50adc90dc32..08979957f98 100644 --- a/.github/workflows/zcash-lightwalletd.yml +++ b/.github/workflows/zcash-lightwalletd.yml @@ -75,7 +75,7 @@ jobs: # Automatic tag management and OCI Image Format Specification for labels - name: Docker meta id: meta - uses: docker/metadata-action@v4.5.0 + uses: docker/metadata-action@v4.6.0 with: # list of Docker images to use as base name for tags images: | From 4d1ff3b363c0f987c36089c4fd4984d1dac09aa0 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" 
<49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 14 Jun 2023 19:08:33 +0000 Subject: [PATCH 094/265] build(deps): bump docker/build-push-action from 4.1.0 to 4.1.1 (#6942) Bumps [docker/build-push-action](https://github.com/docker/build-push-action) from 4.1.0 to 4.1.1. - [Release notes](https://github.com/docker/build-push-action/releases) - [Commits](https://github.com/docker/build-push-action/compare/v4.1.0...v4.1.1) --- updated-dependencies: - dependency-name: docker/build-push-action dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/build-docker-image.yml | 2 +- .github/workflows/zcash-lightwalletd.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/build-docker-image.yml b/.github/workflows/build-docker-image.yml index fda6d1d68bf..6ae6f28166d 100644 --- a/.github/workflows/build-docker-image.yml +++ b/.github/workflows/build-docker-image.yml @@ -146,7 +146,7 @@ jobs: # Build and push image to Google Artifact Registry, and possibly DockerHub - name: Build & push id: docker_build - uses: docker/build-push-action@v4.1.0 + uses: docker/build-push-action@v4.1.1 with: target: ${{ inputs.dockerfile_target }} context: . diff --git a/.github/workflows/zcash-lightwalletd.yml b/.github/workflows/zcash-lightwalletd.yml index 08979957f98..28b444589f6 100644 --- a/.github/workflows/zcash-lightwalletd.yml +++ b/.github/workflows/zcash-lightwalletd.yml @@ -130,7 +130,7 @@ jobs: # Build and push image to Google Artifact Registry - name: Build & push id: docker_build - uses: docker/build-push-action@v4.1.0 + uses: docker/build-push-action@v4.1.1 with: target: build context: . 
From c4c29eb4665aa2213c61a73a7b8d7e438fc1c1dc Mon Sep 17 00:00:00 2001 From: Marek Date: Wed, 14 Jun 2023 21:09:10 +0200 Subject: [PATCH 095/265] Fix doc links pointing to private items (#6944) Our documentation for some public items contains links to private items. These links resolve fine because we use the `--document-private-flags`, but still generate warnings during the compilation of the docs. This commit (hopefully) correctly suppresses the warnings, and updates CI. --- .cargo/config.toml | 15 +++++++++------ .github/workflows/lint.yml | 4 ++-- 2 files changed, 11 insertions(+), 8 deletions(-) diff --git a/.cargo/config.toml b/.cargo/config.toml index 5b0f95da173..6be194a0c74 100644 --- a/.cargo/config.toml +++ b/.cargo/config.toml @@ -55,12 +55,6 @@ rustflags = [ # Documentation "-Wmissing_docs", - # These rustdoc -A and -W settings must be the same as the RUSTDOCFLAGS in: - # https://github.com/ZcashFoundation/zebra/blob/main/.github/workflows/lint.yml#L152 - - # Links in public docs can point to private items. - "-Arustdoc::private_intra_doc_links", - # TODOs: # `cargo fix` might help do these fixes, # or add a config.toml to sub-directories which should allow these lints, @@ -82,3 +76,12 @@ rustflags = [ # fix hidden lifetime parameters #"-Wrust_2018_idioms", ] + +[build] +rustdocflags = [ + # The -A and -W settings must be the same as the `RUSTDOCFLAGS` in: + # https://github.com/ZcashFoundation/zebra/blob/main/.github/workflows/lint.yml#L151 + + # Links in public docs can point to private items. 
+ "-Arustdoc::private_intra_doc_links", +] diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index ac3be34d7c3..a8ec87324a5 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -145,8 +145,8 @@ jobs: # cargo doc doesn't support '-- -D warnings', so we have to add it here # https://github.com/rust-lang/cargo/issues/8424#issuecomment-774662296 # - # These -A and -W settings must be the same as the rustdoc settings in: - # https://github.com/ZcashFoundation/zebra/blob/main/.cargo/config.toml#L53 + # The -A and -W settings must be the same as the `rustdocflags` in: + # https://github.com/ZcashFoundation/zebra/blob/main/.cargo/config.toml#L87 env: RUSTDOCFLAGS: -D warnings -A rustdoc::private_intra_doc_links From e870c389a9d96e47c5f09d72cb204c573a995a48 Mon Sep 17 00:00:00 2001 From: Deirdre Connolly Date: Wed, 14 Jun 2023 15:09:41 -0400 Subject: [PATCH 096/265] change(docs): Add `cargo clean` step to crate publishing steps (#6959) This bit me mid-publish so --- .github/PULL_REQUEST_TEMPLATE/release-checklist.md | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/PULL_REQUEST_TEMPLATE/release-checklist.md b/.github/PULL_REQUEST_TEMPLATE/release-checklist.md index 2017e9d1113..708ed25e50c 100644 --- a/.github/PULL_REQUEST_TEMPLATE/release-checklist.md +++ b/.github/PULL_REQUEST_TEMPLATE/release-checklist.md @@ -149,6 +149,7 @@ The end of support height is calculated from the current blockchain height: ## Publish Crates - [ ] Run `cargo login` +- [ ] Run `cargo clean` in the zebra repo - [ ] Publish the crates to crates.io: `cargo release publish --verbose --workspace --execute` - [ ] Check that Zebra can be installed from `crates.io`: `cargo install --force --version 1.0.0 zebrad && ~/.cargo/bin/zebrad` From 9bf1c980c28db91e204a4300288ebf3d9eadcbc7 Mon Sep 17 00:00:00 2001 From: Deirdre Connolly Date: Wed, 14 Jun 2023 16:00:53 -0400 Subject: [PATCH 097/265] change(docs): Explicitly invoke --execute when bumping crate 
versions (#6949) * Explicitly invoke --execute when bumping crate versions * Update .github/PULL_REQUEST_TEMPLATE/release-checklist.md Co-authored-by: teor * Update .github/PULL_REQUEST_TEMPLATE/release-checklist.md Co-authored-by: teor --------- Co-authored-by: teor --- .github/PULL_REQUEST_TEMPLATE/release-checklist.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/PULL_REQUEST_TEMPLATE/release-checklist.md b/.github/PULL_REQUEST_TEMPLATE/release-checklist.md index 708ed25e50c..539780530fa 100644 --- a/.github/PULL_REQUEST_TEMPLATE/release-checklist.md +++ b/.github/PULL_REQUEST_TEMPLATE/release-checklist.md @@ -58,10 +58,10 @@ Zebra's Rust API doesn't have any support or stability guarantees, so we keep al
- [ ] Update crate versions and do a release dry-run: - - [ ] `cargo release version --verbose --workspace --exclude zebrad beta` - - [ ] `cargo release version --verbose --package zebrad [ major | minor | patch ]` - - [ ] `cargo release publish --verbose --workspace --dry-run` -- [ ] Commit the version changes to your release PR branch using `git`: `cargo release commit --verbose --workspace` + - [ ] `cargo release version --verbose --execute --workspace --exclude zebrad beta` + - [ ] `cargo release version --verbose --execute --package zebrad [ major | minor | patch ]` + - [ ] `cargo release publish --verbose --dry-run --workspace` +- [ ] Commit the version changes to your release PR branch using `git`: `cargo release commit --verbose --execute --workspace` ## README From 912693bf0a4063e9835b18dd61b02f0840db6345 Mon Sep 17 00:00:00 2001 From: teor Date: Thu, 15 Jun 2023 08:15:51 +1000 Subject: [PATCH 098/265] Fix Dockerfile cache use (#6933) --- docker/Dockerfile | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/docker/Dockerfile b/docker/Dockerfile index 36c6751b417..4f21f9aee5d 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -107,10 +107,11 @@ COPY --from=us-docker.pkg.dev/zealous-zebra/zebra/lightwalletd /opt/lightwalletd # # This is the caching Docker layer for Rust! # -# TODO: is it faster to use --tests here? -RUN cargo chef cook --release --features "${TEST_FEATURES} ${FEATURES}" --workspace --recipe-path recipe.json +# TODO: add --locked when cargo-chef supports it +RUN cargo chef cook --tests --release --features "${TEST_FEATURES} ${FEATURES}" --workspace --recipe-path recipe.json COPY . . +# Test Zebra RUN cargo test --locked --release --features "${TEST_FEATURES} ${FEATURES}" --workspace --no-run RUN cp /opt/zebrad/target/release/zebrad /usr/local/bin RUN cp /opt/zebrad/target/release/zebra-checkpoints /usr/local/bin @@ -127,10 +128,12 @@ ENTRYPOINT [ "/entrypoint.sh" ] # `test` stage. 
This step is a dependency for the `runtime` stage, which uses the resulting # zebrad binary from this step. FROM deps AS release -RUN cargo chef cook --release --features "${FEATURES}" --recipe-path recipe.json + +# TODO: add --locked when cargo-chef supports it +RUN cargo chef cook --release --features "${FEATURES}" --package zebrad --bin zebrad --recipe-path recipe.json COPY . . -# Build zebra +# Build zebrad RUN cargo build --locked --release --features "${FEATURES}" --package zebrad --bin zebrad COPY ./docker/runtime-entrypoint.sh / From 32ea511a73b8e25d26b59c653c25eec85cb43b1c Mon Sep 17 00:00:00 2001 From: teor Date: Thu, 15 Jun 2023 10:43:41 +1000 Subject: [PATCH 099/265] fix(net): Reduce inbound service overloads and add a timeout (#6950) * Increase concurrency limit, reduce peer broadcast * Fix a div_ceil() TODO * Document security requirements of inbound peer overload handling * Reduce drop probability and fix its formatting * Put a 5 second timeout on inbound service requests * Update drop probability tests * Add error types, metrics, and logging for InboundTimeout errors --- Cargo.lock | 1 + zebra-network/Cargo.toml | 1 + zebra-network/src/constants.rs | 2 +- zebra-network/src/peer/connection.rs | 56 +++++++++++++++---- .../src/peer/connection/tests/vectors.rs | 34 +++++------ zebra-network/src/peer/error.rs | 6 ++ zebra-network/src/peer_set/set.rs | 7 ++- zebrad/src/commands/start.rs | 10 +++- zebrad/src/components/inbound.rs | 13 +++-- zebrad/src/components/inbound/downloads.rs | 2 +- 10 files changed, 94 insertions(+), 38 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 02bbb1f5a47..1c29b1fd6c8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5759,6 +5759,7 @@ dependencies = [ "itertools", "lazy_static", "metrics 0.21.0", + "num-integer", "ordered-map", "pin-project", "proptest", diff --git a/zebra-network/Cargo.toml b/zebra-network/Cargo.toml index 339086a9c2f..3601607136f 100644 --- a/zebra-network/Cargo.toml +++ b/zebra-network/Cargo.toml @@ 
-46,6 +46,7 @@ humantime-serde = "1.1.1" indexmap = { version = "1.9.3", features = ["serde"] } itertools = "0.10.5" lazy_static = "1.4.0" +num-integer = "0.1.45" ordered-map = "0.4.2" pin-project = "1.1.0" rand = { version = "0.8.5", package = "rand" } diff --git a/zebra-network/src/constants.rs b/zebra-network/src/constants.rs index f327cce4b4c..e137fb7b212 100644 --- a/zebra-network/src/constants.rs +++ b/zebra-network/src/constants.rs @@ -349,7 +349,7 @@ pub const MIN_OVERLOAD_DROP_PROBABILITY: f32 = 0.05; /// The maximum probability of dropping a peer connection when it receives an /// [`Overloaded`](crate::PeerError::Overloaded) error. -pub const MAX_OVERLOAD_DROP_PROBABILITY: f32 = 0.95; +pub const MAX_OVERLOAD_DROP_PROBABILITY: f32 = 0.5; /// The minimum interval between logging peer set status updates. pub const MIN_PEER_SET_LOG_INTERVAL: Duration = Duration::from_secs(60); diff --git a/zebra-network/src/peer/connection.rs b/zebra-network/src/peer/connection.rs index 318357dbd6b..71838366c35 100644 --- a/zebra-network/src/peer/connection.rs +++ b/zebra-network/src/peer/connection.rs @@ -16,7 +16,7 @@ use futures::{ }; use rand::{thread_rng, Rng}; use tokio::time::{sleep, Sleep}; -use tower::{load_shed::error::Overloaded, Service, ServiceExt}; +use tower::{Service, ServiceExt}; use tracing_futures::Instrument; use zebra_chain::{ @@ -1283,6 +1283,12 @@ where // before sending the next inbound request. tokio::task::yield_now().await; + // # Security + // + // Holding buffer slots for a long time can cause hangs: + // + // + // The inbound service must be called immediately after a buffer slot is reserved. if self.svc.ready().await.is_err() { self.fail_with(PeerError::ServiceShutdown).await; return; @@ -1290,12 +1296,28 @@ where let rsp = match self.svc.call(req.clone()).await { Err(e) => { - if e.is::() { + if e.is::() { + // # Security + // + // The peer request queue must have a limited length. 
+ // The buffer and load shed layers are added in `start::start()`. tracing::debug!("inbound service is overloaded, may close connection"); let now = Instant::now(); - self.handle_inbound_overload(req, now).await; + self.handle_inbound_overload(req, now, PeerError::Overloaded) + .await; + } else if e.is::() { + // # Security + // + // Peer requests must have a timeout. + // The timeout layer is added in `start::start()`. + tracing::info!(%req, "inbound service request timed out, may close connection"); + + let now = Instant::now(); + + self.handle_inbound_overload(req, now, PeerError::InboundTimeout) + .await; } else { // We could send a reject to the remote peer, but that might cause // them to disconnect, and we might be using them to sync blocks. @@ -1431,7 +1453,8 @@ where tokio::task::yield_now().await; } - /// Handle inbound service overload error responses by randomly terminating some connections. + /// Handle inbound service overload and timeout error responses by randomly terminating some + /// connections. /// /// # Security /// @@ -1450,15 +1473,19 @@ where /// The inbound connection rate-limit also makes it hard for multiple peers to perform this /// attack, because each inbound connection can only send one inbound request before its /// probability of being disconnected increases. 
- async fn handle_inbound_overload(&mut self, req: Request, now: Instant) { + async fn handle_inbound_overload(&mut self, req: Request, now: Instant, error: PeerError) { let prev = self.last_overload_time.replace(now); let drop_connection_probability = overload_drop_connection_probability(now, prev); if thread_rng().gen::() < drop_connection_probability { - metrics::counter!("pool.closed.loadshed", 1); + if matches!(error, PeerError::Overloaded) { + metrics::counter!("pool.closed.loadshed", 1); + } else { + metrics::counter!("pool.closed.inbound.timeout", 1); + } tracing::info!( - drop_connection_probability, + drop_connection_probability = format!("{drop_connection_probability:.3}"), remote_user_agent = ?self.connection_info.remote.user_agent, negotiated_version = ?self.connection_info.negotiated_version, peer = ?self.metrics_label, @@ -1467,14 +1494,19 @@ where remote_height = ?self.connection_info.remote.start_height, cached_addrs = ?self.cached_addrs.len(), connection_state = ?self.state, - "inbound service is overloaded, closing connection", + "inbound service {error} error, closing connection", ); - self.update_state_metrics(format!("In::Req::{}/Rsp::Overload::Error", req.command())); - self.fail_with(PeerError::Overloaded).await; + self.update_state_metrics(format!("In::Req::{}/Rsp::{error}::Error", req.command())); + self.fail_with(error).await; } else { - self.update_state_metrics(format!("In::Req::{}/Rsp::Overload::Ignored", req.command())); - metrics::counter!("pool.ignored.loadshed", 1); + self.update_state_metrics(format!("In::Req::{}/Rsp::{error}::Ignored", req.command())); + + if matches!(error, PeerError::Overloaded) { + metrics::counter!("pool.ignored.loadshed", 1); + } else { + metrics::counter!("pool.ignored.inbound.timeout", 1); + } } } } diff --git a/zebra-network/src/peer/connection/tests/vectors.rs b/zebra-network/src/peer/connection/tests/vectors.rs index cca8c8b2064..4ab4db7af0a 100644 --- 
a/zebra-network/src/peer/connection/tests/vectors.rs +++ b/zebra-network/src/peer/connection/tests/vectors.rs @@ -687,11 +687,11 @@ fn overload_probability_reduces_over_time() { let drop_probability = overload_drop_connection_probability(now, Some(prev)); assert!( drop_probability <= MAX_OVERLOAD_DROP_PROBABILITY, - "if the overloads are very close together, drops can optionally decrease", + "if the overloads are very close together, drops can optionally decrease: {drop_probability} <= {MAX_OVERLOAD_DROP_PROBABILITY}", ); assert!( MAX_OVERLOAD_DROP_PROBABILITY - drop_probability < 0.001, - "if the overloads are very close together, drops can only decrease slightly", + "if the overloads are very close together, drops can only decrease slightly: {drop_probability}", ); let last_probability = drop_probability; @@ -700,11 +700,11 @@ fn overload_probability_reduces_over_time() { let drop_probability = overload_drop_connection_probability(now, Some(prev)); assert!( drop_probability < last_probability, - "if the overloads decrease, drops should decrease", + "if the overloads decrease, drops should decrease: {drop_probability} < {last_probability}", ); assert!( MAX_OVERLOAD_DROP_PROBABILITY - drop_probability < 0.001, - "if the overloads are very close together, drops can only decrease slightly", + "if the overloads are very close together, drops can only decrease slightly: {drop_probability}", ); let last_probability = drop_probability; @@ -713,11 +713,11 @@ fn overload_probability_reduces_over_time() { let drop_probability = overload_drop_connection_probability(now, Some(prev)); assert!( drop_probability < last_probability, - "if the overloads decrease, drops should decrease", + "if the overloads decrease, drops should decrease: {drop_probability} < {last_probability}", ); assert!( MAX_OVERLOAD_DROP_PROBABILITY - drop_probability < 0.001, - "if the overloads are very close together, drops can only decrease slightly", + "if the overloads are very close together, drops can 
only decrease slightly: {drop_probability}", ); let last_probability = drop_probability; @@ -726,11 +726,11 @@ fn overload_probability_reduces_over_time() { let drop_probability = overload_drop_connection_probability(now, Some(prev)); assert!( drop_probability < last_probability, - "if the overloads decrease, drops should decrease", + "if the overloads decrease, drops should decrease: {drop_probability} < {last_probability}", ); assert!( MAX_OVERLOAD_DROP_PROBABILITY - drop_probability < 0.01, - "if the overloads are very close together, drops can only decrease slightly", + "if the overloads are very close together, drops can only decrease slightly: {drop_probability}", ); let last_probability = drop_probability; @@ -739,11 +739,11 @@ fn overload_probability_reduces_over_time() { let drop_probability = overload_drop_connection_probability(now, Some(prev)); assert!( drop_probability < last_probability, - "if the overloads decrease, drops should decrease", + "if the overloads decrease, drops should decrease: {drop_probability} < {last_probability}", ); assert!( - MAX_OVERLOAD_DROP_PROBABILITY - drop_probability > 0.5, - "if the overloads are distant, drops should decrease a lot", + MAX_OVERLOAD_DROP_PROBABILITY - drop_probability > 0.4, + "if the overloads are distant, drops should decrease a lot: {drop_probability}", ); let last_probability = drop_probability; @@ -752,11 +752,11 @@ fn overload_probability_reduces_over_time() { let drop_probability = overload_drop_connection_probability(now, Some(prev)); assert!( drop_probability < last_probability, - "if the overloads decrease, drops should decrease", + "if the overloads decrease, drops should decrease: {drop_probability} < {last_probability}", ); - assert!( - MAX_OVERLOAD_DROP_PROBABILITY - drop_probability > 0.7, - "if the overloads are distant, drops should decrease a lot", + assert_eq!( + drop_probability, MIN_OVERLOAD_DROP_PROBABILITY, + "if overloads are far apart, drops should have minimum drop probability: 
{drop_probability}", ); let _last_probability = drop_probability; @@ -765,14 +765,14 @@ fn overload_probability_reduces_over_time() { let drop_probability = overload_drop_connection_probability(now, Some(prev)); assert_eq!( drop_probability, MIN_OVERLOAD_DROP_PROBABILITY, - "if overloads are far apart, drops should have minimum drop probability", + "if overloads are far apart, drops should have minimum drop probability: {drop_probability}", ); // Base case: no previous overload let drop_probability = overload_drop_connection_probability(now, None); assert_eq!( drop_probability, MIN_OVERLOAD_DROP_PROBABILITY, - "if there is no previous overload time, overloads should have minimum drop probability", + "if there is no previous overload time, overloads should have minimum drop probability: {drop_probability}", ); } diff --git a/zebra-network/src/peer/error.rs b/zebra-network/src/peer/error.rs index 4d842ba5cc9..6263fb56119 100644 --- a/zebra-network/src/peer/error.rs +++ b/zebra-network/src/peer/error.rs @@ -82,6 +82,11 @@ pub enum PeerError { #[error("Internal services over capacity")] Overloaded, + /// This peer request's caused an internal service timeout, so the connection was dropped + /// to shed load or prevent attacks. + #[error("Internal services timed out")] + InboundTimeout, + /// This node's internal services are no longer able to service requests. 
#[error("Internal services have failed or shutdown")] ServiceShutdown, @@ -142,6 +147,7 @@ impl PeerError { PeerError::Serialization(inner) => format!("Serialization({inner})").into(), PeerError::DuplicateHandshake => "DuplicateHandshake".into(), PeerError::Overloaded => "Overloaded".into(), + PeerError::InboundTimeout => "InboundTimeout".into(), PeerError::ServiceShutdown => "ServiceShutdown".into(), PeerError::NotFoundResponse(_) => "NotFoundResponse".into(), PeerError::NotFoundRegistry(_) => "NotFoundRegistry".into(), diff --git a/zebra-network/src/peer_set/set.rs b/zebra-network/src/peer_set/set.rs index b3ff2a92df4..0353d377f5e 100644 --- a/zebra-network/src/peer_set/set.rs +++ b/zebra-network/src/peer_set/set.rs @@ -111,6 +111,7 @@ use futures::{ stream::FuturesUnordered, }; use itertools::Itertools; +use num_integer::div_ceil; use tokio::{ sync::{broadcast, oneshot::error::TryRecvError, watch}, task::JoinHandle, @@ -808,9 +809,11 @@ where /// Given a number of ready peers calculate to how many of them Zebra will /// actually send the request to. Return this number. pub(crate) fn number_of_peers_to_broadcast(&self) -> usize { - // We are currently sending broadcast messages to half of the total peers. + // We are currently sending broadcast messages to a third of the total peers. + const PEER_FRACTION_TO_BROADCAST: usize = 3; + // Round up, so that if we have one ready peer, it gets the request. - (self.ready_services.len() + 1) / 2 + div_ceil(self.ready_services.len(), PEER_FRACTION_TO_BROADCAST) } /// Returns the list of addresses in the peer set. 
diff --git a/zebrad/src/commands/start.rs b/zebrad/src/commands/start.rs index edbc29d291d..67b9b3e78e2 100644 --- a/zebrad/src/commands/start.rs +++ b/zebrad/src/commands/start.rs @@ -84,7 +84,7 @@ use zebra_rpc::server::RpcServer; use crate::{ application::{app_version, user_agent}, components::{ - inbound::{self, InboundSetupData}, + inbound::{self, InboundSetupData, MAX_INBOUND_RESPONSE_TIME}, mempool::{self, Mempool}, sync::{self, show_block_chain_progress, VERIFICATION_PIPELINE_SCALING_MULTIPLIER}, tokio::{RuntimeRun, TokioComponent}, @@ -132,10 +132,18 @@ impl StartCmd { // The service that our node uses to respond to requests by peers. The // load_shed middleware ensures that we reduce the size of the peer set // in response to excess load. + // + // # Security + // + // This layer stack is security-sensitive, modifying it can cause hangs, + // or enable denial of service attacks. + // + // See `zebra_network::Connection::drive_peer_request()` for details. let (setup_tx, setup_rx) = oneshot::channel(); let inbound = ServiceBuilder::new() .load_shed() .buffer(inbound::downloads::MAX_INBOUND_CONCURRENCY) + .timeout(MAX_INBOUND_RESPONSE_TIME) .service(Inbound::new( config.sync.full_verify_concurrency_limit, setup_rx, diff --git a/zebrad/src/components/inbound.rs b/zebrad/src/components/inbound.rs index d7c9ca08485..e93aa8517f0 100644 --- a/zebrad/src/components/inbound.rs +++ b/zebrad/src/components/inbound.rs @@ -11,6 +11,7 @@ use std::{ pin::Pin, sync::Arc, task::{Context, Poll}, + time::Duration, }; use chrono::Utc; @@ -18,6 +19,7 @@ use futures::{ future::{FutureExt, TryFutureExt}, stream::Stream, }; +use num_integer::div_ceil; use tokio::sync::oneshot::{self, error::TryRecvError}; use tower::{buffer::Buffer, timeout::Timeout, util::BoxService, Service, ServiceExt}; @@ -50,6 +52,12 @@ mod tests; use downloads::Downloads as BlockDownloads; +/// The maximum amount of time an inbound service response can take. 
+/// +/// If the response takes longer than this time, it will be cancelled, +/// and the peer might be disconnected. +pub const MAX_INBOUND_RESPONSE_TIME: Duration = Duration::from_secs(5); + /// The number of bytes the [`Inbound`] service will queue in response to a single block or /// transaction request, before ignoring any additional block or transaction IDs in that request. /// @@ -374,10 +382,7 @@ impl Service for Inbound { let mut peers = peers.sanitized(now); // Truncate the list - // - // TODO: replace with div_ceil once it stabilises - // https://github.com/rust-lang/rust/issues/88581 - let address_limit = (peers.len() + ADDR_RESPONSE_LIMIT_DENOMINATOR - 1) / ADDR_RESPONSE_LIMIT_DENOMINATOR; + let address_limit = div_ceil(peers.len(), ADDR_RESPONSE_LIMIT_DENOMINATOR); let address_limit = MAX_ADDRS_IN_MESSAGE.min(address_limit); peers.truncate(address_limit); diff --git a/zebrad/src/components/inbound/downloads.rs b/zebrad/src/components/inbound/downloads.rs index aa8b2cf6c25..11200e66435 100644 --- a/zebrad/src/components/inbound/downloads.rs +++ b/zebrad/src/components/inbound/downloads.rs @@ -49,7 +49,7 @@ type BoxError = Box; /// Since Zebra keeps an `inv` index, inbound downloads for malicious blocks /// will be directed to the malicious node that originally gossiped the hash. /// Therefore, this attack can be carried out by a single malicious node. -pub const MAX_INBOUND_CONCURRENCY: usize = 20; +pub const MAX_INBOUND_CONCURRENCY: usize = 30; /// The action taken in response to a peer's gossiped block hash. pub enum DownloadAction { From 2997add3319f7fa96addcfbf84fc95ddba78f0b8 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 15 Jun 2023 03:08:37 +0000 Subject: [PATCH 100/265] build(deps): bump clap from 4.3.3 to 4.3.4 (#6957) Bumps [clap](https://github.com/clap-rs/clap) from 4.3.3 to 4.3.4. 
- [Release notes](https://github.com/clap-rs/clap/releases) - [Changelog](https://github.com/clap-rs/clap/blob/master/CHANGELOG.md) - [Commits](https://github.com/clap-rs/clap/compare/v4.3.3...v4.3.4) --- updated-dependencies: - dependency-name: clap dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 14 +++++++------- zebrad/Cargo.toml | 2 +- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 1c29b1fd6c8..0b008412e94 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -12,7 +12,7 @@ dependencies = [ "arc-swap", "backtrace", "canonical-path", - "clap 4.3.3", + "clap 4.3.4", "color-eyre", "fs-err", "once_cell", @@ -773,9 +773,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.3.3" +version = "4.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca8f255e4b8027970e78db75e78831229c9815fdbfa67eb1a1b777a62e24b4a0" +checksum = "80672091db20273a15cf9fdd4e47ed43b5091ec9841bf4c6145c9dfbbcae09ed" dependencies = [ "clap_builder", "clap_derive", @@ -784,9 +784,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.3.3" +version = "4.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "acd4f3c17c83b0ba34ffbc4f8bbd74f079413f747f84a6f89292f138057e36ab" +checksum = "c1458a1df40e1e2afebb7ab60ce55c1fa8f431146205aa5f4887e0b111c27636" dependencies = [ "anstream", "anstyle", @@ -956,7 +956,7 @@ dependencies = [ "anes", "cast", "ciborium", - "clap 4.3.3", + "clap 4.3.4", "criterion-plot", "is-terminal", "itertools", @@ -5936,7 +5936,7 @@ dependencies = [ "abscissa_core", "atty", "chrono", - "clap 4.3.3", + "clap 4.3.4", "color-eyre", "console-subscriber", "dirs", diff --git a/zebrad/Cargo.toml b/zebrad/Cargo.toml index e5a4f91f080..0686f26a3be 100644 --- a/zebrad/Cargo.toml +++ b/zebrad/Cargo.toml @@ -127,7 
+127,7 @@ zebra-state = { path = "../zebra-state", version = "1.0.0-beta.26" } zebra-utils = { path = "../zebra-utils", version = "1.0.0-beta.26", optional = true } abscissa_core = "0.7.0" -clap = { version = "4.3.3", features = ["cargo"] } +clap = { version = "4.3.4", features = ["cargo"] } chrono = { version = "0.4.26", default-features = false, features = ["clock", "std"] } humantime-serde = "1.1.1" indexmap = "1.9.3" From a50955f7222187a388cf29a8eceb5ee80b10879c Mon Sep 17 00:00:00 2001 From: teor Date: Thu, 15 Jun 2023 13:08:56 +1000 Subject: [PATCH 101/265] Move macOS to tier 3 support: no builds (#6965) --- book/src/user/supported-platforms.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/book/src/user/supported-platforms.md b/book/src/user/supported-platforms.md index 07d7d2e970a..152be56537e 100644 --- a/book/src/user/supported-platforms.md +++ b/book/src/user/supported-platforms.md @@ -32,7 +32,6 @@ For the full requirements, see [Tier 2 platform policy](platform-tier-policy.md# | platform | os | notes | rust | artifacts | -------|-------|-------|-------|------- -| `x86_64-apple-darwin` | [GitHub macos-latest](https://github.com/actions/virtual-environments#available-environments) | 64-bit | [latest stable release](https://github.com/rust-lang/rust/releases) | N/A | `x86_64-unknown-linux-gnu` | [GitHub ubuntu-latest](https://github.com/actions/virtual-environments#available-environments) | 64-bit | [latest stable release](https://github.com/rust-lang/rust/releases) | N/A | `x86_64-unknown-linux-gnu` | [GitHub ubuntu-latest](https://github.com/actions/virtual-environments#available-environments) | 64-bit | [latest beta release](https://github.com/rust-lang/rust/blob/beta/src/version) | N/A @@ -47,3 +46,5 @@ For the full requirements, see [Tier 3 platform policy](platform-tier-policy.md# | platform | os | notes | rust | artifacts | -------|-------|-------|-------|------- | `aarch64-unknown-linux-gnu` | [Debian 
11](https://www.debian.org/releases/bullseye/) | 64-bit | [latest stable release](https://github.com/rust-lang/rust/releases) | N/A +| `x86_64-apple-darwin` | [GitHub macos-latest](https://github.com/actions/virtual-environments#available-environments) | 64-bit | [latest stable release](https://github.com/rust-lang/rust/releases) | N/A + From e748d9a833a1ec92228d0a1ed2b345ae353fd5e8 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 15 Jun 2023 03:09:03 +0000 Subject: [PATCH 102/265] build(deps): bump baptiste0928/cargo-install from 2.0.0 to 2.1.0 (#6903) Bumps [baptiste0928/cargo-install](https://github.com/baptiste0928/cargo-install) from 2.0.0 to 2.1.0. - [Release notes](https://github.com/baptiste0928/cargo-install/releases) - [Changelog](https://github.com/baptiste0928/cargo-install/blob/main/CHANGELOG.md) - [Commits](https://github.com/baptiste0928/cargo-install/compare/v2.0.0...v2.1.0) --- updated-dependencies: - dependency-name: baptiste0928/cargo-install dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/continous-integration-os.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/continous-integration-os.yml b/.github/workflows/continous-integration-os.yml index c7adf92b7ab..8d2bb504df8 100644 --- a/.github/workflows/continous-integration-os.yml +++ b/.github/workflows/continous-integration-os.yml @@ -299,7 +299,7 @@ jobs: curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --default-toolchain=stable --profile=minimal - name: Install cargo-machete - uses: baptiste0928/cargo-install@v2.0.0 + uses: baptiste0928/cargo-install@v2.1.0 with: crate: cargo-machete From 8a7c871480dc76bc94e71d77a25e692603f16486 Mon Sep 17 00:00:00 2001 From: Alfredo Garcia Date: Thu, 15 Jun 2023 02:12:45 -0300 Subject: [PATCH 103/265] rename(state): do additional renaming for clarification purposes (#6967) * do renames by script ``` git ls-tree --full-tree -r --name-only HEAD | \ grep -v 'book' | grep -v 'CHANGELOG.md' | \ xargs sed -i 's/CommitFinalized/CommitCheckpointVerified/g' git ls-tree --full-tree -r --name-only HEAD | \ grep -v 'book' | grep -v 'CHANGELOG.md' | \ xargs sed -i 's/commit_finalized_block/commit_checkpoint_verified/g' git ls-tree --full-tree -r --name-only HEAD | \ grep -v 'book' | grep -v 'CHANGELOG.md' | \ xargs sed -i 's/drain_queue_and_commit_finalized/drain_finalized_queue_and_commit/g' git ls-tree --full-tree -r --name-only HEAD | \ grep -v 'book' | grep -v 'CHANGELOG.md' | \ xargs sed -i 's/queue_and_commit_finalized/queue_and_commit_to_finalized_state/g' git ls-tree --full-tree -r --name-only HEAD | \ grep -v 'book' | grep -v 'CHANGELOG.md' | \ xargs sed -i 's/queued_finalized_blocks/finalized_state_queued_blocks/g' git ls-tree --full-tree -r --name-only HEAD | \ grep -v 'book' | grep -v 'CHANGELOG.md' | \ xargs sed -i 
's/max_queued_finalized_height/max_finalized_queue_height/g' git ls-tree --full-tree -r --name-only HEAD | \ grep -v 'book' | grep -v 'CHANGELOG.md' | \ xargs sed -i 's/send_finalized_block_error/send_checkpoint_verified_block_error/g' git ls-tree --full-tree -r --name-only HEAD | \ grep -v 'book' | grep -v 'CHANGELOG.md' | \ xargs sed -i 's/QueuedFinalized/QueuedCheckpointVerified/g' git ls-tree --full-tree -r --name-only HEAD | \ grep -v 'book' | grep -v 'CHANGELOG.md' | \ xargs sed -i 's/queue_and_commit_non_finalized/queue_and_commit_to_non_finalized_state/g' git ls-tree --full-tree -r --name-only HEAD | \ grep -v 'book' | grep -v 'CHANGELOG.md' | \ xargs sed -i 's/queued_non_finalized_blocks/non_finalized_state_queued_blocks/g' git ls-tree --full-tree -r --name-only HEAD | \ grep -v 'book' | grep -v 'CHANGELOG.md' | \ xargs sed -i 's/send_non_finalized_block_error/send_semantically_verified_block_error/g' git ls-tree --full-tree -r --name-only HEAD | \ grep -v 'book' | grep -v 'CHANGELOG.md' | \ xargs sed -i 's/QueuedNonFinalized/QueuedSemanticallyVerified/g' git ls-tree --full-tree -r --name-only HEAD | \ grep -v 'book' | grep -v 'CHANGELOG.md' | \ xargs sed -i 's/last_sent_finalized_block_hash/finalized_block_write_last_sent_hash/g' git ls-tree --full-tree -r --name-only HEAD | \ grep -v 'book' | grep -v 'CHANGELOG.md' | \ xargs sed -i 's/sent_non_finalized_block_hashes/non_finalized_block_write_sent_hashes/g' git ls-tree --full-tree -r --name-only HEAD | \ grep -v 'book' | grep -v 'CHANGELOG.md' | \ xargs sed -i 's/invalid_block_reset_receiver/invalid_block_write_reset_receiver/g' cargo fmt --all ``` * add missing log renames by script ``` git ls-tree --full-tree -r --name-only HEAD | \ grep -v 'book' | grep -v 'CHANGELOG.md' | \ xargs sed -i 's/queued finalized block/finalized state queue block/g' git ls-tree --full-tree -r --name-only HEAD | \ grep -v 'book' | grep -v 'CHANGELOG.md' | \ xargs sed -i 's/queued non-finalized block/non-finalized state queue 
block/g' cargo fmt --all ``` --- zebra-consensus/src/checkpoint.rs | 14 +- zebra-state/src/service.rs | 167 ++++++++++-------- zebra-state/src/service/finalized_state.rs | 4 +- zebra-state/src/service/queued_blocks.rs | 21 ++- .../service/queued_blocks/tests/vectors.rs | 6 +- zebra-state/src/service/tests.rs | 8 +- zebra-state/src/service/write.rs | 6 +- 7 files changed, 120 insertions(+), 106 deletions(-) diff --git a/zebra-consensus/src/checkpoint.rs b/zebra-consensus/src/checkpoint.rs index b575d79d8b7..2334383b76a 100644 --- a/zebra-consensus/src/checkpoint.rs +++ b/zebra-consensus/src/checkpoint.rs @@ -970,7 +970,7 @@ pub enum VerifyCheckpointError { #[error("checkpoint verifier was dropped")] Dropped, #[error(transparent)] - CommitFinalized(BoxError), + CommitCheckpointVerified(BoxError), #[error(transparent)] Tip(BoxError), #[error(transparent)] @@ -1084,19 +1084,19 @@ where // we don't reject the entire checkpoint. // Instead, we reset the verifier to the successfully committed state tip. let state_service = self.state_service.clone(); - let commit_finalized_block = tokio::spawn(async move { + let commit_checkpoint_verified = tokio::spawn(async move { let hash = req_block .rx .await .map_err(Into::into) - .map_err(VerifyCheckpointError::CommitFinalized) + .map_err(VerifyCheckpointError::CommitCheckpointVerified) .expect("CheckpointVerifier does not leave dangling receivers")?; // We use a `ServiceExt::oneshot`, so that every state service // `poll_ready` has a corresponding `call`. See #1593. match state_service .oneshot(zs::Request::CommitCheckpointVerifiedBlock(req_block.block)) - .map_err(VerifyCheckpointError::CommitFinalized) + .map_err(VerifyCheckpointError::CommitCheckpointVerified) .await? 
{ zs::Response::Committed(committed_hash) => { @@ -1110,10 +1110,10 @@ where let state_service = self.state_service.clone(); let reset_sender = self.reset_sender.clone(); async move { - let result = commit_finalized_block.await; + let result = commit_checkpoint_verified.await; // Avoid a panic on shutdown // - // When `zebrad` is terminated using Ctrl-C, the `commit_finalized_block` task + // When `zebrad` is terminated using Ctrl-C, the `commit_checkpoint_verified` task // can return a `JoinError::Cancelled`. We expect task cancellation on shutdown, // so we don't need to panic here. The persistent state is correct even when the // task is cancelled, because block data is committed inside transactions, in @@ -1121,7 +1121,7 @@ where let result = if zebra_chain::shutdown::is_shutting_down() { Err(VerifyCheckpointError::ShuttingDown) } else { - result.expect("commit_finalized_block should not panic") + result.expect("commit_checkpoint_verified should not panic") }; if result.is_err() { // If there was an error committing the block, then this verifier diff --git a/zebra-state/src/service.rs b/zebra-state/src/service.rs index 2f229da9908..75d1f0ac1ca 100644 --- a/zebra-state/src/service.rs +++ b/zebra-state/src/service.rs @@ -86,7 +86,7 @@ mod tests; pub use finalized_state::{OutputIndex, OutputLocation, TransactionLocation}; -use self::queued_blocks::{QueuedFinalized, QueuedNonFinalized, SentHashes}; +use self::queued_blocks::{QueuedCheckpointVerified, QueuedSemanticallyVerified, SentHashes}; /// A read-write service for Zebra's cached blockchain state. /// @@ -124,25 +124,26 @@ pub(crate) struct StateService { // /// Queued blocks for the [`NonFinalizedState`] that arrived out of order. /// These blocks are awaiting their parent blocks before they can do contextual verification. - queued_non_finalized_blocks: QueuedBlocks, + non_finalized_state_queued_blocks: QueuedBlocks, /// Queued blocks for the [`FinalizedState`] that arrived out of order. 
/// These blocks are awaiting their parent blocks before they can do contextual verification. /// /// Indexed by their parent block hash. - queued_finalized_blocks: HashMap, + finalized_state_queued_blocks: HashMap, /// A channel to send blocks to the `block_write_task`, /// so they can be written to the [`NonFinalizedState`]. non_finalized_block_write_sender: - Option>, + Option>, /// A channel to send blocks to the `block_write_task`, /// so they can be written to the [`FinalizedState`]. /// /// This sender is dropped after the state has finished sending all the checkpointed blocks, /// and the lowest non-finalized block arrives. - finalized_block_write_sender: Option>, + finalized_block_write_sender: + Option>, /// The [`block::Hash`] of the most recent block sent on /// `finalized_block_write_sender` or `non_finalized_block_write_sender`. @@ -151,25 +152,25 @@ pub(crate) struct StateService { /// - the finalized tip, if there are stored blocks, or /// - the genesis block's parent hash, if the database is empty. /// - /// If `invalid_block_reset_receiver` gets a reset, this is: + /// If `invalid_block_write_reset_receiver` gets a reset, this is: /// - the hash of the last valid committed block (the parent of the invalid block). // // TODO: // - turn this into an IndexMap containing recent non-finalized block hashes and heights // (they are all potential tips) // - remove block hashes once their heights are strictly less than the finalized tip - last_sent_finalized_block_hash: block::Hash, + finalized_block_write_last_sent_hash: block::Hash, /// A set of block hashes that have been sent to the block write task. /// Hashes of blocks below the finalized tip height are periodically pruned. - sent_non_finalized_block_hashes: SentHashes, + non_finalized_block_write_sent_hashes: SentHashes, /// If an invalid block is sent on `finalized_block_write_sender` /// or `non_finalized_block_write_sender`, /// this channel gets the [`block::Hash`] of the valid tip. 
// // TODO: add tests for finalized and non-finalized resets (#2654) - invalid_block_reset_receiver: tokio::sync::mpsc::UnboundedReceiver, + invalid_block_write_reset_receiver: tokio::sync::mpsc::UnboundedReceiver, // Pending UTXO Request Tracking // @@ -188,11 +189,11 @@ pub(crate) struct StateService { // Metrics // - /// A metric tracking the maximum height that's currently in `queued_finalized_blocks` + /// A metric tracking the maximum height that's currently in `finalized_state_queued_blocks` /// - /// Set to `f64::NAN` if `queued_finalized_blocks` is empty, because grafana shows NaNs + /// Set to `f64::NAN` if `finalized_state_queued_blocks` is empty, because grafana shows NaNs /// as a break in the graph. - max_queued_finalized_height: f64, + max_finalized_queue_height: f64, } /// A read-only service for accessing Zebra's cached blockchain state. @@ -245,16 +246,16 @@ impl Drop for StateService { // Close the channels (non-blocking) // This makes the block write thread exit the next time it checks the channels. // We want to do this here so we get any errors or panics from the block write task before it shuts down. 
- self.invalid_block_reset_receiver.close(); + self.invalid_block_write_reset_receiver.close(); std::mem::drop(self.finalized_block_write_sender.take()); std::mem::drop(self.non_finalized_block_write_sender.take()); self.clear_finalized_block_queue( - "dropping the state: dropped unused queued finalized block", + "dropping the state: dropped unused finalized state queue block", ); self.clear_non_finalized_block_queue( - "dropping the state: dropped unused queued non-finalized block", + "dropping the state: dropped unused non-finalized state queue block", ); // Then drop self.read_service, which checks the block write task for panics, @@ -364,7 +365,7 @@ impl StateService { tokio::sync::mpsc::unbounded_channel(); let (finalized_block_write_sender, finalized_block_write_receiver) = tokio::sync::mpsc::unbounded_channel(); - let (invalid_block_reset_sender, invalid_block_reset_receiver) = + let (invalid_block_reset_sender, invalid_block_write_reset_receiver) = tokio::sync::mpsc::unbounded_channel(); let finalized_state_for_writing = finalized_state.clone(); @@ -396,25 +397,25 @@ impl StateService { let full_verifier_utxo_lookahead = full_verifier_utxo_lookahead.expect("unexpected negative height"); - let queued_non_finalized_blocks = QueuedBlocks::default(); + let non_finalized_state_queued_blocks = QueuedBlocks::default(); let pending_utxos = PendingUtxos::default(); - let last_sent_finalized_block_hash = finalized_state.db.finalized_tip_hash(); + let finalized_block_write_last_sent_hash = finalized_state.db.finalized_tip_hash(); let state = Self { network, full_verifier_utxo_lookahead, - queued_non_finalized_blocks, - queued_finalized_blocks: HashMap::new(), + non_finalized_state_queued_blocks, + finalized_state_queued_blocks: HashMap::new(), non_finalized_block_write_sender: Some(non_finalized_block_write_sender), finalized_block_write_sender: Some(finalized_block_write_sender), - last_sent_finalized_block_hash, - sent_non_finalized_block_hashes: 
SentHashes::default(), - invalid_block_reset_receiver, + finalized_block_write_last_sent_hash, + non_finalized_block_write_sent_hashes: SentHashes::default(), + invalid_block_write_reset_receiver, pending_utxos, last_prune: Instant::now(), read_service: read_service.clone(), - max_queued_finalized_height: f64::NAN, + max_finalized_queue_height: f64::NAN, }; timer.finish(module_path!(), line!(), "initializing state service"); @@ -457,7 +458,7 @@ impl StateService { /// Queue a finalized block for verification and storage in the finalized state. /// /// Returns a channel receiver that provides the result of the block commit. - fn queue_and_commit_finalized( + fn queue_and_commit_to_finalized_state( &mut self, checkpoint_verified: CheckpointVerifiedBlock, ) -> oneshot::Receiver> { @@ -472,7 +473,7 @@ impl StateService { // If we're close to the final checkpoint, make the block's UTXOs available for // full verification of non-finalized blocks, even when it is in the channel. if self.is_close_to_final_checkpoint(queued_height) { - self.sent_non_finalized_block_hashes + self.non_finalized_block_write_sent_hashes .add_finalized(&checkpoint_verified) } @@ -482,23 +483,23 @@ impl StateService { if self.finalized_block_write_sender.is_some() { // We're still committing finalized blocks if let Some(duplicate_queued) = self - .queued_finalized_blocks + .finalized_state_queued_blocks .insert(queued_prev_hash, queued) { - Self::send_finalized_block_error( + Self::send_checkpoint_verified_block_error( duplicate_queued, "dropping older finalized block: got newer duplicate block", ); } - self.drain_queue_and_commit_finalized(); + self.drain_finalized_queue_and_commit(); } else { // We've finished committing finalized blocks, so drop any repeated queued blocks, // and return an error. 
// // TODO: track the latest sent height, and drop any blocks under that height - // every time we send some blocks (like QueuedNonFinalizedBlocks) - Self::send_finalized_block_error( + // every time we send some blocks (like QueuedSemanticallyVerifiedBlocks) + Self::send_checkpoint_verified_block_error( queued, "already finished committing finalized blocks: dropped duplicate block, \ block is already committed to the state", @@ -510,39 +511,39 @@ impl StateService { ); } - if self.queued_finalized_blocks.is_empty() { - self.max_queued_finalized_height = f64::NAN; - } else if self.max_queued_finalized_height.is_nan() - || self.max_queued_finalized_height < queued_height.0 as f64 + if self.finalized_state_queued_blocks.is_empty() { + self.max_finalized_queue_height = f64::NAN; + } else if self.max_finalized_queue_height.is_nan() + || self.max_finalized_queue_height < queued_height.0 as f64 { // if there are still blocks in the queue, then either: // - the new block was lower than the old maximum, and there was a gap before it, // so the maximum is still the same (and we skip this code), or // - the new block is higher than the old maximum, and there is at least one gap // between the finalized tip and the new maximum - self.max_queued_finalized_height = queued_height.0 as f64; + self.max_finalized_queue_height = queued_height.0 as f64; } metrics::gauge!( "state.checkpoint.queued.max.height", - self.max_queued_finalized_height, + self.max_finalized_queue_height, ); metrics::gauge!( "state.checkpoint.queued.block.count", - self.queued_finalized_blocks.len() as f64, + self.finalized_state_queued_blocks.len() as f64, ); rsp_rx } - /// Finds queued finalized blocks to be committed to the state in order, + /// Finds finalized state queue blocks to be committed to the state in order, /// removes them from the queue, and sends them to the block commit task. 
/// /// After queueing a finalized block, this method checks whether the newly /// queued block (and any of its descendants) can be committed to the state. /// /// Returns an error if the block commit channel has been closed. - pub fn drain_queue_and_commit_finalized(&mut self) { + pub fn drain_finalized_queue_and_commit(&mut self) { use tokio::sync::mpsc::error::{SendError, TryRecvError}; // # Correctness & Performance @@ -551,8 +552,8 @@ impl StateService { // because it is called directly from the tokio executor's Future threads. // If a block failed, we need to start again from a valid tip. - match self.invalid_block_reset_receiver.try_recv() { - Ok(reset_tip_hash) => self.last_sent_finalized_block_hash = reset_tip_hash, + match self.invalid_block_write_reset_receiver.try_recv() { + Ok(reset_tip_hash) => self.finalized_block_write_last_sent_hash = reset_tip_hash, Err(TryRecvError::Disconnected) => { info!("Block commit task closed the block reset channel. Is Zebra shutting down?"); return; @@ -562,12 +563,12 @@ impl StateService { } while let Some(queued_block) = self - .queued_finalized_blocks - .remove(&self.last_sent_finalized_block_hash) + .finalized_state_queued_blocks + .remove(&self.finalized_block_write_last_sent_hash) { let last_sent_finalized_block_height = queued_block.0.height; - self.last_sent_finalized_block_hash = queued_block.0.hash; + self.finalized_block_write_last_sent_hash = queued_block.0.hash; // If we've finished sending finalized blocks, ignore any repeated blocks. // (Blocks can be repeated after a syncer reset.) @@ -577,7 +578,7 @@ impl StateService { // If the receiver is closed, we can't send any more blocks. if let Err(SendError(queued)) = send_result { // If Zebra is shutting down, drop blocks and return an error. - Self::send_finalized_block_error( + Self::send_checkpoint_verified_block_error( queued, "block commit task exited. 
Is Zebra shutting down?", ); @@ -595,15 +596,18 @@ impl StateService { } } - /// Drops all queued finalized blocks, and sends an error on their result channels. + /// Drops all finalized state queue blocks, and sends an error on their result channels. fn clear_finalized_block_queue(&mut self, error: impl Into + Clone) { - for (_hash, queued) in self.queued_finalized_blocks.drain() { - Self::send_finalized_block_error(queued, error.clone()); + for (_hash, queued) in self.finalized_state_queued_blocks.drain() { + Self::send_checkpoint_verified_block_error(queued, error.clone()); } } - /// Send an error on a `QueuedFinalized` block's result channel, and drop the block - fn send_finalized_block_error(queued: QueuedFinalized, error: impl Into) { + /// Send an error on a `QueuedCheckpointVerified` block's result channel, and drop the block + fn send_checkpoint_verified_block_error( + queued: QueuedCheckpointVerified, + error: impl Into, + ) { let (finalized, rsp_tx) = queued; // The block sender might have already given up on this block, @@ -612,15 +616,18 @@ impl StateService { std::mem::drop(finalized); } - /// Drops all queued non-finalized blocks, and sends an error on their result channels. + /// Drops all non-finalized state queue blocks, and sends an error on their result channels. 
fn clear_non_finalized_block_queue(&mut self, error: impl Into + Clone) { - for (_hash, queued) in self.queued_non_finalized_blocks.drain() { - Self::send_non_finalized_block_error(queued, error.clone()); + for (_hash, queued) in self.non_finalized_state_queued_blocks.drain() { + Self::send_semantically_verified_block_error(queued, error.clone()); } } - /// Send an error on a `QueuedNonFinalized` block's result channel, and drop the block - fn send_non_finalized_block_error(queued: QueuedNonFinalized, error: impl Into) { + /// Send an error on a `QueuedSemanticallyVerified` block's result channel, and drop the block + fn send_semantically_verified_block_error( + queued: QueuedSemanticallyVerified, + error: impl Into, + ) { let (finalized, rsp_tx) = queued; // The block sender might have already given up on this block, @@ -637,7 +644,7 @@ impl StateService { /// /// [1]: https://zebra.zfnd.org/dev/rfcs/0005-state-updates.html#committing-non-finalized-blocks #[instrument(level = "debug", skip(self, semantically_verrified))] - fn queue_and_commit_non_finalized( + fn queue_and_commit_to_non_finalized_state( &mut self, semantically_verrified: SemanticallyVerifiedBlock, ) -> oneshot::Receiver> { @@ -645,7 +652,7 @@ impl StateService { let parent_hash = semantically_verrified.block.header.previous_block_hash; if self - .sent_non_finalized_block_hashes + .non_finalized_block_write_sent_hashes .contains(&semantically_verrified.hash) { let (rsp_tx, rsp_rx) = oneshot::channel(); @@ -672,7 +679,7 @@ impl StateService { // has been queued but not yet committed to the state fails the older request and replaces // it with the newer request. 
let rsp_rx = if let Some((_, old_rsp_tx)) = self - .queued_non_finalized_blocks + .non_finalized_state_queued_blocks .get_mut(&semantically_verrified.hash) { tracing::debug!("replacing older queued request with new request"); @@ -682,7 +689,7 @@ impl StateService { rsp_rx } else { let (rsp_tx, rsp_rx) = oneshot::channel(); - self.queued_non_finalized_blocks + self.non_finalized_state_queued_blocks .queue((semantically_verrified, rsp_tx)); rsp_rx }; @@ -697,9 +704,10 @@ impl StateService { // TODO: configure the state with the last checkpoint hash instead? if self.finalized_block_write_sender.is_some() && self - .queued_non_finalized_blocks - .has_queued_children(self.last_sent_finalized_block_hash) - && self.read_service.db.finalized_tip_hash() == self.last_sent_finalized_block_hash + .non_finalized_state_queued_blocks + .has_queued_children(self.finalized_block_write_last_sent_hash) + && self.read_service.db.finalized_tip_hash() + == self.finalized_block_write_last_sent_hash { // Tell the block write task to stop committing finalized blocks, // and move on to committing non-finalized blocks. @@ -728,10 +736,10 @@ impl StateService { "Finalized state must have at least one block before committing non-finalized state", ); - self.queued_non_finalized_blocks + self.non_finalized_state_queued_blocks .prune_by_height(finalized_tip_height); - self.sent_non_finalized_block_hashes + self.non_finalized_block_write_sent_hashes .prune_by_height(finalized_tip_height); } @@ -740,7 +748,7 @@ impl StateService { /// Returns `true` if `hash` is a valid previous block hash for new non-finalized blocks. 
fn can_fork_chain_at(&self, hash: &block::Hash) -> bool { - self.sent_non_finalized_block_hashes.contains(hash) + self.non_finalized_block_write_sent_hashes.contains(hash) || &self.read_service.db.finalized_tip_hash() == hash } @@ -765,18 +773,19 @@ impl StateService { while let Some(parent_hash) = new_parents.pop() { let queued_children = self - .queued_non_finalized_blocks + .non_finalized_state_queued_blocks .dequeue_children(parent_hash); for queued_child in queued_children { let (SemanticallyVerifiedBlock { hash, .. }, _) = queued_child; - self.sent_non_finalized_block_hashes.add(&queued_child.0); + self.non_finalized_block_write_sent_hashes + .add(&queued_child.0); let send_result = non_finalized_block_write_sender.send(queued_child); if let Err(SendError(queued)) = send_result { // If Zebra is shutting down, drop blocks and return an error. - Self::send_non_finalized_block_error( + Self::send_semantically_verified_block_error( queued, "block commit task exited. Is Zebra shutting down?", ); @@ -792,7 +801,7 @@ impl StateService { } } - self.sent_non_finalized_block_hashes.finish_batch(); + self.non_finalized_block_write_sent_hashes.finish_batch(); }; } @@ -905,7 +914,7 @@ impl Service for StateService { let span = Span::current(); match req { - // Uses queued_non_finalized_blocks and pending_utxos in the StateService + // Uses non_finalized_state_queued_blocks and pending_utxos in the StateService // Accesses shared writeable state in the StateService, NonFinalizedState, and ZebraDb. 
Request::CommitSemanticallyVerifiedBlock(semantically_verified) => { self.assert_block_can_be_validated(&semantically_verified); @@ -925,7 +934,9 @@ impl Service for StateService { // https://docs.rs/tokio/latest/tokio/task/fn.block_in_place.html let rsp_rx = tokio::task::block_in_place(move || { - span.in_scope(|| self.queue_and_commit_non_finalized(semantically_verified)) + span.in_scope(|| { + self.queue_and_commit_to_non_finalized_state(semantically_verified) + }) }); // TODO: @@ -954,7 +965,7 @@ impl Service for StateService { .boxed() } - // Uses queued_finalized_blocks and pending_utxos in the StateService. + // Uses finalized_state_queued_blocks and pending_utxos in the StateService. // Accesses shared writeable state in the StateService. Request::CommitCheckpointVerifiedBlock(finalized) => { // # Consensus @@ -971,7 +982,7 @@ impl Service for StateService { // // This method doesn't block, access the database, or perform CPU-intensive tasks, // so we can run it directly in the tokio executor's Future threads. - let rsp_rx = self.queue_and_commit_finalized(finalized); + let rsp_rx = self.queue_and_commit_to_finalized_state(finalized); // TODO: // - check for panics in the block write task here, @@ -996,7 +1007,7 @@ impl Service for StateService { .boxed() } - // Uses pending_utxos and queued_non_finalized_blocks in the StateService. + // Uses pending_utxos and non_finalized_state_queued_blocks in the StateService. // If the UTXO isn't in the queued blocks, runs concurrently using the ReadStateService. Request::AwaitUtxo(outpoint) => { // Prepare the AwaitUtxo future from PendingUxtos. @@ -1008,7 +1019,7 @@ impl Service for StateService { // Check the non-finalized block queue outside the returned future, // so we can access mutable state fields. 
- if let Some(utxo) = self.queued_non_finalized_blocks.utxo(&outpoint) { + if let Some(utxo) = self.non_finalized_state_queued_blocks.utxo(&outpoint) { self.pending_utxos.respond(&outpoint, utxo); // We're finished, the returned future gets the UTXO from the respond() channel. @@ -1018,7 +1029,7 @@ impl Service for StateService { } // Check the sent non-finalized blocks - if let Some(utxo) = self.sent_non_finalized_block_hashes.utxo(&outpoint) { + if let Some(utxo) = self.non_finalized_block_write_sent_hashes.utxo(&outpoint) { self.pending_utxos.respond(&outpoint, utxo); // We're finished, the returned future gets the UTXO from the respond() channel. @@ -1027,7 +1038,7 @@ impl Service for StateService { return response_fut; } - // We ignore any UTXOs in FinalizedState.queued_finalized_blocks, + // We ignore any UTXOs in FinalizedState.finalized_state_queued_blocks, // because it is only used during checkpoint verification. // // This creates a rare race condition, but it doesn't seem to happen much in practice. diff --git a/zebra-state/src/service/finalized_state.rs b/zebra-state/src/service/finalized_state.rs index 1ac34e2c5db..c6ca264f38e 100644 --- a/zebra-state/src/service/finalized_state.rs +++ b/zebra-state/src/service/finalized_state.rs @@ -24,7 +24,7 @@ use zebra_chain::{block, parameters::Network}; use crate::{ request::ContextuallyVerifiedBlockWithTrees, - service::{check, QueuedFinalized}, + service::{check, QueuedCheckpointVerified}, BoxError, CheckpointVerifiedBlock, CloneError, Config, }; @@ -167,7 +167,7 @@ impl FinalizedState { /// order. 
pub fn commit_finalized( &mut self, - ordered_block: QueuedFinalized, + ordered_block: QueuedCheckpointVerified, ) -> Result { let (checkpoint_verified, rsp_tx) = ordered_block; let result = self.commit_finalized_direct( diff --git a/zebra-state/src/service/queued_blocks.rs b/zebra-state/src/service/queued_blocks.rs index 9f350ea2c05..7a009605c53 100644 --- a/zebra-state/src/service/queued_blocks.rs +++ b/zebra-state/src/service/queued_blocks.rs @@ -15,14 +15,14 @@ use crate::{BoxError, CheckpointVerifiedBlock, SemanticallyVerifiedBlock}; #[cfg(test)] mod tests; -/// A queued finalized block, and its corresponding [`Result`] channel. -pub type QueuedFinalized = ( +/// A finalized state queue block, and its corresponding [`Result`] channel. +pub type QueuedCheckpointVerified = ( CheckpointVerifiedBlock, oneshot::Sender>, ); -/// A queued non-finalized block, and its corresponding [`Result`] channel. -pub type QueuedNonFinalized = ( +/// A non-finalized state queue block, and its corresponding [`Result`] channel. +pub type QueuedSemanticallyVerified = ( SemanticallyVerifiedBlock, oneshot::Sender>, ); @@ -31,7 +31,7 @@ pub type QueuedNonFinalized = ( #[derive(Debug, Default)] pub struct QueuedBlocks { /// Blocks awaiting their parent blocks for contextual verification. - blocks: HashMap, + blocks: HashMap, /// Hashes from `queued_blocks`, indexed by parent hash. by_parent: HashMap>, /// Hashes from `queued_blocks`, indexed by block height. @@ -47,7 +47,7 @@ impl QueuedBlocks { /// /// - if a block with the same `block::Hash` has already been queued. 
#[instrument(skip(self), fields(height = ?new.0.height, hash = %new.0.hash))] - pub fn queue(&mut self, new: QueuedNonFinalized) { + pub fn queue(&mut self, new: QueuedSemanticallyVerified) { let new_hash = new.0.hash; let new_height = new.0.height; let parent_hash = new.0.block.header.previous_block_hash; @@ -86,7 +86,10 @@ impl QueuedBlocks { /// Dequeue and return all blocks that were waiting for the arrival of /// `parent`. #[instrument(skip(self), fields(%parent_hash))] - pub fn dequeue_children(&mut self, parent_hash: block::Hash) -> Vec { + pub fn dequeue_children( + &mut self, + parent_hash: block::Hash, + ) -> Vec { let queued_children = self .by_parent .remove(&parent_hash) @@ -176,7 +179,7 @@ impl QueuedBlocks { } /// Return the queued block if it has already been registered - pub fn get_mut(&mut self, hash: &block::Hash) -> Option<&mut QueuedNonFinalized> { + pub fn get_mut(&mut self, hash: &block::Hash) -> Option<&mut QueuedSemanticallyVerified> { self.blocks.get_mut(hash) } @@ -208,7 +211,7 @@ impl QueuedBlocks { /// Returns all key-value pairs of blocks as an iterator. /// /// Doesn't update the metrics, because it is only used when the state is being dropped. 
- pub fn drain(&mut self) -> Drain<'_, block::Hash, QueuedNonFinalized> { + pub fn drain(&mut self) -> Drain<'_, block::Hash, QueuedSemanticallyVerified> { self.known_utxos.clear(); self.known_utxos.shrink_to_fit(); self.by_parent.clear(); diff --git a/zebra-state/src/service/queued_blocks/tests/vectors.rs b/zebra-state/src/service/queued_blocks/tests/vectors.rs index bd8dcbeb8e2..203caf706e6 100644 --- a/zebra-state/src/service/queued_blocks/tests/vectors.rs +++ b/zebra-state/src/service/queued_blocks/tests/vectors.rs @@ -9,17 +9,17 @@ use zebra_test::prelude::*; use crate::{ arbitrary::Prepare, - service::queued_blocks::{QueuedBlocks, QueuedNonFinalized}, + service::queued_blocks::{QueuedBlocks, QueuedSemanticallyVerified}, tests::FakeChainHelper, }; // Quick helper trait for making queued blocks with throw away channels trait IntoQueued { - fn into_queued(self) -> QueuedNonFinalized; + fn into_queued(self) -> QueuedSemanticallyVerified; } impl IntoQueued for Arc { - fn into_queued(self) -> QueuedNonFinalized { + fn into_queued(self) -> QueuedSemanticallyVerified { let (rsp_tx, _) = oneshot::channel(); (self.prepare(), rsp_tx) } diff --git a/zebra-state/src/service/tests.rs b/zebra-state/src/service/tests.rs index aed292313f4..5adfaabdf39 100644 --- a/zebra-state/src/service/tests.rs +++ b/zebra-state/src/service/tests.rs @@ -424,7 +424,7 @@ proptest! { expected_finalized_value_pool += *block_value_pool; } - let result_receiver = state_service.queue_and_commit_finalized(block.clone()); + let result_receiver = state_service.queue_and_commit_to_finalized_state(block.clone()); let result = result_receiver.blocking_recv(); prop_assert!(result.is_ok(), "unexpected failed finalized block commit: {:?}", result); @@ -450,7 +450,7 @@ proptest! 
{ let block_value_pool = &block.block.chain_value_pool_change(&transparent::utxos_from_ordered_utxos(utxos))?; expected_non_finalized_value_pool += *block_value_pool; - let result_receiver = state_service.queue_and_commit_non_finalized(block.clone()); + let result_receiver = state_service.queue_and_commit_to_non_finalized_state(block.clone()); let result = result_receiver.blocking_recv(); prop_assert!(result.is_ok(), "unexpected failed non-finalized block commit: {:?}", result); @@ -509,7 +509,7 @@ proptest! { TipAction::grow_with(expected_block.clone().into()) }; - let result_receiver = state_service.queue_and_commit_finalized(block); + let result_receiver = state_service.queue_and_commit_to_finalized_state(block); let result = result_receiver.blocking_recv(); prop_assert!(result.is_ok(), "unexpected failed finalized block commit: {:?}", result); @@ -532,7 +532,7 @@ proptest! { TipAction::grow_with(expected_block.clone().into()) }; - let result_receiver = state_service.queue_and_commit_non_finalized(block); + let result_receiver = state_service.queue_and_commit_to_non_finalized_state(block); let result = result_receiver.blocking_recv(); prop_assert!(result.is_ok(), "unexpected failed non-finalized block commit: {:?}", result); diff --git a/zebra-state/src/service/write.rs b/zebra-state/src/service/write.rs index 74d6de14400..94392d2aa2c 100644 --- a/zebra-state/src/service/write.rs +++ b/zebra-state/src/service/write.rs @@ -17,7 +17,7 @@ use crate::{ check, finalized_state::{FinalizedState, ZebraDb}, non_finalized_state::NonFinalizedState, - queued_blocks::{QueuedFinalized, QueuedNonFinalized}, + queued_blocks::{QueuedCheckpointVerified, QueuedSemanticallyVerified}, BoxError, ChainTipBlock, ChainTipSender, CloneError, }, CommitSemanticallyVerifiedError, SemanticallyVerifiedBlock, @@ -131,8 +131,8 @@ fn update_latest_chain_channels( ) )] pub fn write_blocks_from_channels( - mut finalized_block_write_receiver: UnboundedReceiver, - mut 
non_finalized_block_write_receiver: UnboundedReceiver, + mut finalized_block_write_receiver: UnboundedReceiver, + mut non_finalized_block_write_receiver: UnboundedReceiver, mut finalized_state: FinalizedState, mut non_finalized_state: NonFinalizedState, invalid_block_reset_sender: UnboundedSender, From dc1eb18aaf75494cd6995876f12f57300a3734df Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 15 Jun 2023 18:35:22 +0000 Subject: [PATCH 104/265] build(deps): bump tj-actions/changed-files from 36.1.0 to 36.2.1 (#6973) Bumps [tj-actions/changed-files](https://github.com/tj-actions/changed-files) from 36.1.0 to 36.2.1. - [Release notes](https://github.com/tj-actions/changed-files/releases) - [Changelog](https://github.com/tj-actions/changed-files/blob/main/HISTORY.md) - [Commits](https://github.com/tj-actions/changed-files/compare/v36.1.0...v36.2.1) --- updated-dependencies: - dependency-name: tj-actions/changed-files dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/lint.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index a8ec87324a5..71e61543557 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -37,7 +37,7 @@ jobs: - name: Rust files id: changed-files-rust - uses: tj-actions/changed-files@v36.1.0 + uses: tj-actions/changed-files@v36.2.1 with: files: | **/*.rs @@ -49,7 +49,7 @@ jobs: - name: Workflow files id: changed-files-workflows - uses: tj-actions/changed-files@v36.1.0 + uses: tj-actions/changed-files@v36.2.1 with: files: | .github/workflows/*.yml From 98478c3b754321001f998a7acd55d63b709d9927 Mon Sep 17 00:00:00 2001 From: teor Date: Fri, 16 Jun 2023 04:35:44 +1000 Subject: [PATCH 105/265] fix(doc): Add `fastmod --hidden` to mass-renames.md (#6913) * Add `fastmod --hidden` to mass-renames.md * Do multiple skip paths * Do multiple sed renames in the same sed --------- Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com> --- book/src/dev/mass-renames.md | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/book/src/dev/mass-renames.md b/book/src/dev/mass-renames.md index cd9eda6de01..9240d873e7f 100644 --- a/book/src/dev/mass-renames.md +++ b/book/src/dev/mass-renames.md @@ -13,14 +13,14 @@ so changing them can lead to unexpected test failures or hangs. 
You can use `sed` to rename all the instances of a name in Zebra's code, documentation, and tests: ```sh git ls-tree --full-tree -r --name-only HEAD | \ -xargs sed -i 's/OldName/NewName/g' +xargs sed -i -e 's/OldName/NewName/g' -e 's/OtherOldName/OtherNewName/g' ``` Or excluding specific paths: ```sh git ls-tree --full-tree -r --name-only HEAD | \ -grep -v 'path-to-skip' | \ -xargs sed -i 's/OldName/NewName/g' +grep -v -e 'path-to-skip' -e 'other-path-to-skip' | \ +xargs sed -i -e 's/OldName/NewName/g' -e 's/OtherOldName/OtherNewName/g' ``` `sed` also supports regular expressions to replace a pattern with another pattern. @@ -47,7 +47,8 @@ git worktree add ../zebra-pr origin/pr-branch-name cd ../zebra-sed # run the scripts in the PR or commit message git ls-tree --full-tree -r --name-only HEAD | \ -xargs sed -i 's/OldName/NewName/g' +grep -v -e 'path-to-skip' -e 'other-path-to-skip' | \ +xargs sed -i -e 's/OldName/NewName/g' -e 's/OtherOldName/OtherNewName/g' cargo fmt --all ``` @@ -66,9 +67,11 @@ and ask the author to re-run the script on the latest `main`. You can use `fastmod` to rename some instances, but skip others: ```sh -fastmod --fixed-strings "OldName" "NewName" [paths to change] +fastmod --hidden --fixed-strings "OldName" "NewName" [paths to change] ``` +Using the `--hidden` flag does renames in `.github` workflows, issue templates, and other configs. + `fastmod` also supports regular expressions to replace a pattern with another pattern. Here's how to make a PR with these replacements: From 484f3d746d38cf7ce40a65b39943f49ec339bffe Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 15 Jun 2023 18:35:56 +0000 Subject: [PATCH 106/265] build(deps): bump bitflags from 2.3.1 to 2.3.2 (#6943) Bumps [bitflags](https://github.com/bitflags/bitflags) from 2.3.1 to 2.3.2. 
- [Release notes](https://github.com/bitflags/bitflags/releases) - [Changelog](https://github.com/bitflags/bitflags/blob/main/CHANGELOG.md) - [Commits](https://github.com/bitflags/bitflags/compare/2.3.1...2.3.2) --- updated-dependencies: - dependency-name: bitflags dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 10 +++++----- zebra-chain/Cargo.toml | 2 +- zebra-network/Cargo.toml | 2 +- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 0b008412e94..0d2e47ed0e7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -470,9 +470,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitflags" -version = "2.3.1" +version = "2.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6776fc96284a0bb647b615056fc496d1fe1644a7ab01829818a6d91cae888b84" +checksum = "6dbe3c979c178231552ecba20214a8272df4e09f232a87aef4320cf06539aded" [[package]] name = "bitflags-serde-legacy" @@ -480,7 +480,7 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5b64e60c28b6d25ad92e8b367801ff9aa12b41d05fc8798055d296bace4a60cc" dependencies = [ - "bitflags 2.3.1", + "bitflags 2.3.2", "serde", ] @@ -5641,7 +5641,7 @@ dependencies = [ name = "zebra-chain" version = "1.0.0-beta.26" dependencies = [ - "bitflags 2.3.1", + "bitflags 2.3.2", "bitflags-serde-legacy", "bitvec", "blake2b_simd", @@ -5746,7 +5746,7 @@ dependencies = [ name = "zebra-network" version = "1.0.0-beta.26" dependencies = [ - "bitflags 2.3.1", + "bitflags 2.3.2", "byteorder", "bytes", "chrono", diff --git a/zebra-chain/Cargo.toml b/zebra-chain/Cargo.toml index 34f4dd57aef..394be69aa21 100644 --- a/zebra-chain/Cargo.toml +++ b/zebra-chain/Cargo.toml @@ -49,7 +49,7 @@ bench = ["zebra-test"] # Cryptography bitvec = "1.0.1" 
-bitflags = "2.2.1" +bitflags = "2.3.2" bitflags-serde-legacy = "0.1.1" blake2b_simd = "1.0.1" blake2s_simd = "1.0.1" diff --git a/zebra-network/Cargo.toml b/zebra-network/Cargo.toml index 3601607136f..ae5639881d1 100644 --- a/zebra-network/Cargo.toml +++ b/zebra-network/Cargo.toml @@ -36,7 +36,7 @@ progress-bar = [ proptest-impl = ["proptest", "proptest-derive", "zebra-chain/proptest-impl"] [dependencies] -bitflags = "2.2.1" +bitflags = "2.3.2" byteorder = "1.4.3" bytes = "1.4.0" chrono = { version = "0.4.26", default-features = false, features = ["clock", "std"] } From 1e12a58b5f04c618e68def6a2847b3bfcd929257 Mon Sep 17 00:00:00 2001 From: teor Date: Fri, 16 Jun 2023 07:11:24 +1000 Subject: [PATCH 107/265] fix(handshake): Add extra timeout logging to peer TCP connections (#6969) * Add a missing timeout to outbound TCP connections * Move inbound handshakes into their own function, replacing the manual span * Delete a useless manual span in zebra_network::config * Add an extra timeout to the spawned inbound handshake task --- zebra-network/src/config.rs | 5 - zebra-network/src/constants.rs | 6 +- zebra-network/src/peer/connector.rs | 5 +- zebra-network/src/peer_set/initialize.rs | 129 +++++++++++------- .../src/peer_set/initialize/tests/vectors.rs | 3 +- .../components/inbound/tests/real_peer_set.rs | 16 ++- 6 files changed, 100 insertions(+), 64 deletions(-) diff --git a/zebra-network/src/config.rs b/zebra-network/src/config.rs index 78662a712b3..067a50ba09c 100644 --- a/zebra-network/src/config.rs +++ b/zebra-network/src/config.rs @@ -320,11 +320,6 @@ impl Config { Ok(Ok(ip_addrs)) => { let ip_addrs: Vec = ip_addrs.map(canonical_peer_addr).collect(); - // if we're logging at debug level, - // the full list of IP addresses will be shown in the log message - let debug_span = debug_span!("", remote_ip_addrs = ?ip_addrs); - let _span_guard = debug_span.enter(); - // This log is needed for user debugging, but it's annoying during tests. 
#[cfg(not(test))] info!(seed = ?host, remote_ip_count = ?ip_addrs.len(), "resolved seed peer IP addresses"); diff --git a/zebra-network/src/constants.rs b/zebra-network/src/constants.rs index e137fb7b212..0f9d2fac2cf 100644 --- a/zebra-network/src/constants.rs +++ b/zebra-network/src/constants.rs @@ -83,7 +83,11 @@ pub const PEERSET_BUFFER_SIZE: usize = 3; /// and receiving a response from a remote peer. pub const REQUEST_TIMEOUT: Duration = Duration::from_secs(20); -/// The timeout for handshakes when connecting to new peers. +/// The timeout for connections and handshakes when connecting to new peers. +/// +/// Outbound TCP connections must complete within this timeout, +/// then the handshake messages get an additional `HANDSHAKE_TIMEOUT` to complete. +/// (Inbound TCP accepts can't have a timeout, because they are handled by the OS.) /// /// This timeout should remain small, because it helps stop slow peers getting /// into the peer set. This is particularly important for network-constrained diff --git a/zebra-network/src/peer/connector.rs b/zebra-network/src/peer/connector.rs index dd2342c7929..e7047ea7128 100644 --- a/zebra-network/src/peer/connector.rs +++ b/zebra-network/src/peer/connector.rs @@ -7,13 +7,14 @@ use std::{ }; use futures::prelude::*; -use tokio::net::TcpStream; +use tokio::{net::TcpStream, time::timeout}; use tower::{Service, ServiceExt}; use tracing_futures::Instrument; use zebra_chain::chain_tip::{ChainTip, NoChainTip}; use crate::{ + constants::HANDSHAKE_TIMEOUT, peer::{Client, ConnectedAddr, Handshake, HandshakeRequest}, peer_set::ConnectionTracker, BoxError, PeerSocketAddr, Request, Response, @@ -93,7 +94,7 @@ where let connector_span = info_span!("connector", peer = ?connected_addr); async move { - let tcp_stream = TcpStream::connect(*addr).await?; + let tcp_stream = timeout(HANDSHAKE_TIMEOUT, TcpStream::connect(*addr)).await??; let client = hs .oneshot(HandshakeRequest:: { data_stream: tcp_stream, diff --git 
a/zebra-network/src/peer_set/initialize.rs b/zebra-network/src/peer_set/initialize.rs index 98b32648809..2d01437afce 100644 --- a/zebra-network/src/peer_set/initialize.rs +++ b/zebra-network/src/peer_set/initialize.rs @@ -34,7 +34,7 @@ use zebra_chain::chain_tip::ChainTip; use crate::{ address_book_updater::AddressBookUpdater, - constants, + constants::{self, HANDSHAKE_TIMEOUT}, meta_addr::{MetaAddr, MetaAddrChange}, peer::{ self, address_is_valid_for_inbound_listeners, HandshakeRequest, MinimumPeerVersion, @@ -100,9 +100,9 @@ pub async fn init( Arc>, ) where - S: Service + Clone + Send + 'static, + S: Service + Clone + Send + Sync + 'static, S::Future: Send + 'static, - C: ChainTip + Clone + Send + 'static, + C: ChainTip + Clone + Send + Sync + 'static, { // If we want Zebra to operate with no network, // we should implement a `zebrad` command that doesn't use `zebra-network`. @@ -551,7 +551,7 @@ async fn accept_inbound_connections( config: Config, listener: TcpListener, min_inbound_peer_connection_interval: Duration, - mut handshaker: S, + handshaker: S, peerset_tx: futures::channel::mpsc::Sender, ) -> Result<(), BoxError> where @@ -579,6 +579,7 @@ where None => unreachable!("handshakes never terminates, because it contains a future that never resolves"), }, + // This future must wait until new connections are available: it can't have a timeout. inbound_result = listener.accept() => inbound_result, }; @@ -602,51 +603,26 @@ where "handshaking on an open inbound peer connection" ); - let connected_addr = peer::ConnectedAddr::new_inbound_direct(addr); - let accept_span = info_span!("listen_accept", peer = ?connected_addr); - let _guard = accept_span.enter(); - - debug!("got incoming connection"); - - // # Correctness - // - // Holding the drop guard returned by Span::enter across .await points will - // result in incorrect traces if it yields. - // - // This await is okay because the handshaker's `poll_ready` method always returns Ready. 
- handshaker.ready().await?; - // TODO: distinguish between proxied listeners and direct listeners - let handshaker_span = info_span!("listen_handshaker", peer = ?connected_addr); - - // Construct a handshake future but do not drive it yet.... - let handshake = handshaker.call(HandshakeRequest { - data_stream: tcp_stream, - connected_addr, + let handshake_task = accept_inbound_handshake( + addr, + handshaker.clone(), + tcp_stream, connection_tracker, - }); - // ... instead, spawn a new task to handle this connection - { - let mut peerset_tx = peerset_tx.clone(); - - let handshake_task = tokio::spawn( - async move { - let handshake_result = handshake.await; - - if let Ok(client) = handshake_result { - // The connection limit makes sure this send doesn't block - let _ = peerset_tx.send((addr, client)).await; - } else { - debug!(?handshake_result, "error handshaking with inbound peer"); - } - } - .instrument(handshaker_span), - ); - - handshakes.push(Box::pin(handshake_task)); - } - - // We need to drop the guard before yielding. - std::mem::drop(_guard); + peerset_tx.clone(), + ) + .await?; + + // This timeout helps locate inbound peer connection hangs, see #6763 for details. + handshakes.push(Box::pin( + tokio::time::timeout( + // Only trigger this timeout if the inner handshake timeout fails + HANDSHAKE_TIMEOUT + Duration::from_millis(500), + handshake_task, + ) + .inspect_err(|_elapsed| { + info!("timeout in spawned accept_inbound_handshake() task") + }), + )); // Rate-limit inbound connection handshakes. // But sleep longer after a successful connection, @@ -676,6 +652,63 @@ where } } +/// Set up a new inbound connection as a Zcash peer. +/// +/// Uses `handshaker` to perform a Zcash network protocol handshake, and sends +/// the [`peer::Client`] result over `peerset_tx`. 
+#[instrument(skip(handshaker, tcp_stream, connection_tracker, peerset_tx))] +async fn accept_inbound_handshake( + addr: PeerSocketAddr, + mut handshaker: S, + tcp_stream: TcpStream, + connection_tracker: ConnectionTracker, + peerset_tx: futures::channel::mpsc::Sender, +) -> Result, BoxError> +where + S: Service, Response = peer::Client, Error = BoxError> + + Clone, + S::Future: Send + 'static, +{ + let connected_addr = peer::ConnectedAddr::new_inbound_direct(addr); + + debug!("got incoming connection"); + + // # Correctness + // + // Holding the drop guard returned by Span::enter across .await points will + // result in incorrect traces if it yields. + // + // This await is okay because the handshaker's `poll_ready` method always returns Ready. + handshaker.ready().await?; + // TODO: distinguish between proxied listeners and direct listeners + let handshaker_span = info_span!("listen_handshaker", peer = ?connected_addr); + + // Construct a handshake future but do not drive it yet.... + let handshake = handshaker.call(HandshakeRequest { + data_stream: tcp_stream, + connected_addr, + connection_tracker, + }); + // ... instead, spawn a new task to handle this connection + let mut peerset_tx = peerset_tx.clone(); + + let handshake_task = tokio::spawn( + async move { + let handshake_result = handshake.await; + + if let Ok(client) = handshake_result { + // The connection limit makes sure this send doesn't block + let _ = peerset_tx.send((addr, client)).await; + } else { + debug!(?handshake_result, "error handshaking with inbound peer"); + } + } + .instrument(handshaker_span), + ); + + Ok(handshake_task) +} + /// An action that the peer crawler can take. enum CrawlerAction { /// Drop the demand signal because there are too many pending handshakes. 
diff --git a/zebra-network/src/peer_set/initialize/tests/vectors.rs b/zebra-network/src/peer_set/initialize/tests/vectors.rs index f949506cdaf..a0abe128ce6 100644 --- a/zebra-network/src/peer_set/initialize/tests/vectors.rs +++ b/zebra-network/src/peer_set/initialize/tests/vectors.rs @@ -1453,7 +1453,7 @@ async fn init_with_peer_limit( default_config: impl Into>, ) -> Arc> where - S: Service + Clone + Send + 'static, + S: Service + Clone + Send + Sync + 'static, S::Future: Send + 'static, { // This test might fail on machines with no configured IPv4 addresses @@ -1610,6 +1610,7 @@ where S: Service, Response = peer::Client, Error = BoxError> + Clone + Send + + Sync + 'static, S::Future: Send + 'static, { diff --git a/zebrad/src/components/inbound/tests/real_peer_set.rs b/zebrad/src/components/inbound/tests/real_peer_set.rs index 6e734aae1de..ac773145966 100644 --- a/zebrad/src/components/inbound/tests/real_peer_set.rs +++ b/zebrad/src/components/inbound/tests/real_peer_set.rs @@ -6,10 +6,7 @@ use futures::FutureExt; use indexmap::IndexSet; use tokio::{sync::oneshot, task::JoinHandle}; use tower::{ - buffer::Buffer, - builder::ServiceBuilder, - util::{BoxCloneService, BoxService}, - ServiceExt, + buffer::Buffer, builder::ServiceBuilder, load_shed::LoadShed, util::BoxService, ServiceExt, }; use zebra_chain::{ @@ -600,7 +597,12 @@ async fn setup( // connected peer which responds with isolated_peer_response Buffer, // inbound service - BoxCloneService, + LoadShed< + Buffer< + BoxService, + zebra_network::Request, + >, + >, // outbound peer set (only has the connected peer) Buffer< BoxService, @@ -626,11 +628,11 @@ async fn setup( // Inbound let (setup_tx, setup_rx) = oneshot::channel(); let inbound_service = Inbound::new(MAX_INBOUND_CONCURRENCY, setup_rx); + // TODO: add a timeout just above the service, if needed let inbound_service = ServiceBuilder::new() - .boxed_clone() .load_shed() .buffer(10) - .service(inbound_service); + 
.service(BoxService::new(inbound_service)); // State // UTXO verification doesn't matter for these tests. From 02fdeb61fcd7780999e84b7f1d5ea2b3249feff5 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 15 Jun 2023 21:11:44 +0000 Subject: [PATCH 108/265] build(deps): bump sentry from 0.31.3 to 0.31.4 (#6958) Bumps [sentry](https://github.com/getsentry/sentry-rust) from 0.31.3 to 0.31.4. - [Release notes](https://github.com/getsentry/sentry-rust/releases) - [Changelog](https://github.com/getsentry/sentry-rust/blob/master/CHANGELOG.md) - [Commits](https://github.com/getsentry/sentry-rust/compare/0.31.3...0.31.4) --- updated-dependencies: - dependency-name: sentry dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com> --- Cargo.lock | 24 ++++++++++++------------ zebrad/Cargo.toml | 2 +- 2 files changed, 13 insertions(+), 13 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 0d2e47ed0e7..2d40306ae06 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3949,9 +3949,9 @@ checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" [[package]] name = "sentry" -version = "0.31.3" +version = "0.31.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de31c6e03322af2175d3c850c5b5e11efcadc01948cd1fb7b5ad0a7c7b6c7ff2" +checksum = "9e0bd2cbc3398be701a933e5b7357a4b6b1f94038d2054f118cba90b481a9fbe" dependencies = [ "httpdate", "reqwest", @@ -3967,9 +3967,9 @@ dependencies = [ [[package]] name = "sentry-backtrace" -version = "0.31.3" +version = "0.31.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "264e3ad27da3d1ad81b499dbcceae0a50e0e6ffc4b65b93f47d5180d46827644" +checksum = 
"9cf043f9bcb6c9ae084b7f10fb363a697c924badcbe7dac2dbeecea31271ed0c" dependencies = [ "backtrace", "once_cell", @@ -3979,9 +3979,9 @@ dependencies = [ [[package]] name = "sentry-contexts" -version = "0.31.3" +version = "0.31.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7144590f7950647e4df5bd95f234c3aa29124729c54bd2457e1224d701d1a91c" +checksum = "16bde19e361cff463253371dbabee51dab416c6f9285d6e62106539f96d12079" dependencies = [ "hostname", "libc", @@ -3993,9 +3993,9 @@ dependencies = [ [[package]] name = "sentry-core" -version = "0.31.3" +version = "0.31.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35614ecf115f55d93583baa02a85cb63acb6567cf91b17690d1147bac1739ca4" +checksum = "fe345c342f17e48b65451f424ce0848405b6b3a84fa0007ba444b84754bf760a" dependencies = [ "once_cell", "rand 0.8.5", @@ -4006,9 +4006,9 @@ dependencies = [ [[package]] name = "sentry-tracing" -version = "0.31.3" +version = "0.31.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eec56ebafd7cfc1175bccdf277be582ccc3308b8c353dca5831261a967a6e28c" +checksum = "dc167b6746500ea4bb86c2c13afe7ca6f75f2ed1bcfd84243e870780b8ced529" dependencies = [ "sentry-backtrace", "sentry-core", @@ -4018,9 +4018,9 @@ dependencies = [ [[package]] name = "sentry-types" -version = "0.31.3" +version = "0.31.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c56f616602a3b282bf4b4e8e5b4d10bcf9412a987df91c592b95a1f6ef1ee43" +checksum = "62d10a5962144f5fb65bb1290551623e6b976f442cb2fcb4e1dfe9fe6f8e8df4" dependencies = [ "debugid", "getrandom 0.2.10", diff --git a/zebrad/Cargo.toml b/zebrad/Cargo.toml index 0686f26a3be..b69b3d722c6 100644 --- a/zebrad/Cargo.toml +++ b/zebrad/Cargo.toml @@ -163,7 +163,7 @@ num-integer = "0.1.45" rand = { version = "0.8.5", package = "rand" } # prod feature sentry -sentry = { version = "0.31.3", default-features = false, features = ["backtrace", "contexts", "reqwest", 
"rustls", "tracing"], optional = true } +sentry = { version = "0.31.4", default-features = false, features = ["backtrace", "contexts", "reqwest", "rustls", "tracing"], optional = true } # prod feature flamegraph tracing-flame = { version = "0.2.0", optional = true } From 7598a59fd252d7b4e4caed71acb2f0921269cf47 Mon Sep 17 00:00:00 2001 From: teor Date: Fri, 16 Jun 2023 20:30:38 +1000 Subject: [PATCH 109/265] Remove an outdated TODO in release-binaries.yml (#6978) --- .github/workflows/release-binaries.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/.github/workflows/release-binaries.yml b/.github/workflows/release-binaries.yml index 319777dec22..2ec7338b5d5 100644 --- a/.github/workflows/release-binaries.yml +++ b/.github/workflows/release-binaries.yml @@ -41,7 +41,6 @@ jobs: dockerfile_path: ./docker/Dockerfile dockerfile_target: runtime image_name: zebra - # TODO: change this to `-experimental` when we release Zebra `1.0.0` tag_suffix: .experimental network: Testnet rpc_port: '18232' From 06a27ed937c1fc8cbf9ba7cee3001e56560729ad Mon Sep 17 00:00:00 2001 From: teor Date: Fri, 16 Jun 2023 20:30:46 +1000 Subject: [PATCH 110/265] Fix a broken link in CHANGELOG.md (#6979) --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 94680a1c7a1..1230bb0de46 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -20,7 +20,7 @@ Please report bugs to [the Zebra GitHub repository](https://github.com/ZcashFoun ### Changed -- Publish to [crates.io](https://crates.io/crates/zebrad) ([#6908(https://github.com/ZcashFoundation/zebra/pull/6908)) +- Publish to [crates.io](https://crates.io/crates/zebrad) ([#6908](https://github.com/ZcashFoundation/zebra/pull/6908)) - Rename tower-batch to tower-batch-control ([#6907](https://github.com/ZcashFoundation/zebra/pull/6907)) - Upgrade to ed25519-zebra 4.0.0 ([#6881](https://github.com/ZcashFoundation/zebra/pull/6881)) From b199a3008d177350d4f5369a4cb1b840c7672eb8 Mon Sep 17 00:00:00 
2001 From: teor Date: Sat, 17 Jun 2023 02:32:21 +1000 Subject: [PATCH 111/265] change(release): Add more cargo clean to the release checklist (#6964) * Add more cargo clean to the release checklist * Add locked to the install command --- .github/PULL_REQUEST_TEMPLATE/release-checklist.md | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/.github/PULL_REQUEST_TEMPLATE/release-checklist.md b/.github/PULL_REQUEST_TEMPLATE/release-checklist.md index 539780530fa..4a97b69104e 100644 --- a/.github/PULL_REQUEST_TEMPLATE/release-checklist.md +++ b/.github/PULL_REQUEST_TEMPLATE/release-checklist.md @@ -57,7 +57,8 @@ Zebra's Rust API doesn't have any support or stability guarantees, so we keep al -- [ ] Update crate versions and do a release dry-run: +- [ ] Update crate versions and do a release dry-run + - [ ] `cargo clean` (optional) - [ ] `cargo release version --verbose --execute --workspace --exclude zebrad beta` - [ ] `cargo release version --verbose --execute --package zebrad [ major | minor | patch ]` - [ ] `cargo release publish --verbose --dry-run --workspace` @@ -149,15 +150,16 @@ The end of support height is calculated from the current blockchain height: ## Publish Crates - [ ] Run `cargo login` -- [ ] Run `cargo clean` in the zebra repo +- [ ] Run `cargo clean` in the zebra repo (optional) - [ ] Publish the crates to crates.io: `cargo release publish --verbose --workspace --execute` - [ ] Check that Zebra can be installed from `crates.io`: - `cargo install --force --version 1.0.0 zebrad && ~/.cargo/bin/zebrad` + `cargo install --locked --force --version 1.0.0 zebrad && ~/.cargo/bin/zebrad` + and put the output in a comment on the PR. ## Publish Docker Images - [ ] Wait until [the Docker images have been published](https://github.com/ZcashFoundation/zebra/actions/workflows/release-binaries.yml) - [ ] Test the Docker image using `docker run --tty --interactive zfnd/zebra:v1.0.0`, - and put the output in a comment on the PR. 
+ and put the output in a comment on the PR. (You can use [gcloud cloud shell](https://console.cloud.google.com/home/dashboard?cloudshell=true)) - [ ] Un-freeze the [`batched` queue](https://dashboard.mergify.com/github/ZcashFoundation/repo/zebra/queues) using Mergify. From 58df76d4b7b466c238d8b01b510b5a89ce849b19 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 16 Jun 2023 21:02:22 +0000 Subject: [PATCH 112/265] build(deps): bump sentry from 0.31.4 to 0.31.5 (#6988) Bumps [sentry](https://github.com/getsentry/sentry-rust) from 0.31.4 to 0.31.5. - [Release notes](https://github.com/getsentry/sentry-rust/releases) - [Changelog](https://github.com/getsentry/sentry-rust/blob/master/CHANGELOG.md) - [Commits](https://github.com/getsentry/sentry-rust/compare/0.31.4...0.31.5) --- updated-dependencies: - dependency-name: sentry dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 63 +++++++++++++++++++++++++++-------------------- zebrad/Cargo.toml | 2 +- 2 files changed, 37 insertions(+), 28 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 2d40306ae06..bfc22aa0ab2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1945,7 +1945,7 @@ checksum = "0646026eb1b3eea4cd9ba47912ea5ce9cc07713d105b1a14698f4e6433d348b7" dependencies = [ "http", "hyper", - "rustls 0.21.1", + "rustls 0.21.2", "tokio", "tokio-rustls", ] @@ -2571,7 +2571,7 @@ dependencies = [ "once_cell", "rustls 0.20.8", "webpki", - "webpki-roots", + "webpki-roots 0.22.6", ] [[package]] @@ -3639,7 +3639,7 @@ dependencies = [ "once_cell", "percent-encoding", "pin-project-lite", - "rustls 0.21.1", + "rustls 0.21.2", "rustls-pemfile", "serde", "serde_json", @@ -3653,7 +3653,7 @@ dependencies = [ "wasm-bindgen", "wasm-bindgen-futures", "web-sys", - "webpki-roots", + "webpki-roots 0.22.6", "winreg", ] @@ 
-3784,9 +3784,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.21.1" +version = "0.21.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c911ba11bc8433e811ce56fde130ccf32f5127cab0e0194e9c68c5a5b671791e" +checksum = "e32ca28af694bc1bbf399c33a516dbdf1c90090b8ab23c2bc24f834aa2247f5f" dependencies = [ "log", "ring", @@ -3949,27 +3949,27 @@ checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" [[package]] name = "sentry" -version = "0.31.4" +version = "0.31.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e0bd2cbc3398be701a933e5b7357a4b6b1f94038d2054f118cba90b481a9fbe" +checksum = "01b0ad16faa5d12372f914ed40d00bda21a6d1bdcc99264c5e5e1c9495cf3654" dependencies = [ "httpdate", "reqwest", - "rustls 0.20.8", + "rustls 0.21.2", "sentry-backtrace", "sentry-contexts", "sentry-core", "sentry-tracing", "tokio", "ureq", - "webpki-roots", + "webpki-roots 0.22.6", ] [[package]] name = "sentry-backtrace" -version = "0.31.4" +version = "0.31.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9cf043f9bcb6c9ae084b7f10fb363a697c924badcbe7dac2dbeecea31271ed0c" +checksum = "11f2ee8f147bb5f22ac59b5c35754a759b9a6f6722402e2a14750b2a63fc59bd" dependencies = [ "backtrace", "once_cell", @@ -3979,9 +3979,9 @@ dependencies = [ [[package]] name = "sentry-contexts" -version = "0.31.4" +version = "0.31.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16bde19e361cff463253371dbabee51dab416c6f9285d6e62106539f96d12079" +checksum = "dcd133362c745151eeba0ac61e3ba8350f034e9fe7509877d08059fe1d7720c6" dependencies = [ "hostname", "libc", @@ -3993,9 +3993,9 @@ dependencies = [ [[package]] name = "sentry-core" -version = "0.31.4" +version = "0.31.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fe345c342f17e48b65451f424ce0848405b6b3a84fa0007ba444b84754bf760a" +checksum = 
"7163491708804a74446642ff2c80b3acd668d4b9e9f497f85621f3d250fd012b" dependencies = [ "once_cell", "rand 0.8.5", @@ -4006,9 +4006,9 @@ dependencies = [ [[package]] name = "sentry-tracing" -version = "0.31.4" +version = "0.31.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc167b6746500ea4bb86c2c13afe7ca6f75f2ed1bcfd84243e870780b8ced529" +checksum = "5aca8b88978677a27ee1a91beafe4052306c474c06f582321fde72d2e2cc2f7f" dependencies = [ "sentry-backtrace", "sentry-core", @@ -4018,9 +4018,9 @@ dependencies = [ [[package]] name = "sentry-types" -version = "0.31.4" +version = "0.31.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62d10a5962144f5fb65bb1290551623e6b976f442cb2fcb4e1dfe9fe6f8e8df4" +checksum = "9e7a88e0c1922d19b3efee12a8215f6a8a806e442e665ada71cc222cab72985f" dependencies = [ "debugid", "getrandom 0.2.10", @@ -4555,7 +4555,7 @@ version = "0.24.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c28327cf380ac148141087fbfb9de9d7bd4e84ab5d2c28fbc911d753de8a7081" dependencies = [ - "rustls 0.21.1", + "rustls 0.21.2", "tokio", ] @@ -5035,17 +5035,17 @@ checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a" [[package]] name = "ureq" -version = "2.6.2" +version = "2.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "338b31dd1314f68f3aabf3ed57ab922df95ffcd902476ca7ba3c4ce7b908c46d" +checksum = "d4b45063f47caea744e48f5baa99169bd8bd9b882d80a99941141327bbb00f99" dependencies = [ - "base64 0.13.1", + "base64 0.21.2", "log", "once_cell", - "rustls 0.20.8", + "rustls 0.21.2", + "rustls-webpki", "url", - "webpki", - "webpki-roots", + "webpki-roots 0.23.1", ] [[package]] @@ -5255,6 +5255,15 @@ dependencies = [ "webpki", ] +[[package]] +name = "webpki-roots" +version = "0.23.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b03058f88386e5ff5310d9111d53f48b17d732b401aeb83a8d5190f2ac459338" +dependencies 
= [ + "rustls-webpki", +] + [[package]] name = "which" version = "4.4.0" diff --git a/zebrad/Cargo.toml b/zebrad/Cargo.toml index b69b3d722c6..8623419a0f3 100644 --- a/zebrad/Cargo.toml +++ b/zebrad/Cargo.toml @@ -163,7 +163,7 @@ num-integer = "0.1.45" rand = { version = "0.8.5", package = "rand" } # prod feature sentry -sentry = { version = "0.31.4", default-features = false, features = ["backtrace", "contexts", "reqwest", "rustls", "tracing"], optional = true } +sentry = { version = "0.31.5", default-features = false, features = ["backtrace", "contexts", "reqwest", "rustls", "tracing"], optional = true } # prod feature flamegraph tracing-flame = { version = "0.2.0", optional = true } From 0da2dcb5038fd1c7e6ea12ae6478ed742484b0ad Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 16 Jun 2023 23:43:23 +0000 Subject: [PATCH 113/265] build(deps): bump serde_json from 1.0.96 to 1.0.97 (#6987) Bumps [serde_json](https://github.com/serde-rs/json) from 1.0.96 to 1.0.97. - [Release notes](https://github.com/serde-rs/json/releases) - [Commits](https://github.com/serde-rs/json/compare/v1.0.96...v1.0.97) --- updated-dependencies: - dependency-name: serde_json dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- zebra-chain/Cargo.toml | 2 +- zebra-node-services/Cargo.toml | 4 ++-- zebra-rpc/Cargo.toml | 2 +- zebra-state/Cargo.toml | 2 +- zebra-utils/Cargo.toml | 2 +- zebrad/Cargo.toml | 2 +- 7 files changed, 9 insertions(+), 9 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index bfc22aa0ab2..fdaf7eceba6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4064,9 +4064,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.96" +version = "1.0.97" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "057d394a50403bcac12672b2b18fb387ab6d289d957dab67dd201875391e52f1" +checksum = "bdf3bf93142acad5821c99197022e170842cdbc1c30482b98750c688c640842a" dependencies = [ "indexmap", "itoa", diff --git a/zebra-chain/Cargo.toml b/zebra-chain/Cargo.toml index 394be69aa21..905b548efa2 100644 --- a/zebra-chain/Cargo.toml +++ b/zebra-chain/Cargo.toml @@ -105,7 +105,7 @@ redjubjub = "0.7.0" reddsa = "0.5.0" # Production feature json-conversion -serde_json = { version = "1.0.95", optional = true } +serde_json = { version = "1.0.97", optional = true } # Experimental feature getblocktemplate-rpcs zcash_address = { version = "0.2.1", optional = true } diff --git a/zebra-node-services/Cargo.toml b/zebra-node-services/Cargo.toml index 61c671adcbb..70b469727ea 100644 --- a/zebra-node-services/Cargo.toml +++ b/zebra-node-services/Cargo.toml @@ -44,7 +44,7 @@ color-eyre = { version = "0.6.2", optional = true } jsonrpc-core = { version = "18.0.0", optional = true } reqwest = { version = "0.11.18", optional = true } serde = { version = "1.0.164", optional = true } -serde_json = { version = "1.0.95", optional = true } +serde_json = { version = "1.0.97", optional = true } [dev-dependencies] @@ -52,4 +52,4 @@ color-eyre = "0.6.2" jsonrpc-core = "18.0.0" reqwest = "0.11.18" serde = "1.0.164" -serde_json = "1.0.95" +serde_json = "1.0.97" 
diff --git a/zebra-rpc/Cargo.toml b/zebra-rpc/Cargo.toml index 84add04be7a..e9f6976e6ee 100644 --- a/zebra-rpc/Cargo.toml +++ b/zebra-rpc/Cargo.toml @@ -51,7 +51,7 @@ jsonrpc-http-server = "18.0.0" num_cpus = "1.15.0" # zebra-rpc needs the preserve_order feature in serde_json, which is a dependency of jsonrpc-core -serde_json = { version = "1.0.96", features = ["preserve_order"] } +serde_json = { version = "1.0.97", features = ["preserve_order"] } indexmap = { version = "1.9.3", features = ["serde"] } tokio = { version = "1.28.2", features = ["time", "rt-multi-thread", "macros", "tracing"] } diff --git a/zebra-state/Cargo.toml b/zebra-state/Cargo.toml index 41c71c58449..89ea428ab04 100644 --- a/zebra-state/Cargo.toml +++ b/zebra-state/Cargo.toml @@ -68,7 +68,7 @@ tracing = "0.1.37" # elasticsearch specific dependencies. elasticsearch = { version = "8.5.0-alpha.1", package = "elasticsearch", optional = true } -serde_json = { version = "1.0.96", package = "serde_json", optional = true } +serde_json = { version = "1.0.97", package = "serde_json", optional = true } zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.26" } diff --git a/zebra-utils/Cargo.toml b/zebra-utils/Cargo.toml index cc65e650c3b..98e3b042c0e 100644 --- a/zebra-utils/Cargo.toml +++ b/zebra-utils/Cargo.toml @@ -65,7 +65,7 @@ tinyvec = { version = "1.6.0", features = ["rustc_1_55"] } structopt = "0.3.26" hex = "0.4.3" -serde_json = "1.0.96" +serde_json = "1.0.97" tracing-error = "0.2.0" tracing-subscriber = "0.3.17" thiserror = "1.0.40" diff --git a/zebrad/Cargo.toml b/zebrad/Cargo.toml index 8623419a0f3..521f383dddf 100644 --- a/zebrad/Cargo.toml +++ b/zebrad/Cargo.toml @@ -210,7 +210,7 @@ regex = "1.8.4" semver = "1.0.17" # zebra-rpc needs the preserve_order feature, it also makes test results more stable -serde_json = { version = "1.0.96", features = ["preserve_order"] } +serde_json = { version = "1.0.97", features = ["preserve_order"] } tempfile = "3.5.0" hyper = { version = "0.14.26", 
features = ["http1", "http2", "server"]} From 73ce8fbef0e84472e18e24c05178025fc8b5498c Mon Sep 17 00:00:00 2001 From: Arya Date: Sun, 18 Jun 2023 20:53:00 -0400 Subject: [PATCH 114/265] fix(network): Limit number of peer connections per IP address, Ignore new peer connections from the same IP and port (#6980) * Limits num peer conns per ip * Update zebra-network/src/peer_set/set.rs * Update zebra-network/src/constants.rs * Apply suggestions from code review Co-authored-by: teor * Keep old peer connections, rather than replacing them with new connections * Adds max_conns_per_ip field Configures the max to usize::MAX for some tests. * Adds a test to check that max_conns_per_ip is enforced --------- Co-authored-by: teor --- zebra-network/src/constants.rs | 4 ++ zebra-network/src/peer_set/initialize.rs | 1 + zebra-network/src/peer_set/set.rs | 51 ++++++++++++++++++- zebra-network/src/peer_set/set/tests.rs | 24 +++++++++ zebra-network/src/peer_set/set/tests/prop.rs | 5 ++ .../src/peer_set/set/tests/vectors.rs | 49 ++++++++++++++++++ 6 files changed, 133 insertions(+), 1 deletion(-) diff --git a/zebra-network/src/constants.rs b/zebra-network/src/constants.rs index 0f9d2fac2cf..483b63ecfe2 100644 --- a/zebra-network/src/constants.rs +++ b/zebra-network/src/constants.rs @@ -67,6 +67,10 @@ pub const INBOUND_PEER_LIMIT_MULTIPLIER: usize = 5; /// See [`INBOUND_PEER_LIMIT_MULTIPLIER`] for details. pub const OUTBOUND_PEER_LIMIT_MULTIPLIER: usize = 3; +/// The maximum number of peer connections Zebra will keep for a given IP address +/// before it drops any additional peer connections with that IP. +pub const MAX_CONNS_PER_IP: usize = 3; + /// The buffer size for the peer set. 
/// /// This should be greater than 1 to avoid sender contention, but also reasonably diff --git a/zebra-network/src/peer_set/initialize.rs b/zebra-network/src/peer_set/initialize.rs index 2d01437afce..de27c8d37fb 100644 --- a/zebra-network/src/peer_set/initialize.rs +++ b/zebra-network/src/peer_set/initialize.rs @@ -179,6 +179,7 @@ where inv_receiver, address_metrics, MinimumPeerVersion::new(latest_chain_tip, config.network), + None, ); let peer_set = Buffer::new(BoxService::new(peer_set), constants::PEERSET_BUFFER_SIZE); diff --git a/zebra-network/src/peer_set/set.rs b/zebra-network/src/peer_set/set.rs index 0353d377f5e..0f52a067c05 100644 --- a/zebra-network/src/peer_set/set.rs +++ b/zebra-network/src/peer_set/set.rs @@ -251,6 +251,10 @@ where /// The last time we logged a message about the peer set size last_peer_log: Option, + + /// The configured maximum number of peers that can be in the + /// peer set per IP, defaults to [`crate::constants::MAX_CONNS_PER_IP`] + max_conns_per_ip: usize, } impl Drop for PeerSet @@ -270,6 +274,7 @@ where D::Error: Into, C: ChainTip, { + #[allow(clippy::too_many_arguments)] /// Construct a peerset which uses `discover` to manage peer connections. /// /// Arguments: @@ -282,6 +287,9 @@ where /// - `inv_stream`: receives inventory changes from peers, /// allowing the peer set to direct inventory requests; /// - `address_book`: when peer set is busy, it logs address book diagnostics. + /// - `minimum_peer_version`: endpoint to see the minimum peer protocol version in real time. + /// - `max_conns_per_ip`: configured maximum number of peers that can be in the + /// peer set per IP, defaults to [`crate::constants::MAX_CONNS_PER_IP`]. 
pub fn new( config: &Config, discover: D, @@ -290,6 +298,7 @@ where inv_stream: broadcast::Receiver, address_metrics: watch::Receiver, minimum_peer_version: MinimumPeerVersion, + max_conns_per_ip: Option, ) -> Self { Self { // New peers @@ -317,6 +326,8 @@ where // Metrics last_peer_log: None, address_metrics, + + max_conns_per_ip: max_conns_per_ip.unwrap_or(crate::constants::MAX_CONNS_PER_IP), } } @@ -476,6 +487,26 @@ where } } + /// Returns the number of peer connections Zebra already has with + /// the provided IP address + /// + /// # Performance + /// + /// This method is `O(connected peers)`, so it should not be called from a loop + /// that is already iterating through the peer set. + fn num_peers_with_ip(&self, ip: IpAddr) -> usize { + self.ready_services + .keys() + .chain(self.cancel_handles.keys()) + .filter(|addr| addr.ip() == ip) + .count() + } + + /// Returns `true` if Zebra is already connected to the IP and port in `addr`. + fn has_peer_with_addr(&self, addr: PeerSocketAddr) -> bool { + self.ready_services.contains_key(&addr) || self.cancel_handles.contains_key(&addr) + } + /// Checks for newly inserted or removed services. /// /// Puts inserted services in the unready list. @@ -496,7 +527,25 @@ where // - always do the same checks on every ready peer, and // - check for any errors that happened right after the handshake trace!(?key, "got Change::Insert from Discover"); - self.remove(&key); + + // # Security + // + // Drop the new peer if we are already connected to it. + // Preferring old connections avoids connection thrashing. + if self.has_peer_with_addr(key) { + std::mem::drop(svc); + continue; + } + + // # Security + // + // drop the new peer if there are already `MAX_CONNS_PER_IP` peers with + // the same IP address in the peer set. 
+ if self.num_peers_with_ip(key.ip()) >= self.max_conns_per_ip { + std::mem::drop(svc); + continue; + } + self.push_unready(key, svc); } } diff --git a/zebra-network/src/peer_set/set/tests.rs b/zebra-network/src/peer_set/set/tests.rs index a24330f9833..d111792e0e9 100644 --- a/zebra-network/src/peer_set/set/tests.rs +++ b/zebra-network/src/peer_set/set/tests.rs @@ -117,6 +117,7 @@ struct PeerSetBuilder { inv_stream: Option>, address_book: Option>>, minimum_peer_version: Option>, + max_conns_per_ip: Option, } impl PeerSetBuilder<(), ()> { @@ -137,6 +138,7 @@ impl PeerSetBuilder { inv_stream: self.inv_stream, address_book: self.address_book, minimum_peer_version: self.minimum_peer_version, + max_conns_per_ip: self.max_conns_per_ip, } } @@ -146,13 +148,33 @@ impl PeerSetBuilder { minimum_peer_version: MinimumPeerVersion, ) -> PeerSetBuilder { PeerSetBuilder { + config: self.config, + discover: self.discover, + demand_signal: self.demand_signal, + handle_rx: self.handle_rx, + inv_stream: self.inv_stream, + address_book: self.address_book, minimum_peer_version: Some(minimum_peer_version), + max_conns_per_ip: self.max_conns_per_ip, + } + } + + /// Use the provided [`MinimumPeerVersion`] instance when constructing the [`PeerSet`] instance. 
+ pub fn max_conns_per_ip(self, max_conns_per_ip: usize) -> PeerSetBuilder { + assert!( + max_conns_per_ip > 0, + "max_conns_per_ip must be greater than zero" + ); + + PeerSetBuilder { config: self.config, discover: self.discover, demand_signal: self.demand_signal, handle_rx: self.handle_rx, inv_stream: self.inv_stream, address_book: self.address_book, + minimum_peer_version: self.minimum_peer_version, + max_conns_per_ip: Some(max_conns_per_ip), } } } @@ -175,6 +197,7 @@ where let minimum_peer_version = self .minimum_peer_version .expect("`minimum_peer_version` must be set"); + let max_conns_per_ip = self.max_conns_per_ip; let demand_signal = self .demand_signal @@ -196,6 +219,7 @@ where inv_stream, address_metrics, minimum_peer_version, + max_conns_per_ip, ); (peer_set, guard) diff --git a/zebra-network/src/peer_set/set/tests/prop.rs b/zebra-network/src/peer_set/set/tests/prop.rs index f8388880bae..b7301fea214 100644 --- a/zebra-network/src/peer_set/set/tests/prop.rs +++ b/zebra-network/src/peer_set/set/tests/prop.rs @@ -42,6 +42,7 @@ proptest! { let (mut peer_set, _peer_set_guard) = PeerSetBuilder::new() .with_discover(discovered_peers) .with_minimum_peer_version(minimum_peer_version) + .max_conns_per_ip(usize::MAX) .build(); check_if_only_up_to_date_peers_are_live( @@ -72,6 +73,7 @@ proptest! { let (mut peer_set, _peer_set_guard) = PeerSetBuilder::new() .with_discover(discovered_peers) .with_minimum_peer_version(minimum_peer_version.clone()) + .max_conns_per_ip(usize::MAX) .build(); check_if_only_up_to_date_peers_are_live( @@ -122,6 +124,7 @@ proptest! { let (mut peer_set, _peer_set_guard) = PeerSetBuilder::new() .with_discover(discovered_peers) .with_minimum_peer_version(minimum_peer_version.clone()) + .max_conns_per_ip(usize::MAX) .build(); // Get the total number of active peers @@ -197,6 +200,7 @@ proptest! 
{ let (mut peer_set, _peer_set_guard) = PeerSetBuilder::new() .with_discover(discovered_peers) .with_minimum_peer_version(minimum_peer_version.clone()) + .max_conns_per_ip(usize::MAX) .build(); // Remove peers, test broadcast until there is only 1 peer left in the peerset @@ -267,6 +271,7 @@ proptest! { let (mut peer_set, _peer_set_guard) = PeerSetBuilder::new() .with_discover(discovered_peers) .with_minimum_peer_version(minimum_peer_version.clone()) + .max_conns_per_ip(usize::MAX) .build(); // Remove peers diff --git a/zebra-network/src/peer_set/set/tests/vectors.rs b/zebra-network/src/peer_set/set/tests/vectors.rs index 89ff294a827..44bf9c1aaff 100644 --- a/zebra-network/src/peer_set/set/tests/vectors.rs +++ b/zebra-network/src/peer_set/set/tests/vectors.rs @@ -174,6 +174,55 @@ fn peer_set_ready_multiple_connections() { }); } +#[test] +fn peer_set_rejects_connections_past_per_ip_limit() { + const NUM_PEER_VERSIONS: usize = crate::constants::MAX_CONNS_PER_IP + 1; + + // Use three peers with the same version + let peer_version = Version::min_specified_for_upgrade(Network::Mainnet, NetworkUpgrade::Nu5); + let peer_versions = PeerVersions { + peer_versions: [peer_version; NUM_PEER_VERSIONS].into_iter().collect(), + }; + + // Start the runtime + let (runtime, _init_guard) = zebra_test::init_async(); + let _guard = runtime.enter(); + + // Pause the runtime's timer so that it advances automatically. + // + // CORRECTNESS: This test does not depend on external resources that could really timeout, like + // real network connections. 
+ tokio::time::pause(); + + // Get peers and client handles of them + let (discovered_peers, handles) = peer_versions.mock_peer_discovery(); + let (minimum_peer_version, _best_tip_height) = + MinimumPeerVersion::with_mock_chain_tip(Network::Mainnet); + + // Make sure we have the right number of peers + assert_eq!(handles.len(), NUM_PEER_VERSIONS); + + runtime.block_on(async move { + // Build a peerset + let (mut peer_set, _peer_set_guard) = PeerSetBuilder::new() + .with_discover(discovered_peers) + .with_minimum_peer_version(minimum_peer_version.clone()) + .build(); + + // Get peerset ready + let peer_ready = peer_set + .ready() + .await + .expect("peer set service is always ready"); + + // Check we have the right amount of ready services + assert_eq!( + peer_ready.ready_services.len(), + crate::constants::MAX_CONNS_PER_IP + ); + }); +} + /// Check that a peer set with an empty inventory registry routes requests to a random ready peer. #[test] fn peer_set_route_inv_empty_registry() { From 859353b4175fe70ddb74c00334e5e050230046e5 Mon Sep 17 00:00:00 2001 From: teor Date: Mon, 19 Jun 2023 13:39:59 +1000 Subject: [PATCH 115/265] fix(panic): Stop panicking when handling inbound connection handshakes (#6984) * Remove a redundant outbound connector timeout * Fix panics in inbound connection handshaker * Refactor to simplify FuturesUnordered types --- zebra-network/src/peer/connector.rs | 9 ++- zebra-network/src/peer/handshake.rs | 4 ++ zebra-network/src/peer_set/initialize.rs | 76 +++++++++++++++++------- 3 files changed, 65 insertions(+), 24 deletions(-) diff --git a/zebra-network/src/peer/connector.rs b/zebra-network/src/peer/connector.rs index e7047ea7128..67947f9e448 100644 --- a/zebra-network/src/peer/connector.rs +++ b/zebra-network/src/peer/connector.rs @@ -7,14 +7,13 @@ use std::{ }; use futures::prelude::*; -use tokio::{net::TcpStream, time::timeout}; +use tokio::net::TcpStream; use tower::{Service, ServiceExt}; use tracing_futures::Instrument; use 
zebra_chain::chain_tip::{ChainTip, NoChainTip}; use crate::{ - constants::HANDSHAKE_TIMEOUT, peer::{Client, ConnectedAddr, Handshake, HandshakeRequest}, peer_set::ConnectionTracker, BoxError, PeerSocketAddr, Request, Response, @@ -93,8 +92,12 @@ where let connected_addr = ConnectedAddr::new_outbound_direct(addr); let connector_span = info_span!("connector", peer = ?connected_addr); + // # Security + // + // `zebra_network::init()` implements a connection timeout on this future. + // Any code outside this future does not have a timeout. async move { - let tcp_stream = timeout(HANDSHAKE_TIMEOUT, TcpStream::connect(*addr)).await??; + let tcp_stream = TcpStream::connect(*addr).await?; let client = hs .oneshot(HandshakeRequest:: { data_stream: tcp_stream, diff --git a/zebra-network/src/peer/handshake.rs b/zebra-network/src/peer/handshake.rs index f6660ac0597..01cfe98e859 100644 --- a/zebra-network/src/peer/handshake.rs +++ b/zebra-network/src/peer/handshake.rs @@ -876,6 +876,10 @@ where let relay = self.relay; let minimum_peer_version = self.minimum_peer_version.clone(); + // # Security + // + // `zebra_network::init()` implements a connection timeout on this future. + // Any code outside this future does not have a timeout. 
let fut = async move { debug!( addr = ?connected_addr, diff --git a/zebra-network/src/peer_set/initialize.rs b/zebra-network/src/peer_set/initialize.rs index de27c8d37fb..49823e9514a 100644 --- a/zebra-network/src/peer_set/initialize.rs +++ b/zebra-network/src/peer_set/initialize.rs @@ -7,6 +7,7 @@ use std::{ collections::{BTreeMap, HashSet}, convert::Infallible, net::SocketAddr, + pin::Pin, sync::Arc, time::Duration, }; @@ -15,13 +16,14 @@ use futures::{ future::{self, FutureExt}, sink::SinkExt, stream::{FuturesUnordered, StreamExt}, - TryFutureExt, + Future, TryFutureExt, }; use rand::seq::SliceRandom; use tokio::{ net::{TcpListener, TcpStream}, sync::broadcast, - time::{sleep, Instant}, + task::JoinError, + time::{error::Elapsed, sleep, Instant}, }; use tokio_stream::wrappers::IntervalStream; use tower::{ @@ -565,7 +567,8 @@ where "Inbound Connections", ); - let mut handshakes = FuturesUnordered::new(); + let mut handshakes: FuturesUnordered + Send>>> = + FuturesUnordered::new(); // Keeping an unresolved future in the pool means the stream never terminates. handshakes.push(future::pending().boxed()); @@ -575,8 +578,7 @@ where biased; next_handshake_res = handshakes.next() => match next_handshake_res { // The task has already sent the peer change to the peer set. - Some(Ok(_)) => continue, - Some(Err(task_panic)) => panic!("panic in inbound handshake task: {task_panic:?}"), + Some(()) => continue, None => unreachable!("handshakes never terminates, because it contains a future that never resolves"), }, @@ -611,19 +613,37 @@ where connection_tracker, peerset_tx.clone(), ) - .await?; + .await? + .map(move |res| match res { + Ok(()) => (), + Err(e @ JoinError { .. }) => { + if e.is_panic() { + panic!("panic during inbound handshaking: {e:?}"); + } else { + info!( + "task error during inbound handshaking: {e:?}, is Zebra shutting down?" 
+ ) + } + } + }); + + let handshake_timeout = tokio::time::timeout( + // Only trigger this timeout if the inner handshake timeout fails + HANDSHAKE_TIMEOUT + Duration::from_millis(500), + handshake_task, + ) + .map(move |res| match res { + Ok(()) => (), + Err(_e @ Elapsed { .. }) => { + info!( + "timeout in spawned accept_inbound_handshake() task: \ + inner task should have timeout out already" + ); + } + }); // This timeout helps locate inbound peer connection hangs, see #6763 for details. - handshakes.push(Box::pin( - tokio::time::timeout( - // Only trigger this timeout if the inner handshake timeout fails - HANDSHAKE_TIMEOUT + Duration::from_millis(500), - handshake_task, - ) - .inspect_err(|_elapsed| { - info!("timeout in spawned accept_inbound_handshake() task") - }), - )); + handshakes.push(Box::pin(handshake_timeout)); // Rate-limit inbound connection handshakes. // But sleep longer after a successful connection, @@ -798,7 +818,9 @@ where let candidates = Arc::new(futures::lock::Mutex::new(candidates)); // This contains both crawl and handshake tasks. - let mut handshakes = FuturesUnordered::new(); + let mut handshakes: FuturesUnordered< + Pin> + Send>>, + > = FuturesUnordered::new(); // returns None when empty. // Keeping an unresolved future in the pool means the stream never terminates. 
handshakes.push(future::pending().boxed()); @@ -905,8 +927,14 @@ where }) .map(move |res| match res { Ok(crawler_action) => crawler_action, - Err(e) => { - panic!("panic during handshaking: {e:?}"); + Err(e @ JoinError {..}) => { + if e.is_panic() { + panic!("panic during outbound handshake: {e:?}"); + } else { + info!("task error during outbound handshake: {e:?}, is Zebra shutting down?") + } + // Just fake it + Ok(HandshakeFinished) } }) .in_current_span(); @@ -929,8 +957,14 @@ where }) .map(move |res| match res { Ok(crawler_action) => crawler_action, - Err(e) => { - panic!("panic during TimerCrawl: {tick:?} {e:?}"); + Err(e @ JoinError {..}) => { + if e.is_panic() { + panic!("panic during outbound TimerCrawl: {tick:?} {e:?}"); + } else { + info!("task error during outbound TimerCrawl: {e:?}, is Zebra shutting down?") + } + // Just fake it + Ok(TimerCrawlFinished) } }) .in_current_span(); From 40d697a66c3a732502a99994d0d2692c3ebf3e08 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 19 Jun 2023 04:32:59 +0000 Subject: [PATCH 116/265] build(deps): bump tj-actions/changed-files from 36.2.1 to 36.3.0 (#6986) Bumps [tj-actions/changed-files](https://github.com/tj-actions/changed-files) from 36.2.1 to 36.3.0. - [Release notes](https://github.com/tj-actions/changed-files/releases) - [Changelog](https://github.com/tj-actions/changed-files/blob/main/HISTORY.md) - [Commits](https://github.com/tj-actions/changed-files/compare/v36.2.1...v36.3.0) --- updated-dependencies: - dependency-name: tj-actions/changed-files dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/lint.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 71e61543557..f0fb0777781 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -37,7 +37,7 @@ jobs: - name: Rust files id: changed-files-rust - uses: tj-actions/changed-files@v36.2.1 + uses: tj-actions/changed-files@v36.3.0 with: files: | **/*.rs @@ -49,7 +49,7 @@ jobs: - name: Workflow files id: changed-files-workflows - uses: tj-actions/changed-files@v36.2.1 + uses: tj-actions/changed-files@v36.3.0 with: files: | .github/workflows/*.yml From ad7af3e2d86d7206f0a463ca51b7f5a954fce0ad Mon Sep 17 00:00:00 2001 From: teor Date: Tue, 20 Jun 2023 04:17:39 +1000 Subject: [PATCH 117/265] fix(net): Clean up licensing, closure `move`, log typos, tracing spans (#6995) * Remove a redundant outbound connector timeout * Fix panics in inbound connection handshaker * Refactor to simplify FuturesUnordered types * Make licensing notes consistent * Delete redundant `move` in closures * Fix a log typo * Add some missing tracing spans --- zebra-network/Cargo.toml | 6 +- zebra-network/src/config.rs | 57 +++++++++++-------- zebra-network/src/peer_cache_updater.rs | 1 + zebra-network/src/peer_set/initialize.rs | 32 +++++------ zebra-network/src/peer_set/set.rs | 1 + zebra-network/src/peer_set/unready_service.rs | 9 ++- 6 files changed, 61 insertions(+), 45 deletions(-) diff --git a/zebra-network/Cargo.toml b/zebra-network/Cargo.toml index ae5639881d1..cd1029e6325 100644 --- a/zebra-network/Cargo.toml +++ b/zebra-network/Cargo.toml @@ -7,7 +7,11 @@ description = "Networking code for Zebra" # # This licence is deliberately different to the rest of Zebra. 
# -# zebra-network/src/peer_set/set.rs was modified from a 2019 version of: +# Some code in: +# zebra-network/src/peer_set/set.rs +# zebra-network/src/peer_set/unready_service.rs +# zebra-network/src/peer_set/initialize.rs +# was modified from a 2019 version of: # https://github.com/tower-rs/tower/tree/master/tower/src/balance/p2c/service.rs license = "MIT" repository = "https://github.com/ZcashFoundation/zebra" diff --git a/zebra-network/src/config.rs b/zebra-network/src/config.rs index 067a50ba09c..23e812f8515 100644 --- a/zebra-network/src/config.rs +++ b/zebra-network/src/config.rs @@ -13,6 +13,7 @@ use indexmap::IndexSet; use serde::{de, Deserialize, Deserializer}; use tempfile::NamedTempFile; use tokio::{fs, io::AsyncWriteExt}; +use tracing::Span; use zebra_chain::parameters::Network; @@ -493,12 +494,15 @@ impl Config { // Create the temporary file. // Do blocking filesystem operations on a dedicated thread. + let span = Span::current(); let tmp_peer_cache_file = tokio::task::spawn_blocking(move || { - // Put the temporary file in the same directory as the permanent file, - // so atomic filesystem operations are possible. - tempfile::Builder::new() - .prefix(&tmp_peer_cache_prefix) - .tempfile_in(peer_cache_dir) + span.in_scope(move || { + // Put the temporary file in the same directory as the permanent file, + // so atomic filesystem operations are possible. + tempfile::Builder::new() + .prefix(&tmp_peer_cache_prefix) + .tempfile_in(peer_cache_dir) + }) }) .await .expect("unexpected panic creating temporary peer cache file")?; @@ -514,31 +518,34 @@ impl Config { // Atomically replace the current cache with the temporary cache. // Do blocking filesystem operations on a dedicated thread. 
+ let span = Span::current(); tokio::task::spawn_blocking(move || { - let result = tmp_peer_cache_file.persist(&peer_cache_file); + span.in_scope(move || { + let result = tmp_peer_cache_file.persist(&peer_cache_file); - // Drops the temp file if needed - match result { - Ok(_temp_file) => { - info!( - cached_ip_count = ?peer_list.len(), - ?peer_cache_file, - "updated cached peer IP addresses" - ); - - for ip in &peer_list { - metrics::counter!( - "zcash.net.peers.cache", - 1, - "cache" => peer_cache_file.display().to_string(), - "remote_ip" => ip.to_string() + // Drops the temp file if needed + match result { + Ok(_temp_file) => { + info!( + cached_ip_count = ?peer_list.len(), + ?peer_cache_file, + "updated cached peer IP addresses" ); - } - Ok(()) + for ip in &peer_list { + metrics::counter!( + "zcash.net.peers.cache", + 1, + "cache" => peer_cache_file.display().to_string(), + "remote_ip" => ip.to_string() + ); + } + + Ok(()) + } + Err(error) => Err(error.error), } - Err(error) => Err(error.error), - } + }) }) .await .expect("unexpected panic making temporary peer cache file permanent") diff --git a/zebra-network/src/peer_cache_updater.rs b/zebra-network/src/peer_cache_updater.rs index 3d23f4d27a5..64c160e815f 100644 --- a/zebra-network/src/peer_cache_updater.rs +++ b/zebra-network/src/peer_cache_updater.rs @@ -15,6 +15,7 @@ use crate::{ }; /// An ongoing task that regularly caches the current `address_book` to disk, based on `config`. +#[instrument(skip(config, address_book))] pub async fn peer_cache_updater( config: Config, address_book: Arc>, diff --git a/zebra-network/src/peer_set/initialize.rs b/zebra-network/src/peer_set/initialize.rs index 49823e9514a..14f2ba5c005 100644 --- a/zebra-network/src/peer_set/initialize.rs +++ b/zebra-network/src/peer_set/initialize.rs @@ -1,7 +1,8 @@ //! A peer set whose size is dynamically determined by resource constraints. 
- -// Portions of this submodule were adapted from tower-balance, -// which is (c) 2019 Tower Contributors (MIT licensed). +//! +//! The [`PeerSet`] implementation is adapted from the one in [tower::Balance][tower-balance]. +//! +//! [tower-balance]: https://github.com/tower-rs/tower/tree/master/tower/src/balance use std::{ collections::{BTreeMap, HashSet}, @@ -614,7 +615,7 @@ where peerset_tx.clone(), ) .await? - .map(move |res| match res { + .map(|res| match res { Ok(()) => (), Err(e @ JoinError { .. }) => { if e.is_panic() { @@ -632,12 +633,12 @@ where HANDSHAKE_TIMEOUT + Duration::from_millis(500), handshake_task, ) - .map(move |res| match res { + .map(|res| match res { Ok(()) => (), Err(_e @ Elapsed { .. }) => { info!( "timeout in spawned accept_inbound_handshake() task: \ - inner task should have timeout out already" + inner task should have timed out already" ); } }); @@ -677,6 +678,9 @@ where /// /// Uses `handshaker` to perform a Zcash network protocol handshake, and sends /// the [`peer::Client`] result over `peerset_tx`. +// +// TODO: when we support inbound proxies, distinguish between proxied listeners and +// direct listeners in the span generated by this instrument macro #[instrument(skip(handshaker, tcp_stream, connection_tracker, peerset_tx))] async fn accept_inbound_handshake( addr: PeerSocketAddr, @@ -701,8 +705,6 @@ where // // This await is okay because the handshaker's `poll_ready` method always returns Ready. handshaker.ready().await?; - // TODO: distinguish between proxied listeners and direct listeners - let handshaker_span = info_span!("listen_handshaker", peer = ?connected_addr); // Construct a handshake future but do not drive it yet.... 
let handshake = handshaker.call(HandshakeRequest { @@ -724,7 +726,7 @@ where debug!(?handshake_result, "error handshaking with inbound peer"); } } - .instrument(handshaker_span), + .in_current_span(), ); Ok(handshake_task) @@ -924,8 +926,8 @@ where Ok(DemandCrawlFinished) } - }) - .map(move |res| match res { + }.in_current_span()) + .map(|res| match res { Ok(crawler_action) => crawler_action, Err(e @ JoinError {..}) => { if e.is_panic() { @@ -936,8 +938,7 @@ where // Just fake it Ok(HandshakeFinished) } - }) - .in_current_span(); + }); handshakes.push(Box::pin(handshake_or_crawl_handle)); } @@ -954,7 +955,7 @@ where crawl(candidates, demand_tx).await?; Ok(TimerCrawlFinished) - }) + }.in_current_span()) .map(move |res| match res { Ok(crawler_action) => crawler_action, Err(e @ JoinError {..}) => { @@ -966,8 +967,7 @@ where // Just fake it Ok(TimerCrawlFinished) } - }) - .in_current_span(); + }); handshakes.push(Box::pin(crawl_handle)); } diff --git a/zebra-network/src/peer_set/set.rs b/zebra-network/src/peer_set/set.rs index 0f52a067c05..72fdbe79955 100644 --- a/zebra-network/src/peer_set/set.rs +++ b/zebra-network/src/peer_set/set.rs @@ -3,6 +3,7 @@ //! # Implementation //! //! The [`PeerSet`] implementation is adapted from the one in [tower::Balance][tower-balance]. +//! //! As described in Tower's documentation, it: //! //! > Distributes requests across inner services using the [Power of Two Choices][p2c]. diff --git a/zebra-network/src/peer_set/unready_service.rs b/zebra-network/src/peer_set/unready_service.rs index 108a9e8307f..d49587cde1d 100644 --- a/zebra-network/src/peer_set/unready_service.rs +++ b/zebra-network/src/peer_set/unready_service.rs @@ -1,6 +1,9 @@ -/// Services that are busy or newly created. -/// -/// Adapted from tower-balance. +//! Services that are busy or newly created. +//! +//! The [`UnreadyService`] implementation is adapted from the one in [tower::Balance][tower-balance]. +//! +//! 
[tower-balance]: https://github.com/tower-rs/tower/tree/master/tower/src/balance + use std::{ future::Future, marker::PhantomData, From 231f5be4038764ba887c8324ecd01918f48656f4 Mon Sep 17 00:00:00 2001 From: teor Date: Tue, 20 Jun 2023 04:17:59 +1000 Subject: [PATCH 118/265] fix(net): Reduce maximum number of connections per IP (#6993) * Reduce maximum number of connections per IP * Fix tests that require multiple connections per IP --- zebra-network/src/constants.rs | 2 +- zebra-network/src/peer_set/set/tests/vectors.rs | 7 ++++++- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/zebra-network/src/constants.rs b/zebra-network/src/constants.rs index 483b63ecfe2..289ccd79399 100644 --- a/zebra-network/src/constants.rs +++ b/zebra-network/src/constants.rs @@ -69,7 +69,7 @@ pub const OUTBOUND_PEER_LIMIT_MULTIPLIER: usize = 3; /// The maximum number of peer connections Zebra will keep for a given IP address /// before it drops any additional peer connections with that IP. -pub const MAX_CONNS_PER_IP: usize = 3; +pub const MAX_CONNS_PER_IP: usize = 1; /// The buffer size for the peer set. /// diff --git a/zebra-network/src/peer_set/set/tests/vectors.rs b/zebra-network/src/peer_set/set/tests/vectors.rs index 44bf9c1aaff..c3534aed182 100644 --- a/zebra-network/src/peer_set/set/tests/vectors.rs +++ b/zebra-network/src/peer_set/set/tests/vectors.rs @@ -1,6 +1,6 @@ //! Fixed test vectors for the peer set. 
-use std::{iter, time::Duration}; +use std::{cmp::max, iter, time::Duration}; use tokio::time::timeout; use tower::{Service, ServiceExt}; @@ -12,6 +12,7 @@ use zebra_chain::{ use super::{PeerSetBuilder, PeerVersions}; use crate::{ + constants::MAX_CONNS_PER_IP, peer::{ClientRequest, MinimumPeerVersion}, peer_set::inventory_registry::InventoryStatus, protocol::external::{types::Version, InventoryHash}, @@ -144,6 +145,7 @@ fn peer_set_ready_multiple_connections() { let (mut peer_set, _peer_set_guard) = PeerSetBuilder::new() .with_discover(discovered_peers) .with_minimum_peer_version(minimum_peer_version.clone()) + .max_conns_per_ip(max(3, MAX_CONNS_PER_IP)) .build(); // Get peerset ready @@ -257,6 +259,7 @@ fn peer_set_route_inv_empty_registry() { let (mut peer_set, _peer_set_guard) = PeerSetBuilder::new() .with_discover(discovered_peers) .with_minimum_peer_version(minimum_peer_version.clone()) + .max_conns_per_ip(max(2, MAX_CONNS_PER_IP)) .build(); // Get peerset ready @@ -339,6 +342,7 @@ fn peer_set_route_inv_advertised_registry_order(advertised_first: bool) { let (mut peer_set, mut peer_set_guard) = PeerSetBuilder::new() .with_discover(discovered_peers) .with_minimum_peer_version(minimum_peer_version.clone()) + .max_conns_per_ip(max(2, MAX_CONNS_PER_IP)) .build(); // Advertise some inventory @@ -446,6 +450,7 @@ fn peer_set_route_inv_missing_registry_order(missing_first: bool) { let (mut peer_set, mut peer_set_guard) = PeerSetBuilder::new() .with_discover(discovered_peers) .with_minimum_peer_version(minimum_peer_version.clone()) + .max_conns_per_ip(max(2, MAX_CONNS_PER_IP)) .build(); // Mark some inventory as missing From abcabd1931a73bc03d554d42335603987a8803bf Mon Sep 17 00:00:00 2001 From: Marek Date: Tue, 20 Jun 2023 00:48:59 +0200 Subject: [PATCH 119/265] Use `OrderedUtxo` in `CheckpointVerifiedBlock` (#6971) --- zebra-state/src/arbitrary.rs | 3 ++- zebra-state/src/request.rs | 6 +++--- zebra-state/src/service.rs | 3 ++- 
.../service/finalized_state/zebra_db/block.rs | 18 ++++++++++++++---- zebra-state/src/service/pending_utxos.rs | 8 -------- zebra-state/src/service/queued_blocks.rs | 5 +++-- zebra-state/src/service/tests.rs | 2 +- 7 files changed, 25 insertions(+), 20 deletions(-) diff --git a/zebra-state/src/arbitrary.rs b/zebra-state/src/arbitrary.rs index f151e2c4128..2a8eaa5eae8 100644 --- a/zebra-state/src/arbitrary.rs +++ b/zebra-state/src/arbitrary.rs @@ -183,7 +183,8 @@ impl CheckpointVerifiedBlock { height: block::Height, ) -> Self { let transaction_hashes: Arc<[_]> = block.transactions.iter().map(|tx| tx.hash()).collect(); - let new_outputs = transparent::new_outputs_with_height(&block, height, &transaction_hashes); + let new_outputs = + transparent::new_ordered_outputs_with_height(&block, height, &transaction_hashes); Self { block, diff --git a/zebra-state/src/request.rs b/zebra-state/src/request.rs index 6ffa7d3e04d..dbff91022e6 100644 --- a/zebra-state/src/request.rs +++ b/zebra-state/src/request.rs @@ -235,7 +235,7 @@ pub struct CheckpointVerifiedBlock { /// earlier transaction. /// /// This field can also contain unrelated outputs, which are ignored. - pub(crate) new_outputs: HashMap, + pub(crate) new_outputs: HashMap, /// A precomputed list of the hashes of the transactions in this block, /// in the same order as `block.transactions`. 
pub transaction_hashes: Arc<[transaction::Hash]>, @@ -369,7 +369,7 @@ impl CheckpointVerifiedBlock { .coinbase_height() .expect("coinbase height was already checked"); let transaction_hashes: Arc<[_]> = block.transactions.iter().map(|tx| tx.hash()).collect(); - let new_outputs = transparent::new_outputs(&block, &transaction_hashes); + let new_outputs = transparent::new_ordered_outputs(&block, &transaction_hashes); Self { block, @@ -405,7 +405,7 @@ impl From for CheckpointVerifiedBlock { block, hash, height, - new_outputs: utxos_from_ordered_utxos(new_outputs), + new_outputs, transaction_hashes, } } diff --git a/zebra-state/src/service.rs b/zebra-state/src/service.rs index 75d1f0ac1ca..f88cd281128 100644 --- a/zebra-state/src/service.rs +++ b/zebra-state/src/service.rs @@ -976,7 +976,8 @@ impl Service for StateService { // even though it is redundant for most finalized blocks. // (Finalized blocks are verified using block hash checkpoints // and transaction merkle tree block header commitments.) 
- self.pending_utxos.check_against(&finalized.new_outputs); + self.pending_utxos + .check_against_ordered(&finalized.new_outputs); // # Performance // diff --git a/zebra-state/src/service/finalized_state/zebra_db/block.rs b/zebra-state/src/service/finalized_state/zebra_db/block.rs index 8edc89e0258..61e19100a0d 100644 --- a/zebra-state/src/service/finalized_state/zebra_db/block.rs +++ b/zebra-state/src/service/finalized_state/zebra_db/block.rs @@ -305,10 +305,10 @@ impl ZebraDb { let new_outputs_by_out_loc: BTreeMap = finalized .new_outputs .iter() - .map(|(outpoint, utxo)| { + .map(|(outpoint, ordered_utxo)| { ( lookup_out_loc(finalized.height, outpoint, &tx_hash_indexes), - utxo.clone(), + ordered_utxo.utxo.clone(), ) }) .collect(); @@ -331,7 +331,12 @@ impl ZebraDb { }), self.utxo(&outpoint) .map(|ordered_utxo| ordered_utxo.utxo) - .or_else(|| finalized.new_outputs.get(&outpoint).cloned()) + .or_else(|| { + finalized + .new_outputs + .get(&outpoint) + .map(|ordered_utxo| ordered_utxo.utxo.clone()) + }) .expect("already checked UTXO was in state or block"), ) }) @@ -350,7 +355,12 @@ impl ZebraDb { // Get the transparent addresses with changed balances/UTXOs let changed_addresses: HashSet = spent_utxos_by_out_loc .values() - .chain(finalized.new_outputs.values()) + .chain( + finalized + .new_outputs + .values() + .map(|ordered_utxo| &ordered_utxo.utxo), + ) .filter_map(|utxo| utxo.output.address(network)) .unique() .collect(); diff --git a/zebra-state/src/service/pending_utxos.rs b/zebra-state/src/service/pending_utxos.rs index 953aba4a97c..c60719825f9 100644 --- a/zebra-state/src/service/pending_utxos.rs +++ b/zebra-state/src/service/pending_utxos.rs @@ -60,14 +60,6 @@ impl PendingUtxos { } } - /// Check the list of pending UTXO requests against the supplied [`transparent::Utxo`] index. 
- #[inline] - pub fn check_against(&mut self, utxos: &HashMap) { - for (outpoint, utxo) in utxos.iter() { - self.respond(outpoint, utxo.clone()) - } - } - /// Scan the set of waiting utxo requests for channels where all receivers /// have been dropped and remove the corresponding sender. pub fn prune(&mut self) { diff --git a/zebra-state/src/service/queued_blocks.rs b/zebra-state/src/service/queued_blocks.rs index 7a009605c53..41e938122bf 100644 --- a/zebra-state/src/service/queued_blocks.rs +++ b/zebra-state/src/service/queued_blocks.rs @@ -279,8 +279,9 @@ impl SentHashes { let outpoints = block .new_outputs .iter() - .map(|(outpoint, utxo)| { - self.known_utxos.insert(*outpoint, utxo.clone()); + .map(|(outpoint, ordered_utxo)| { + self.known_utxos + .insert(*outpoint, ordered_utxo.utxo.clone()); outpoint }) .cloned() diff --git a/zebra-state/src/service/tests.rs b/zebra-state/src/service/tests.rs index 5adfaabdf39..b7e55b9a9d9 100644 --- a/zebra-state/src/service/tests.rs +++ b/zebra-state/src/service/tests.rs @@ -419,7 +419,7 @@ proptest! { // the genesis block has a zero-valued transparent output, // which is not included in the UTXO set if block.height > block::Height(0) { - let utxos = &block.new_outputs; + let utxos = &block.new_outputs.iter().map(|(k, ordered_utxo)| (*k, ordered_utxo.utxo.clone())).collect(); let block_value_pool = &block.block.chain_value_pool_change(utxos)?; expected_finalized_value_pool += *block_value_pool; } From fba9440af0d83d5d420b6df00bc544e805040b21 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 20 Jun 2023 00:33:51 +0000 Subject: [PATCH 120/265] build(deps): bump w9jds/firebase-action from 11.30.1 to 12.4.0 (#7005) Bumps [w9jds/firebase-action](https://github.com/w9jds/firebase-action) from 11.30.1 to 12.4.0. 
- [Release notes](https://github.com/w9jds/firebase-action/releases) - [Commits](https://github.com/w9jds/firebase-action/compare/v11.30.1...v12.4.0) --- updated-dependencies: - dependency-name: w9jds/firebase-action dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/docs.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml index 9c8d835f822..ba292810d6e 100644 --- a/.github/workflows/docs.yml +++ b/.github/workflows/docs.yml @@ -73,7 +73,7 @@ jobs: mdbook build book/ - name: Deploy Zebra book to firebase - uses: w9jds/firebase-action@v11.30.1 + uses: w9jds/firebase-action@v12.4.0 with: args: deploy env: @@ -89,7 +89,7 @@ jobs: RUSTDOCFLAGS: '--html-in-header katex-header.html' - name: Deploy external docs to firebase - uses: w9jds/firebase-action@v11.30.1 + uses: w9jds/firebase-action@v12.4.0 with: args: deploy env: @@ -103,7 +103,7 @@ jobs: RUSTDOCFLAGS: '--html-in-header katex-header.html' - name: Deploy internal docs to firebase - uses: w9jds/firebase-action@v11.30.1 + uses: w9jds/firebase-action@v12.4.0 with: args: deploy env: From c3f0f53256e31e7ad031f32bbbfc4a7ef50ba57e Mon Sep 17 00:00:00 2001 From: teor Date: Tue, 20 Jun 2023 12:42:06 +1000 Subject: [PATCH 121/265] refactor(app): De-duplicate and fix version handling code (#6996) * De-duplicate app_version and user_agent code, rename to build_version * Make RPC testnet flag forward-compatible with additional testnets * Fix RPC tests with new argument * Use "modified" rather than "dirty" for uncommitted changes in build metadata * Split the vergen version into its own function --- zebra-chain/src/parameters/network.rs | 7 +- zebra-rpc/src/methods.rs | 48 ++++++------ .../types/get_mining_info.rs | 2 +- zebra-rpc/src/methods/tests/prop.rs | 14 ++++ 
zebra-rpc/src/methods/tests/snapshot.rs | 1 + zebra-rpc/src/methods/tests/vectors.rs | 10 +++ zebra-rpc/src/server.rs | 41 +++++++--- zebra-rpc/src/server/tests/vectors.rs | 6 ++ zebrad/Cargo.toml | 2 +- zebrad/src/application.rs | 74 ++++++++++--------- zebrad/src/commands/start.rs | 5 +- zebrad/src/components/tracing/component.rs | 4 +- 12 files changed, 138 insertions(+), 76 deletions(-) diff --git a/zebra-chain/src/parameters/network.rs b/zebra-chain/src/parameters/network.rs index 6ec34384e10..05f0a587738 100644 --- a/zebra-chain/src/parameters/network.rs +++ b/zebra-chain/src/parameters/network.rs @@ -59,7 +59,7 @@ pub enum Network { #[default] Mainnet, - /// The testnet. + /// The oldest public test network. Testnet, } @@ -124,6 +124,11 @@ impl Network { pub fn lowercase_name(&self) -> String { self.to_string().to_ascii_lowercase() } + + /// Returns `true` if this network is a testing network. + pub fn is_a_test_network(&self) -> bool { + *self != Network::Mainnet + } } impl FromStr for Network { diff --git a/zebra-rpc/src/methods.rs b/zebra-rpc/src/methods.rs index 56e8cae69b8..263dbddf373 100644 --- a/zebra-rpc/src/methods.rs +++ b/zebra-rpc/src/methods.rs @@ -251,8 +251,11 @@ where { // Configuration // - /// Zebra's application version. - app_version: String, + /// Zebra's application version, with build metadata. + build_version: String, + + /// Zebra's RPC user agent. + user_agent: String, /// The configured network for this RPC service. network: Network, @@ -300,8 +303,13 @@ where Tip: ChainTip + Clone + Send + Sync + 'static, { /// Create a new instance of the RPC handler. - pub fn new( - app_version: Version, + // + // TODO: + // - put some of the configs or services in their own struct? 
+ #[allow(clippy::too_many_arguments)] + pub fn new( + build_version: VersionString, + user_agent: UserAgentString, network: Network, debug_force_finished_sync: bool, debug_like_zcashd: bool, @@ -310,21 +318,24 @@ where latest_chain_tip: Tip, ) -> (Self, JoinHandle<()>) where - Version: ToString, + VersionString: ToString + Clone + Send + 'static, + UserAgentString: ToString + Clone + Send + 'static, >::Future: Send, >::Future: Send, { let (runner, queue_sender) = Queue::start(); - let mut app_version = app_version.to_string(); + let mut build_version = build_version.to_string(); + let user_agent = user_agent.to_string(); // Match zcashd's version format, if the version string has anything in it - if !app_version.is_empty() && !app_version.starts_with('v') { - app_version.insert(0, 'v'); + if !build_version.is_empty() && !build_version.starts_with('v') { + build_version.insert(0, 'v'); } let rpc_impl = RpcImpl { - app_version, + build_version, + user_agent, network, debug_force_finished_sync, debug_like_zcashd, @@ -364,25 +375,10 @@ where State::Future: Send, Tip: ChainTip + Clone + Send + Sync + 'static, { - #[allow(clippy::unwrap_in_result)] fn get_info(&self) -> Result { - // Build a [BIP 14] valid user agent with release info. - // - // [BIP 14]: https://github.com/bitcoin/bips/blob/master/bip-0014.mediawiki - let release_version = self - .app_version - // remove everything after the `+` character if any - .split('+') - .next() - .expect("always at least 1 slice"); - // Remove the previously added `v` character at the start since it's not a part of the user agent. 
- let release_version = release_version.strip_prefix('v').unwrap_or(release_version); - - let user_agent = format!("/Zebra:{release_version}/"); - let response = GetInfo { - build: self.app_version.clone(), - subversion: user_agent, + build: self.build_version.clone(), + subversion: self.user_agent.clone(), }; Ok(response) diff --git a/zebra-rpc/src/methods/get_block_template_rpcs/types/get_mining_info.rs b/zebra-rpc/src/methods/get_block_template_rpcs/types/get_mining_info.rs index 3ac548596cb..3da55de3fa7 100644 --- a/zebra-rpc/src/methods/get_block_template_rpcs/types/get_mining_info.rs +++ b/zebra-rpc/src/methods/get_block_template_rpcs/types/get_mining_info.rs @@ -25,7 +25,7 @@ impl Response { networksolps, networkhashps: networksolps, chain: network.bip70_network_name(), - testnet: network == Network::Testnet, + testnet: network.is_a_test_network(), } } } diff --git a/zebra-rpc/src/methods/tests/prop.rs b/zebra-rpc/src/methods/tests/prop.rs index 388e2600728..154f5d8c973 100644 --- a/zebra-rpc/src/methods/tests/prop.rs +++ b/zebra-rpc/src/methods/tests/prop.rs @@ -40,6 +40,7 @@ proptest! { let mut mempool = MockService::build().for_prop_tests(); let mut state: MockService<_, _, _, BoxError> = MockService::build().for_prop_tests(); let (rpc, rpc_tx_queue_task_handle) = RpcImpl::new( + "RPC test", "RPC test", Mainnet, false, @@ -94,6 +95,7 @@ proptest! { let mut state: MockService<_, _, _, BoxError> = MockService::build().for_prop_tests(); let (rpc, rpc_tx_queue_task_handle) = RpcImpl::new( + "RPC test", "RPC test", Mainnet, false, @@ -153,6 +155,7 @@ proptest! { let mut state: MockService<_, _, _, BoxError> = MockService::build().for_prop_tests(); let (rpc, rpc_tx_queue_task_handle) = RpcImpl::new( + "RPC test", "RPC test", Mainnet, false, @@ -220,6 +223,7 @@ proptest! 
{ let mut state: MockService<_, _, _, BoxError> = MockService::build().for_prop_tests(); let (rpc, rpc_tx_queue_task_handle) = RpcImpl::new( + "RPC test", "RPC test", Mainnet, false, @@ -276,6 +280,7 @@ proptest! { let mut state: MockService<_, _, _, BoxError> = MockService::build().for_prop_tests(); let (rpc, rpc_tx_queue_task_handle) = RpcImpl::new( + "RPC test", "RPC test", Mainnet, false, @@ -330,6 +335,7 @@ proptest! { let mut state: MockService<_, _, _, BoxError> = MockService::build().for_prop_tests(); let (rpc, rpc_tx_queue_task_handle) = RpcImpl::new( + "RPC test", "RPC test", Mainnet, false, @@ -430,6 +436,7 @@ proptest! { let mut state: MockService<_, _, _, BoxError> = MockService::build().for_prop_tests(); let (rpc, rpc_tx_queue_task_handle) = RpcImpl::new( + "RPC test", "RPC test", Mainnet, false, @@ -488,6 +495,7 @@ proptest! { let mut state: MockService<_, _, _, BoxError> = MockService::build().for_prop_tests(); let (rpc, rpc_tx_queue_task_handle) = RpcImpl::new( + "RPC test", "RPC test", Mainnet, false, @@ -535,6 +543,7 @@ proptest! { // look for an error with a `NoChainTip` let (rpc, rpc_tx_queue_task_handle) = RpcImpl::new( + "RPC test", "RPC test", network, false, @@ -585,6 +594,7 @@ proptest! { // Start RPC with the mocked `ChainTip` let (rpc, rpc_tx_queue_task_handle) = RpcImpl::new( + "RPC test", "RPC test", network, false, @@ -671,6 +681,7 @@ proptest! { // Start RPC with the mocked `ChainTip` runtime.block_on(async move { let (rpc, _rpc_tx_queue_task_handle) = RpcImpl::new( + "RPC test", "RPC test", network, false, @@ -734,6 +745,7 @@ proptest! { // Start RPC with the mocked `ChainTip` runtime.block_on(async move { let (rpc, _rpc_tx_queue_task_handle) = RpcImpl::new( + "RPC test", "RPC test", network, false, @@ -785,6 +797,7 @@ proptest! 
{ let mut state: MockService<_, _, _, BoxError> = MockService::build().for_prop_tests(); let (rpc, rpc_tx_queue_task_handle) = RpcImpl::new( + "RPC test", "RPC test", Mainnet, false, @@ -874,6 +887,7 @@ proptest! { let mut state: MockService<_, _, _, BoxError> = MockService::build().for_prop_tests(); let (rpc, rpc_tx_queue_task_handle) = RpcImpl::new( + "RPC test", "RPC test", Mainnet, false, diff --git a/zebra-rpc/src/methods/tests/snapshot.rs b/zebra-rpc/src/methods/tests/snapshot.rs index 84af5c0883e..a3fa80dc31c 100644 --- a/zebra-rpc/src/methods/tests/snapshot.rs +++ b/zebra-rpc/src/methods/tests/snapshot.rs @@ -72,6 +72,7 @@ async fn test_rpc_response_data_for_network(network: Network) { // Init RPC let (rpc, _rpc_tx_queue_task_handle) = RpcImpl::new( "RPC test", + "/Zebra:RPC test/", network, false, true, diff --git a/zebra-rpc/src/methods/tests/vectors.rs b/zebra-rpc/src/methods/tests/vectors.rs index f3b22fce482..42cc3c59213 100644 --- a/zebra-rpc/src/methods/tests/vectors.rs +++ b/zebra-rpc/src/methods/tests/vectors.rs @@ -29,6 +29,7 @@ async fn rpc_getinfo() { let (rpc, rpc_tx_queue_task_handle) = RpcImpl::new( "RPC test", + "/Zebra:RPC test/", Mainnet, false, true, @@ -72,6 +73,7 @@ async fn rpc_getblock() { // Init RPC let (rpc, rpc_tx_queue_task_handle) = RpcImpl::new( + "RPC test", "RPC test", Mainnet, false, @@ -223,6 +225,7 @@ async fn rpc_getblock_parse_error() { // Init RPC let (rpc, rpc_tx_queue_task_handle) = RpcImpl::new( + "RPC test", "RPC test", Mainnet, false, @@ -265,6 +268,7 @@ async fn rpc_getblock_missing_error() { // Init RPC let (rpc, rpc_tx_queue_task_handle) = RpcImpl::new( + "RPC test", "RPC test", Mainnet, false, @@ -333,6 +337,7 @@ async fn rpc_getbestblockhash() { // Init RPC let (rpc, rpc_tx_queue_task_handle) = RpcImpl::new( + "RPC test", "RPC test", Mainnet, false, @@ -378,6 +383,7 @@ async fn rpc_getrawtransaction() { // Init RPC let (rpc, rpc_tx_queue_task_handle) = RpcImpl::new( + "RPC test", "RPC test", Mainnet, false, @@ 
-539,6 +545,7 @@ async fn rpc_getaddresstxids_invalid_arguments() { zebra_state::populated_state(blocks.clone(), Mainnet).await; let (rpc, rpc_tx_queue_task_handle) = RpcImpl::new( + "RPC test", "RPC test", Mainnet, false, @@ -682,6 +689,7 @@ async fn rpc_getaddresstxids_response_with( zebra_state::populated_state(blocks.to_owned(), network).await; let (rpc, rpc_tx_queue_task_handle) = RpcImpl::new( + "RPC test", "RPC test", network, false, @@ -733,6 +741,7 @@ async fn rpc_getaddressutxos_invalid_arguments() { let mut state: MockService<_, _, _, BoxError> = MockService::build().for_unit_tests(); let rpc = RpcImpl::new( + "RPC test", "RPC test", Mainnet, false, @@ -781,6 +790,7 @@ async fn rpc_getaddressutxos_response() { zebra_state::populated_state(blocks.clone(), Mainnet).await; let rpc = RpcImpl::new( + "RPC test", "RPC test", Mainnet, false, diff --git a/zebra-rpc/src/server.rs b/zebra-rpc/src/server.rs index 43e7397d534..3f02b64def6 100644 --- a/zebra-rpc/src/server.rs +++ b/zebra-rpc/src/server.rs @@ -43,9 +43,16 @@ mod tests; /// Zebra RPC Server #[derive(Clone)] pub struct RpcServer { + /// The RPC config. config: Config, + + /// The configured network. network: Network, - app_version: String, + + /// Zebra's application version, with build metadata. + build_version: String, + + /// A handle that shuts down the RPC server. close_handle: CloseHandle, } @@ -54,7 +61,7 @@ impl fmt::Debug for RpcServer { f.debug_struct("RpcServer") .field("config", &self.config) .field("network", &self.network) - .field("app_version", &self.app_version) + .field("build_version", &self.build_version) .field( "close_handle", // TODO: when it stabilises, use std::any::type_name_of_val(&self.close_handle) @@ -66,21 +73,35 @@ impl fmt::Debug for RpcServer { impl RpcServer { /// Start a new RPC server endpoint using the supplied configs and services. - /// `app_version` is a version string for the application, which is used in RPC responses. 
+ /// + /// `build_version` and `user_agent` are version strings for the application, + /// which are used in RPC responses. /// /// Returns [`JoinHandle`]s for the RPC server and `sendrawtransaction` queue tasks, /// and a [`RpcServer`] handle, which can be used to shut down the RPC server task. // - // TODO: put some of the configs or services in their own struct? + // TODO: + // - put some of the configs or services in their own struct? + // - replace VersionString with semver::Version, and update the tests to provide valid versions #[allow(clippy::too_many_arguments)] - pub fn spawn( + pub fn spawn< + VersionString, + UserAgentString, + Mempool, + State, + Tip, + BlockVerifierRouter, + SyncStatus, + AddressBook, + >( config: Config, #[cfg(feature = "getblocktemplate-rpcs")] mining_config: get_block_template_rpcs::config::Config, #[cfg(not(feature = "getblocktemplate-rpcs"))] #[allow(unused_variables)] mining_config: (), - app_version: Version, + build_version: VersionString, + user_agent: UserAgentString, mempool: Buffer, state: State, #[cfg_attr(not(feature = "getblocktemplate-rpcs"), allow(unused_variables))] @@ -93,7 +114,8 @@ impl RpcServer { network: Network, ) -> (JoinHandle<()>, JoinHandle<()>, Option) where - Version: ToString + Clone + Send + 'static, + VersionString: ToString + Clone + Send + 'static, + UserAgentString: ToString + Clone + Send + 'static, Mempool: tower::Service< mempool::Request, Response = mempool::Response, @@ -159,7 +181,8 @@ impl RpcServer { // Initialize the rpc methods with the zebra version let (rpc_impl, rpc_tx_queue_task_handle) = RpcImpl::new( - app_version.clone(), + build_version.clone(), + user_agent, network, config.debug_force_finished_sync, #[cfg(feature = "getblocktemplate-rpcs")] @@ -202,7 +225,7 @@ impl RpcServer { let rpc_server_handle = RpcServer { config, network, - app_version: app_version.to_string(), + build_version: build_version.to_string(), close_handle, }; diff --git a/zebra-rpc/src/server/tests/vectors.rs 
b/zebra-rpc/src/server/tests/vectors.rs index 657106a56b7..91c6cffe861 100644 --- a/zebra-rpc/src/server/tests/vectors.rs +++ b/zebra-rpc/src/server/tests/vectors.rs @@ -61,6 +61,7 @@ fn rpc_server_spawn(parallel_cpu_threads: bool) { config, Default::default(), "RPC server test", + "RPC server test", Buffer::new(mempool.clone(), 1), Buffer::new(state.clone(), 1), Buffer::new(router_verifier.clone(), 1), @@ -147,6 +148,7 @@ fn rpc_server_spawn_unallocated_port(parallel_cpu_threads: bool, do_shutdown: bo config, Default::default(), "RPC server test", + "RPC server test", Buffer::new(mempool.clone(), 1), Buffer::new(state.clone(), 1), Buffer::new(router_verifier.clone(), 1), @@ -227,6 +229,7 @@ fn rpc_server_spawn_port_conflict() { config.clone(), Default::default(), "RPC server 1 test", + "RPC server 1 test", Buffer::new(mempool.clone(), 1), Buffer::new(state.clone(), 1), Buffer::new(router_verifier.clone(), 1), @@ -244,6 +247,7 @@ fn rpc_server_spawn_port_conflict() { config, Default::default(), "RPC server 2 conflict test", + "RPC server 2 conflict test", Buffer::new(mempool.clone(), 1), Buffer::new(state.clone(), 1), Buffer::new(router_verifier.clone(), 1), @@ -335,6 +339,7 @@ fn rpc_server_spawn_port_conflict_parallel_auto() { config.clone(), Default::default(), "RPC server 1 test", + "RPC server 1 test", Buffer::new(mempool.clone(), 1), Buffer::new(state.clone(), 1), Buffer::new(router_verifier.clone(), 1), @@ -352,6 +357,7 @@ fn rpc_server_spawn_port_conflict_parallel_auto() { config, Default::default(), "RPC server 2 conflict test", + "RPC server 2 conflict test", Buffer::new(mempool.clone(), 1), Buffer::new(state.clone(), 1), Buffer::new(router_verifier.clone(), 1), diff --git a/zebrad/Cargo.toml b/zebrad/Cargo.toml index 521f383dddf..7b65394c3da 100644 --- a/zebrad/Cargo.toml +++ b/zebrad/Cargo.toml @@ -132,6 +132,7 @@ chrono = { version = "0.4.26", default-features = false, features = ["clock", "s humantime-serde = "1.1.1" indexmap = "1.9.3" lazy_static = 
"1.4.0" +semver = "1.0.17" serde = { version = "1.0.164", features = ["serde_derive"] } toml = "0.7.4" @@ -207,7 +208,6 @@ hex = "0.4.3" jsonrpc-core = "18.0.0" once_cell = "1.18.0" regex = "1.8.4" -semver = "1.0.17" # zebra-rpc needs the preserve_order feature, it also makes test results more stable serde_json = { version = "1.0.97", features = ["preserve_order"] } diff --git a/zebrad/src/application.rs b/zebrad/src/application.rs index 6f95d6393db..90adeb0877c 100644 --- a/zebrad/src/application.rs +++ b/zebrad/src/application.rs @@ -7,8 +7,9 @@ use abscissa_core::{ config::CfgCell, status_err, terminal::{component::Terminal, stderr, stdout, ColorChoice}, - Application, Component, Configurable, FrameworkError, Shutdown, StandardPaths, Version, + Application, Component, Configurable, FrameworkError, Shutdown, StandardPaths, }; +use semver::{BuildMetadata, Version}; use zebra_network::constants::PORT_IN_USE_ERROR; use zebra_state::constants::{DATABASE_FORMAT_VERSION, LOCK_FILE_ERROR}; @@ -29,16 +30,30 @@ fn fatal_error(app_name: String, err: &dyn std::error::Error) -> ! { /// Application state pub static APPLICATION: AppCell = AppCell::new(); -/// Returns the zebrad version for this build, in SemVer 2.0 format. +/// Returns the `zebrad` version for this build, in SemVer 2.0 format. /// -/// Includes the git commit and the number of commits since the last version -/// tag, if available. +/// Includes `git describe` build metadata if available: +/// - the number of commits since the last version tag, and +/// - the git commit. /// /// For details, see -pub fn app_version() -> Version { +pub fn build_version() -> Version { // CARGO_PKG_VERSION is always a valid SemVer 2.0 version. const CARGO_PKG_VERSION: &str = env!("CARGO_PKG_VERSION"); + // We're using the same library as cargo uses internally, so this is guaranteed.
+ let fallback_version = CARGO_PKG_VERSION.parse().unwrap_or_else(|error| { + panic!( + "unexpected invalid CARGO_PKG_VERSION: {error:?} in {CARGO_PKG_VERSION:?}, \ + should have been checked by cargo" + ) + }); + + vergen_build_version().unwrap_or(fallback_version) +} + +/// Returns the `zebrad` version from this build, if available from `vergen`. +fn vergen_build_version() -> Option { // VERGEN_GIT_DESCRIBE should be in the format: // - v1.0.0-rc.9-6-g319b01bb84 // - v1.0.0-6-g319b01bb84 @@ -48,14 +63,6 @@ pub fn app_version() -> Version { // or whatever is specified in zebrad/build.rs. const VERGEN_GIT_DESCRIBE: Option<&str> = option_env!("VERGEN_GIT_DESCRIBE"); - // We're using the same library as cargo uses internally, so this is guaranteed. - let fallback_version = CARGO_PKG_VERSION.parse().unwrap_or_else(|error| { - panic!( - "unexpected invalid CARGO_PKG_VERSION: {error:?} in {CARGO_PKG_VERSION:?}, \ - should have been checked by cargo" - ) - }); - // The SemVer 2.0 format is: // - 1.0.0-rc.9+6.g319b01bb84 // - 1.0.0+6.g319b01bb84 @@ -66,16 +73,20 @@ pub fn app_version() -> Version { // - optional build: `+`tag[`.`tag ...] // change the git describe format to the semver 2.0 format let Some(vergen_git_describe) = VERGEN_GIT_DESCRIBE else { - return fallback_version; + return None; }; + // `git describe` uses "dirty" for uncommitted changes, + // but users won't understand what that means. + let vergen_git_describe = vergen_git_describe.replace("dirty", "modified"); + // Split using "git describe" separators. let mut vergen_git_describe = vergen_git_describe.split('-').peekable(); // Check the "version core" part. let version = vergen_git_describe.next(); let Some(mut version) = version else { - return fallback_version; + return None; }; // strip the leading "v", if present. @@ -83,7 +94,7 @@ pub fn app_version() -> Version { // If the initial version is empty, just a commit hash, or otherwise invalid. 
if Version::parse(version).is_err() { - return fallback_version; + return None; } let mut semver = version.to_string(); @@ -92,7 +103,7 @@ pub fn app_version() -> Version { // but only consume it if it is a pre-release tag. let Some(part) = vergen_git_describe.peek() else { // No pre-release or build. - return semver.parse().expect("just checked semver is valid"); + return semver.parse().ok(); }; if part.starts_with(char::is_alphabetic) { @@ -107,12 +118,12 @@ pub fn app_version() -> Version { // Check if the next part is a build part. let Some(build) = vergen_git_describe.peek() else { // No build tags. - return semver.parse().unwrap_or(fallback_version); + return semver.parse().ok(); }; if !build.starts_with(char::is_numeric) { // It's not a valid "commit count" build tag from "git describe". - return fallback_version; + return None; } // Append the rest of the build parts with the correct `+` and `.` separators. @@ -122,19 +133,16 @@ pub fn app_version() -> Version { semver.push('+'); semver.push_str(&build_parts); - semver.parse().unwrap_or(fallback_version) + semver.parse().ok() } -/// The Zebra current release version. -// -// TODO: deduplicate this code with release_version in zebra_rpc::get_info() -pub fn release_version() -> String { - app_version() - .to_string() - .split('+') - .next() - .expect("always at least 1 slice") - .to_string() +/// The Zebra current release version, without any build metadata. +pub fn release_version() -> Version { + let mut release_version = build_version(); + + release_version.build = BuildMetadata::EMPTY; + + release_version } /// The User-Agent string provided by the node. @@ -142,8 +150,6 @@ pub fn release_version() -> String { /// This must be a valid [BIP 14] user agent. 
/// /// [BIP 14]: https://github.com/bitcoin/bips/blob/master/bip-0014.mediawiki -// -// TODO: deduplicate this code with the user agent in zebra_rpc::get_info() pub fn user_agent() -> String { let release_version = release_version(); format!("/Zebra:{release_version}/") @@ -260,7 +266,7 @@ impl Application for ZebradApp { let app_metadata = vec![ // cargo or git tag + short commit - ("version", app_version().to_string()), + ("version", build_version().to_string()), // config ("Zcash network", config.network.network.to_string()), // constants @@ -368,7 +374,7 @@ impl Application for ZebradApp { #[cfg(feature = "sentry")] let guard = sentry::init(sentry::ClientOptions { debug: true, - release: Some(app_version().to_string().into()), + release: Some(build_version().to_string().into()), ..Default::default() }); diff --git a/zebrad/src/commands/start.rs b/zebrad/src/commands/start.rs index 67b9b3e78e2..256c77ef09b 100644 --- a/zebrad/src/commands/start.rs +++ b/zebrad/src/commands/start.rs @@ -82,7 +82,7 @@ use zebra_consensus::router::BackgroundTaskHandles; use zebra_rpc::server::RpcServer; use crate::{ - application::{app_version, user_agent}, + application::{build_version, user_agent}, components::{ inbound::{self, InboundSetupData, MAX_INBOUND_RESPONSE_TIME}, mempool::{self, Mempool}, @@ -215,7 +215,8 @@ impl StartCmd { config.mining.clone(), #[cfg(not(feature = "getblocktemplate-rpcs"))] (), - app_version(), + build_version(), + user_agent(), mempool.clone(), read_only_state_service, router_verifier, diff --git a/zebrad/src/components/tracing/component.rs b/zebrad/src/components/tracing/component.rs index 769d6db46f7..8f32e0943a5 100644 --- a/zebrad/src/components/tracing/component.rs +++ b/zebrad/src/components/tracing/component.rs @@ -16,7 +16,7 @@ use tracing_subscriber::{ EnvFilter, }; -use crate::{application::app_version, components::tracing::Config}; +use crate::{application::build_version, components::tracing::Config}; #[cfg(feature = "flamegraph")] use 
super::flame; @@ -341,7 +341,7 @@ impl Component for Tracing { } fn version(&self) -> abscissa_core::Version { - app_version() + build_version() } fn before_shutdown(&self, _kind: Shutdown) -> Result<(), FrameworkError> { From 2d35feb177884137d3a4c74d1c4b348bb712f42e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 20 Jun 2023 02:42:25 +0000 Subject: [PATCH 122/265] build(deps): bump Swatinem/rust-cache from 2.4.0 to 2.5.0 (#7002) Bumps [Swatinem/rust-cache](https://github.com/Swatinem/rust-cache) from 2.4.0 to 2.5.0. - [Release notes](https://github.com/Swatinem/rust-cache/releases) - [Changelog](https://github.com/Swatinem/rust-cache/blob/master/CHANGELOG.md) - [Commits](https://github.com/Swatinem/rust-cache/compare/v2.4.0...v2.5.0) --- updated-dependencies: - dependency-name: Swatinem/rust-cache dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/continous-integration-os.yml | 4 ++-- .github/workflows/docs.yml | 2 +- .github/workflows/lint.yml | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/workflows/continous-integration-os.yml b/.github/workflows/continous-integration-os.yml index 8d2bb504df8..93303977e95 100644 --- a/.github/workflows/continous-integration-os.yml +++ b/.github/workflows/continous-integration-os.yml @@ -107,7 +107,7 @@ jobs: curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --default-toolchain=${{ matrix.rust }} --profile=minimal - - uses: Swatinem/rust-cache@v2.4.0 + - uses: Swatinem/rust-cache@v2.5.0 # TODO: change Rust cache target directory on Windows, # or remove this workaround once the build is more efficient (#3005). 
#with: @@ -242,7 +242,7 @@ jobs: run: | curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --default-toolchain=stable --profile=minimal - - uses: Swatinem/rust-cache@v2.4.0 + - uses: Swatinem/rust-cache@v2.5.0 with: shared-key: "clippy-cargo-lock" diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml index ba292810d6e..5c3b0999d30 100644 --- a/.github/workflows/docs.yml +++ b/.github/workflows/docs.yml @@ -56,7 +56,7 @@ jobs: run: | curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --default-toolchain=beta --profile=default - - uses: Swatinem/rust-cache@v2.4.0 + - uses: Swatinem/rust-cache@v2.5.0 - name: Setup mdBook uses: peaceiris/actions-mdbook@v1.2.0 diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index f0fb0777781..260d22f7e28 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -86,7 +86,7 @@ jobs: run: | curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --default-toolchain=stable --profile=default - - uses: Swatinem/rust-cache@v2.4.0 + - uses: Swatinem/rust-cache@v2.5.0 with: shared-key: "clippy-cargo-lock" @@ -131,7 +131,7 @@ jobs: # We don't cache `fmt` outputs because the job is quick, # and we want to use the limited GitHub actions cache space for slower jobs. - #- uses: Swatinem/rust-cache@v2.4.0 + #- uses: Swatinem/rust-cache@v2.5.0 - run: | cargo fmt --all -- --check From cd8dddffe58ddd27ae4e1eded224353c90e2685b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 20 Jun 2023 02:42:39 +0000 Subject: [PATCH 123/265] build(deps): bump reviewdog/action-actionlint from 1.37.0 to 1.37.1 (#7003) Bumps [reviewdog/action-actionlint](https://github.com/reviewdog/action-actionlint) from 1.37.0 to 1.37.1. 
- [Release notes](https://github.com/reviewdog/action-actionlint/releases) - [Commits](https://github.com/reviewdog/action-actionlint/compare/v1.37.0...v1.37.1) --- updated-dependencies: - dependency-name: reviewdog/action-actionlint dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/lint.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 260d22f7e28..874bd5ac685 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -178,7 +178,7 @@ jobs: if: ${{ needs.changed-files.outputs.workflows == 'true' }} steps: - uses: actions/checkout@v3.5.3 - - uses: reviewdog/action-actionlint@v1.37.0 + - uses: reviewdog/action-actionlint@v1.37.1 with: level: warning fail_on_error: false From a588965b942409420fa7d42fde153b7cc8437fd2 Mon Sep 17 00:00:00 2001 From: Arya Date: Tue, 20 Jun 2023 01:11:23 -0400 Subject: [PATCH 124/265] change(commands): Ignore error from loading config if running the 'generate' or 'download' commands (#7014) * ignore error when reading config for generate cmd * Adds comments. * ignore load config errs for download cmd too * Fix comment wording --------- Co-authored-by: teor --- zebrad/src/application.rs | 4 +++- zebrad/src/commands.rs | 6 ++++++ 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/zebrad/src/application.rs b/zebrad/src/application.rs index 90adeb0877c..68134f7d878 100644 --- a/zebrad/src/application.rs +++ b/zebrad/src/application.rs @@ -241,10 +241,12 @@ impl Application for ZebradApp { let mut components = self.framework_components(command)?; // Load config *after* framework components so that we can - // report an error to the terminal if it occurs. + // report an error to the terminal if it occurs (unless used with a command that doesn't need the config). 
let config = match command.config_path() { Some(path) => match self.load_config(&path) { Ok(config) => config, + // Ignore errors loading the config for some commands. + Err(_e) if command.cmd().should_ignore_load_config_error() => Default::default(), Err(e) => { status_err!("Zebra could not parse the provided config file. This might mean you are using a deprecated format of the file. You can generate a valid config by running \"zebrad generate\", and diff it against yours to examine any format inconsistencies."); return Err(e); diff --git a/zebrad/src/commands.rs b/zebrad/src/commands.rs index 2f005f799b3..806ac360808 100644 --- a/zebrad/src/commands.rs +++ b/zebrad/src/commands.rs @@ -87,6 +87,12 @@ impl ZebradCmd { "debug" } } + + /// Returns true if this command should ignore errors when + /// attempting to load a config file. + pub(crate) fn should_ignore_load_config_error(&self) -> bool { + matches!(self, ZebradCmd::Generate(_) | ZebradCmd::Download(_)) + } } impl Runnable for ZebradCmd { From b40fc9b0328de96bd304d6daca20dd3226dc5b63 Mon Sep 17 00:00:00 2001 From: Arya Date: Tue, 20 Jun 2023 01:11:45 -0400 Subject: [PATCH 125/265] change(network): Configurable maximum connections per IP (#7013) * Adds config field * adds new generated config * Lint * fixes config_tests --- zebra-network/src/config.rs | 45 +++++++---- zebra-network/src/constants.rs | 6 +- zebra-network/src/peer_set/set.rs | 9 ++- .../src/peer_set/set/tests/vectors.rs | 14 ++-- .../configs/getblocktemplate-v1.0.1.toml | 75 +++++++++++++++++++ zebrad/tests/common/configs/v1.0.1.toml | 71 ++++++++++++++++++ 6 files changed, 194 insertions(+), 26 deletions(-) create mode 100644 zebrad/tests/common/configs/getblocktemplate-v1.0.1.toml create mode 100644 zebrad/tests/common/configs/v1.0.1.toml diff --git a/zebra-network/src/config.rs b/zebra-network/src/config.rs index 23e812f8515..49798137b58 100644 --- a/zebra-network/src/config.rs +++ b/zebra-network/src/config.rs @@ -19,8 +19,8 @@ use 
zebra_chain::parameters::Network; use crate::{ constants::{ - DEFAULT_CRAWL_NEW_PEER_INTERVAL, DNS_LOOKUP_TIMEOUT, INBOUND_PEER_LIMIT_MULTIPLIER, - MAX_PEER_DISK_CACHE_SIZE, OUTBOUND_PEER_LIMIT_MULTIPLIER, + DEFAULT_CRAWL_NEW_PEER_INTERVAL, DEFAULT_MAX_CONNS_PER_IP, DNS_LOOKUP_TIMEOUT, + INBOUND_PEER_LIMIT_MULTIPLIER, MAX_PEER_DISK_CACHE_SIZE, OUTBOUND_PEER_LIMIT_MULTIPLIER, }, protocol::external::{canonical_peer_addr, canonical_socket_addr}, BoxError, PeerSocketAddr, @@ -153,6 +153,12 @@ pub struct Config { /// next connection attempt. #[serde(with = "humantime_serde")] pub crawl_new_peer_interval: Duration, + + /// The maximum number of peer connections Zebra will keep for a given IP address + /// before it drops any additional peer connections with that IP. + /// + /// The default and minimum value are 1. + pub max_connections_per_ip: usize, } impl Config { @@ -591,6 +597,7 @@ impl Default for Config { // But Zebra should only make a small number of initial outbound connections, // so that idle peers don't use too many connection slots. 
peerset_initial_target_size: 25, + max_connections_per_ip: DEFAULT_MAX_CONNS_PER_IP, } } } @@ -611,6 +618,7 @@ impl<'de> Deserialize<'de> for Config { peerset_initial_target_size: usize, #[serde(alias = "new_peer_interval", with = "humantime_serde")] crawl_new_peer_interval: Duration, + max_connections_per_ip: Option, } impl Default for DConfig { @@ -624,16 +632,26 @@ impl<'de> Deserialize<'de> for Config { cache_dir: config.cache_dir, peerset_initial_target_size: config.peerset_initial_target_size, crawl_new_peer_interval: config.crawl_new_peer_interval, + max_connections_per_ip: Some(DEFAULT_MAX_CONNS_PER_IP), } } } - let config = DConfig::deserialize(deserializer)?; - - let listen_addr = match config.listen_addr.parse::() { + let DConfig { + listen_addr, + network, + initial_mainnet_peers, + initial_testnet_peers, + cache_dir, + peerset_initial_target_size, + crawl_new_peer_interval, + max_connections_per_ip, + } = DConfig::deserialize(deserializer)?; + + let listen_addr = match listen_addr.parse::() { Ok(socket) => Ok(socket), - Err(_) => match config.listen_addr.parse::() { - Ok(ip) => Ok(SocketAddr::new(ip, config.network.default_port())), + Err(_) => match listen_addr.parse::() { + Ok(ip) => Ok(SocketAddr::new(ip, network.default_port())), Err(err) => Err(de::Error::custom(format!( "{err}; Hint: addresses can be a IPv4, IPv6 (with brackets), or a DNS name, the port is optional" ))), @@ -642,12 +660,13 @@ impl<'de> Deserialize<'de> for Config { Ok(Config { listen_addr: canonical_socket_addr(listen_addr), - network: config.network, - initial_mainnet_peers: config.initial_mainnet_peers, - initial_testnet_peers: config.initial_testnet_peers, - cache_dir: config.cache_dir, - peerset_initial_target_size: config.peerset_initial_target_size, - crawl_new_peer_interval: config.crawl_new_peer_interval, + network, + initial_mainnet_peers, + initial_testnet_peers, + cache_dir, + peerset_initial_target_size, + crawl_new_peer_interval, + max_connections_per_ip: 
max_connections_per_ip.unwrap_or(DEFAULT_MAX_CONNS_PER_IP), }) } } diff --git a/zebra-network/src/constants.rs b/zebra-network/src/constants.rs index 289ccd79399..b466d4fd42c 100644 --- a/zebra-network/src/constants.rs +++ b/zebra-network/src/constants.rs @@ -67,9 +67,11 @@ pub const INBOUND_PEER_LIMIT_MULTIPLIER: usize = 5; /// See [`INBOUND_PEER_LIMIT_MULTIPLIER`] for details. pub const OUTBOUND_PEER_LIMIT_MULTIPLIER: usize = 3; -/// The maximum number of peer connections Zebra will keep for a given IP address +/// The default maximum number of peer connections Zebra will keep for a given IP address /// before it drops any additional peer connections with that IP. -pub const MAX_CONNS_PER_IP: usize = 1; +/// +/// This will be used as Config.max_connections_per_ip if no value is provided. +pub const DEFAULT_MAX_CONNS_PER_IP: usize = 1; /// The buffer size for the peer set. /// diff --git a/zebra-network/src/peer_set/set.rs b/zebra-network/src/peer_set/set.rs index 72fdbe79955..522ee6b1802 100644 --- a/zebra-network/src/peer_set/set.rs +++ b/zebra-network/src/peer_set/set.rs @@ -254,7 +254,7 @@ where last_peer_log: Option, /// The configured maximum number of peers that can be in the - /// peer set per IP, defaults to [`crate::constants::MAX_CONNS_PER_IP`] + /// peer set per IP, defaults to [`crate::constants::DEFAULT_MAX_CONNS_PER_IP`] max_conns_per_ip: usize, } @@ -290,7 +290,8 @@ where /// - `address_book`: when peer set is busy, it logs address book diagnostics. /// - `minimum_peer_version`: endpoint to see the minimum peer protocol version in real time. /// - `max_conns_per_ip`: configured maximum number of peers that can be in the - /// peer set per IP, defaults to [`crate::constants::MAX_CONNS_PER_IP`]. + /// peer set per IP, defaults to the config value or to + /// [`crate::constants::DEFAULT_MAX_CONNS_PER_IP`]. 
pub fn new( config: &Config, discover: D, @@ -328,7 +329,7 @@ where last_peer_log: None, address_metrics, - max_conns_per_ip: max_conns_per_ip.unwrap_or(crate::constants::MAX_CONNS_PER_IP), + max_conns_per_ip: max_conns_per_ip.unwrap_or(config.max_connections_per_ip), } } @@ -540,7 +541,7 @@ where // # Security // - // drop the new peer if there are already `MAX_CONNS_PER_IP` peers with + // drop the new peer if there are already `max_conns_per_ip` peers with // the same IP address in the peer set. if self.num_peers_with_ip(key.ip()) >= self.max_conns_per_ip { std::mem::drop(svc); diff --git a/zebra-network/src/peer_set/set/tests/vectors.rs b/zebra-network/src/peer_set/set/tests/vectors.rs index c3534aed182..8290469997c 100644 --- a/zebra-network/src/peer_set/set/tests/vectors.rs +++ b/zebra-network/src/peer_set/set/tests/vectors.rs @@ -12,7 +12,7 @@ use zebra_chain::{ use super::{PeerSetBuilder, PeerVersions}; use crate::{ - constants::MAX_CONNS_PER_IP, + constants::DEFAULT_MAX_CONNS_PER_IP, peer::{ClientRequest, MinimumPeerVersion}, peer_set::inventory_registry::InventoryStatus, protocol::external::{types::Version, InventoryHash}, @@ -145,7 +145,7 @@ fn peer_set_ready_multiple_connections() { let (mut peer_set, _peer_set_guard) = PeerSetBuilder::new() .with_discover(discovered_peers) .with_minimum_peer_version(minimum_peer_version.clone()) - .max_conns_per_ip(max(3, MAX_CONNS_PER_IP)) + .max_conns_per_ip(max(3, DEFAULT_MAX_CONNS_PER_IP)) .build(); // Get peerset ready @@ -178,7 +178,7 @@ fn peer_set_ready_multiple_connections() { #[test] fn peer_set_rejects_connections_past_per_ip_limit() { - const NUM_PEER_VERSIONS: usize = crate::constants::MAX_CONNS_PER_IP + 1; + const NUM_PEER_VERSIONS: usize = crate::constants::DEFAULT_MAX_CONNS_PER_IP + 1; // Use three peers with the same version let peer_version = Version::min_specified_for_upgrade(Network::Mainnet, NetworkUpgrade::Nu5); @@ -220,7 +220,7 @@ fn peer_set_rejects_connections_past_per_ip_limit() { // Check we 
have the right amount of ready services assert_eq!( peer_ready.ready_services.len(), - crate::constants::MAX_CONNS_PER_IP + crate::constants::DEFAULT_MAX_CONNS_PER_IP ); }); } @@ -259,7 +259,7 @@ fn peer_set_route_inv_empty_registry() { let (mut peer_set, _peer_set_guard) = PeerSetBuilder::new() .with_discover(discovered_peers) .with_minimum_peer_version(minimum_peer_version.clone()) - .max_conns_per_ip(max(2, MAX_CONNS_PER_IP)) + .max_conns_per_ip(max(2, DEFAULT_MAX_CONNS_PER_IP)) .build(); // Get peerset ready @@ -342,7 +342,7 @@ fn peer_set_route_inv_advertised_registry_order(advertised_first: bool) { let (mut peer_set, mut peer_set_guard) = PeerSetBuilder::new() .with_discover(discovered_peers) .with_minimum_peer_version(minimum_peer_version.clone()) - .max_conns_per_ip(max(2, MAX_CONNS_PER_IP)) + .max_conns_per_ip(max(2, DEFAULT_MAX_CONNS_PER_IP)) .build(); // Advertise some inventory @@ -450,7 +450,7 @@ fn peer_set_route_inv_missing_registry_order(missing_first: bool) { let (mut peer_set, mut peer_set_guard) = PeerSetBuilder::new() .with_discover(discovered_peers) .with_minimum_peer_version(minimum_peer_version.clone()) - .max_conns_per_ip(max(2, MAX_CONNS_PER_IP)) + .max_conns_per_ip(max(2, DEFAULT_MAX_CONNS_PER_IP)) .build(); // Mark some inventory as missing diff --git a/zebrad/tests/common/configs/getblocktemplate-v1.0.1.toml b/zebrad/tests/common/configs/getblocktemplate-v1.0.1.toml new file mode 100644 index 00000000000..3536c80c9c8 --- /dev/null +++ b/zebrad/tests/common/configs/getblocktemplate-v1.0.1.toml @@ -0,0 +1,75 @@ +# Default configuration for zebrad. +# +# This file can be used as a skeleton for custom configs. +# +# Unspecified fields use default values. Optional fields are Some(field) if the +# field is present and None if it is absent. +# +# This file is generated as an example using zebrad's current defaults. +# You should set only the config options you want to keep, and delete the rest. 
+# Only a subset of fields are present in the skeleton, since optional values +# whose default is None are omitted. +# +# The config format (including a complete list of sections and fields) is +# documented here: +# https://doc.zebra.zfnd.org/zebrad/config/struct.ZebradConfig.html +# +# zebrad attempts to load configs in the following order: +# +# 1. The -c flag on the command line, e.g., `zebrad -c myconfig.toml start`; +# 2. The file `zebrad.toml` in the users's preference directory (platform-dependent); +# 3. The default config. + +[consensus] +checkpoint_sync = true +debug_skip_parameter_preload = false + +[mempool] +eviction_memory_time = "1h" +tx_cost_limit = 80000000 + +[metrics] + +[mining] +debug_like_zcashd = true + +[network] +cache_dir = true +crawl_new_peer_interval = "1m 1s" +initial_mainnet_peers = [ + "dnsseed.z.cash:8233", + "dnsseed.str4d.xyz:8233", + "mainnet.seeder.zfnd.org:8233", + "mainnet.is.yolo.money:8233", +] +initial_testnet_peers = [ + "dnsseed.testnet.z.cash:18233", + "testnet.seeder.zfnd.org:18233", + "testnet.is.yolo.money:18233", +] +listen_addr = "0.0.0.0:8233" +max_connections_per_ip = 1 +network = "Mainnet" +peerset_initial_target_size = 25 + +[rpc] +debug_force_finished_sync = false +parallel_cpu_threads = 0 + +[state] +cache_dir = "cache_dir" +delete_old_database = true +ephemeral = false + +[sync] +checkpoint_verify_concurrency_limit = 1000 +download_concurrency_limit = 50 +full_verify_concurrency_limit = 20 +parallel_cpu_threads = 0 + +[tracing] +buffer_limit = 128000 +force_use_color = false +use_color = true +use_journald = false + diff --git a/zebrad/tests/common/configs/v1.0.1.toml b/zebrad/tests/common/configs/v1.0.1.toml new file mode 100644 index 00000000000..02bac53da62 --- /dev/null +++ b/zebrad/tests/common/configs/v1.0.1.toml @@ -0,0 +1,71 @@ +# Default configuration for zebrad. +# +# This file can be used as a skeleton for custom configs. +# +# Unspecified fields use default values. 
Optional fields are Some(field) if the +# field is present and None if it is absent. +# +# This file is generated as an example using zebrad's current defaults. +# You should set only the config options you want to keep, and delete the rest. +# Only a subset of fields are present in the skeleton, since optional values +# whose default is None are omitted. +# +# The config format (including a complete list of sections and fields) is +# documented here: +# https://doc.zebra.zfnd.org/zebrad/config/struct.ZebradConfig.html +# +# zebrad attempts to load configs in the following order: +# +# 1. The -c flag on the command line, e.g., `zebrad -c myconfig.toml start`; +# 2. The file `zebrad.toml` in the users's preference directory (platform-dependent); +# 3. The default config. + +[consensus] +checkpoint_sync = true +debug_skip_parameter_preload = false + +[mempool] +eviction_memory_time = "1h" +tx_cost_limit = 80000000 + +[metrics] + +[network] +cache_dir = true +crawl_new_peer_interval = "1m 1s" +initial_mainnet_peers = [ + "dnsseed.z.cash:8233", + "dnsseed.str4d.xyz:8233", + "mainnet.seeder.zfnd.org:8233", + "mainnet.is.yolo.money:8233", +] +initial_testnet_peers = [ + "dnsseed.testnet.z.cash:18233", + "testnet.seeder.zfnd.org:18233", + "testnet.is.yolo.money:18233", +] +listen_addr = "0.0.0.0:8233" +max_connections_per_ip = 1 +network = "Mainnet" +peerset_initial_target_size = 25 + +[rpc] +debug_force_finished_sync = false +parallel_cpu_threads = 1 + +[state] +cache_dir = "cache_dir" +delete_old_database = true +ephemeral = false + +[sync] +checkpoint_verify_concurrency_limit = 1000 +download_concurrency_limit = 50 +full_verify_concurrency_limit = 20 +parallel_cpu_threads = 0 + +[tracing] +buffer_limit = 128000 +force_use_color = false +use_color = true +use_journald = false From 795d40a0034457afe83e7dd277be211120abff2b Mon Sep 17 00:00:00 2001 From: teor Date: Tue, 20 Jun 2023 17:10:40 +1000 Subject: [PATCH 126/265] change(readme): Install from crates.io not git in 
the README, automate release version replacements (#6977) * Install from crates.io not git * Move git instructions to install.md * Add versions back to install.md * Automate release replacements in Cargo.toml * Re-add h4 to re-enable links to those sections * Remove release replacements from zebra/Cargo.toml * Add release replacements to zebrad/Cargo.toml * Put the toml table in the right place * Try another place in Cargo.toml * Let's try again without "package." * Remove duplicate release metadata from Cargo.toml * Apply suggestions from code review Co-authored-by: Marek --------- Co-authored-by: Marek --- Cargo.toml | 2 ++ README.md | 16 ++++++---- book/src/user/install.md | 66 +++++++++++++++++++++++++++++----------- zebrad/Cargo.toml | 20 ++++++++---- 4 files changed, 74 insertions(+), 30 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index e6ffaefb9db..0f81f34fa45 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -17,6 +17,8 @@ members = [ # Use the edition 2021 dependency resolver in the workspace, to match the crates resolver = "2" +# Compilation settings + [profile.dev] panic = "abort" diff --git a/README.md b/README.md index 9c4ae188603..af14b07cd4e 100644 --- a/README.md +++ b/README.md @@ -74,7 +74,9 @@ Every few weeks, we release a [new Zebra version](https://github.com/ZcashFounda Below are quick summaries for installing the dependencies on your machine. -

General instructions for installing dependencies

+
+ +

General instructions for installing dependencies

1. Install [`cargo` and `rustc`](https://www.rust-lang.org/tools/install). @@ -88,7 +90,9 @@ Below are quick summaries for installing the dependencies on your machine.
-

Dependencies on Arch

+
+ +

Dependencies on Arch

```sh sudo pacman -S rust clang pkgconf @@ -98,10 +102,10 @@ Note that the package `clang` includes `libclang` as well as the C++ compiler.
-Once the dependencies are in place, you can build Zebra +Once the dependencies are in place, you can build and install Zebra: ```sh -cargo install --locked --git https://github.com/ZcashFoundation/zebra --tag v1.0.0 zebrad +cargo install --locked zebrad ``` You can start Zebra by @@ -110,8 +114,8 @@ You can start Zebra by zebrad start ``` -See the [Running Zebra](https://zebra.zfnd.org/user/run.html) section in the -book for more details. +See the [Installing Zebra](https://zebra.zfnd.org/user/install.html) and [Running Zebra](https://zebra.zfnd.org/user/run.html) +sections in the book for more details. #### Optional Features diff --git a/book/src/user/install.md b/book/src/user/install.md index 9f9826f6412..c93790920ef 100644 --- a/book/src/user/install.md +++ b/book/src/user/install.md @@ -2,19 +2,64 @@ Follow the [Docker or compilation instructions](https://zebra.zfnd.org/index.html#getting-started). -#### ARM +## Installing Dependencies + +To compile Zebra from source, you will need to [install some dependencies.](https://zebra.zfnd.org/index.html#building-zebra). + + +## Alternative Compilation Methods + +### Compiling Manually from git + +To compile Zebra directly from GitHub, or from a GitHub release source archive: + +1. Install the dependencies (see above) + +2. Get the source code using `git` or from a GitHub source package + +```sh +git clone https://github.com/ZcashFoundation/zebra.git +cd zebra +git checkout v1.0.0 +``` + +3. Build and Run `zebrad` + +```sh +cargo build --release --bin zebrad +target/release/zebrad start +``` + +### Compiling from git using cargo install + +```sh +cargo install --git https://github.com/ZcashFoundation/zebra --tag v1.0.0 zebrad +``` + +### Compiling on ARM If you're using an ARM machine, [install the Rust compiler for ARM](https://rust-lang.github.io/rustup/installation/other.html). If you build using the x86_64 tools, Zebra might run really slowly. 
-#### Build Troubleshooting +## Build Troubleshooting If you're having trouble with: -Dependencies: +### Compilers + +- **clang:** install both `libclang` and `clang` - they are usually different packages +- **libclang:** check out the [clang-sys documentation](https://github.com/KyleMayes/clang-sys#dependencies) +- **g++ or MSVC++:** try using clang or Xcode instead +- **rustc:** use the latest stable `rustc` and `cargo` versions + - Zebra does not have a minimum supported Rust version (MSRV) policy: any release can update the required Rust version. + +### Dependencies - use `cargo install` without `--locked` to build with the latest versions of each dependency + +#### Optional Tor feature + - **sqlite linker errors:** libsqlite3 is an optional dependency of the `zebra-network/tor` feature. If you don't have it installed, you might see errors like `note: /usr/bin/ld: cannot find -lsqlite3`. [Follow the arti instructions](https://gitlab.torproject.org/tpo/core/arti/-/blob/main/CONTRIBUTING.md#setting-up-your-development-environment) @@ -25,19 +70,4 @@ cargo build cargo build -p zebrad --all-features ``` -Compilers: - -- **clang:** install both `libclang` and `clang` - they are usually different packages -- **libclang:** check out the [clang-sys documentation](https://github.com/KyleMayes/clang-sys#dependencies) -- **g++ or MSVC++:** try using clang or Xcode instead -- **rustc:** use rustc 1.65 or later - - Zebra does not have a minimum supported Rust version (MSRV) policy: any release can update the required Rust version. - -### Dependencies - -Zebra primarily depends on pure Rust crates, and some Rust/C++ crates: - -- [rocksdb](https://crates.io/crates/rocksdb) -- [zcash_script](https://crates.io/crates/zcash_script) -They will be automatically built along with `zebrad`. 
diff --git a/zebrad/Cargo.toml b/zebrad/Cargo.toml index 7b65394c3da..9fb96712c70 100644 --- a/zebrad/Cargo.toml +++ b/zebrad/Cargo.toml @@ -7,6 +7,13 @@ description = "The Zcash Foundation's independent, consensus-compatible implemen license = "MIT OR Apache-2.0" repository = "https://github.com/ZcashFoundation/zebra" +readme = "../README.md" +homepage = "https://zfnd.org/zebra/" +# crates.io is limited to 5 keywords and categories +keywords = ["zebra", "zcash"] +# Must be one of +categories = ["command-line-utilities", "cryptography::cryptocurrencies"] + # Settings that impact compilation edition = "2021" @@ -20,12 +27,13 @@ rust-version = "1.66" # when run in the workspace directory default-run = "zebrad" -readme = "../README.md" -homepage = "https://zfnd.org/zebra/" -# crates.io is limited to 5 keywords and categories -keywords = ["zebra", "zcash"] -# Must be one of -categories = ["command-line-utilities", "cryptography::cryptocurrencies"] +# `cargo release` settings +[metadata.release] +pre-release-replacements = [ + {file="../book/src/user/install.md", search="git checkout [a-z0-9\\.-]+", replace="git checkout {{version}}"}, + {file="../book/src/user/install.md", search="--tag [a-z0-9\\.-]+", replace="--tag {{version}}"}, + {file="../book/src/user/docker.md", search="--branch [a-z0-9\\.-]+", replace="--branch {{version}}"}, +] [features] # In release builds, don't compile debug logging code, to improve performance. 
From 56a76385f0b5c6b5890bb0530f6a55cf4af90298 Mon Sep 17 00:00:00 2001 From: teor Date: Tue, 20 Jun 2023 17:11:04 +1000 Subject: [PATCH 127/265] git ls-tree --full-tree -r --name-only HEAD | xargs sed -i -e 's/router_verifier/block_verifier_router/g' (#6998) cargo fmt --all --- .../src/dev/diagrams/service-dependencies.svg | 26 +++---- book/src/dev/overview.md | 10 +-- zebra-consensus/src/router/tests.rs | 52 ++++++++------ .../src/methods/get_block_template_rpcs.rs | 14 ++-- .../get_block_template.rs | 8 +-- .../tests/snapshot/get_block_template_rpcs.rs | 22 +++--- zebra-rpc/src/methods/tests/vectors.rs | 28 ++++---- zebra-rpc/src/server.rs | 4 +- zebra-rpc/src/server/tests/vectors.rs | 28 ++++---- zebrad/src/commands/start.rs | 8 +-- zebrad/src/components/sync/tests/vectors.rs | 68 +++++++++---------- 11 files changed, 142 insertions(+), 126 deletions(-) diff --git a/book/src/dev/diagrams/service-dependencies.svg b/book/src/dev/diagrams/service-dependencies.svg index 979b44846ba..d6d02928dd5 100644 --- a/book/src/dev/diagrams/service-dependencies.svg +++ b/book/src/dev/diagrams/service-dependencies.svg @@ -74,15 +74,15 @@ - +
-router_verifier +block_verifier_router -router_verifier +block_verifier_router - + -inbound->router_verifier +inbound->block_verifier_router @@ -104,9 +104,9 @@ - + -rpc_server->router_verifier +rpc_server->block_verifier_router @@ -116,9 +116,9 @@ checkpoint_verifier - + -router_verifier->checkpoint_verifier +block_verifier_router->checkpoint_verifier @@ -128,9 +128,9 @@ block_verifier - + -router_verifier->block_verifier +block_verifier_router->block_verifier @@ -146,9 +146,9 @@ syncer - + -syncer->router_verifier +syncer->block_verifier_router diff --git a/book/src/dev/overview.md b/book/src/dev/overview.md index f5e5f82afdc..2142be8b0fc 100644 --- a/book/src/dev/overview.md +++ b/book/src/dev/overview.md @@ -56,18 +56,18 @@ digraph services { inbound -> state rpc_server -> state mempool -> transaction_verifier - router_verifier -> checkpoint_verifier + block_verifier_router -> checkpoint_verifier inbound -> mempool rpc_server -> mempool - inbound -> router_verifier - syncer -> router_verifier - rpc_server -> router_verifier [style=dotted] + inbound -> block_verifier_router + syncer -> block_verifier_router + rpc_server -> block_verifier_router [style=dotted] syncer -> peer_set mempool -> peer_set block_verifier -> state checkpoint_verifier -> state block_verifier -> transaction_verifier - router_verifier -> block_verifier + block_verifier_router -> block_verifier rpc_server -> inbound [style=invis] // for layout of the diagram } diff --git a/zebra-consensus/src/router/tests.rs b/zebra-consensus/src/router/tests.rs index fd35b88031e..eb2abf1b2a3 100644 --- a/zebra-consensus/src/router/tests.rs +++ b/zebra-consensus/src/router/tests.rs @@ -66,14 +66,18 @@ async fn verifiers_from_network( + 'static, ) { let state_service = zs::init_test(network); - let (router_verifier, _transaction_verifier, _groth16_download_handle, _max_checkpoint_height) = - crate::router::init(Config::default(), network, state_service.clone(), true).await; + let ( + block_verifier_router, + 
_transaction_verifier, + _groth16_download_handle, + _max_checkpoint_height, + ) = crate::router::init(Config::default(), network, state_service.clone(), true).await; // We can drop the download task handle here, because: // - if the download task fails, the tests will panic, and // - if the download task hangs, the tests will hang. - (router_verifier, state_service) + (block_verifier_router, state_service) } static BLOCK_VERIFY_TRANSCRIPT_GENESIS: Lazy< @@ -165,15 +169,19 @@ async fn verify_checkpoint(config: Config) -> Result<(), Report> { // init_from_verifiers. // // Download task panics and timeouts are propagated to the tests that use Groth16 verifiers. - let (router_verifier, _transaction_verifier, _groth16_download_handle, _max_checkpoint_height) = - super::init(config.clone(), network, zs::init_test(network), true).await; + let ( + block_verifier_router, + _transaction_verifier, + _groth16_download_handle, + _max_checkpoint_height, + ) = super::init(config.clone(), network, zs::init_test(network), true).await; // Add a timeout layer - let router_verifier = - TimeoutLayer::new(Duration::from_secs(VERIFY_TIMEOUT_SECONDS)).layer(router_verifier); + let block_verifier_router = + TimeoutLayer::new(Duration::from_secs(VERIFY_TIMEOUT_SECONDS)).layer(block_verifier_router); let transcript = Transcript::from(BLOCK_VERIFY_TRANSCRIPT_GENESIS.iter().cloned()); - transcript.check(router_verifier).await.unwrap(); + transcript.check(block_verifier_router).await.unwrap(); Ok(()) } @@ -194,11 +202,11 @@ async fn verify_fail_no_coinbase() -> Result<(), Report> { let (router, state_service) = verifiers_from_network(Network::Mainnet).await; // Add a timeout layer - let router_verifier = + let block_verifier_router = TimeoutLayer::new(Duration::from_secs(VERIFY_TIMEOUT_SECONDS)).layer(router); let transcript = Transcript::from(NO_COINBASE_TRANSCRIPT.iter().cloned()); - transcript.check(router_verifier).await.unwrap(); + transcript.check(block_verifier_router).await.unwrap(); 
let transcript = Transcript::from(NO_COINBASE_STATE_TRANSCRIPT.iter().cloned()); transcript.check(state_service).await.unwrap(); @@ -216,14 +224,14 @@ async fn round_trip_checkpoint_test() -> Result<(), Report> { async fn round_trip_checkpoint() -> Result<(), Report> { let _init_guard = zebra_test::init(); - let (router_verifier, state_service) = verifiers_from_network(Network::Mainnet).await; + let (block_verifier_router, state_service) = verifiers_from_network(Network::Mainnet).await; // Add a timeout layer - let router_verifier = - TimeoutLayer::new(Duration::from_secs(VERIFY_TIMEOUT_SECONDS)).layer(router_verifier); + let block_verifier_router = + TimeoutLayer::new(Duration::from_secs(VERIFY_TIMEOUT_SECONDS)).layer(block_verifier_router); let transcript = Transcript::from(BLOCK_VERIFY_TRANSCRIPT_GENESIS.iter().cloned()); - transcript.check(router_verifier).await.unwrap(); + transcript.check(block_verifier_router).await.unwrap(); let transcript = Transcript::from(STATE_VERIFY_TRANSCRIPT_GENESIS.iter().cloned()); transcript.check(state_service).await.unwrap(); @@ -241,20 +249,26 @@ async fn verify_fail_add_block_checkpoint_test() -> Result<(), Report> { async fn verify_fail_add_block_checkpoint() -> Result<(), Report> { let _init_guard = zebra_test::init(); - let (router_verifier, state_service) = verifiers_from_network(Network::Mainnet).await; + let (block_verifier_router, state_service) = verifiers_from_network(Network::Mainnet).await; // Add a timeout layer - let router_verifier = - TimeoutLayer::new(Duration::from_secs(VERIFY_TIMEOUT_SECONDS)).layer(router_verifier); + let block_verifier_router = + TimeoutLayer::new(Duration::from_secs(VERIFY_TIMEOUT_SECONDS)).layer(block_verifier_router); let transcript = Transcript::from(BLOCK_VERIFY_TRANSCRIPT_GENESIS.iter().cloned()); - transcript.check(router_verifier.clone()).await.unwrap(); + transcript + .check(block_verifier_router.clone()) + .await + .unwrap(); let transcript = 
Transcript::from(STATE_VERIFY_TRANSCRIPT_GENESIS.iter().cloned()); transcript.check(state_service.clone()).await.unwrap(); let transcript = Transcript::from(BLOCK_VERIFY_TRANSCRIPT_GENESIS_FAIL.iter().cloned()); - transcript.check(router_verifier.clone()).await.unwrap(); + transcript + .check(block_verifier_router.clone()) + .await + .unwrap(); let transcript = Transcript::from(STATE_VERIFY_TRANSCRIPT_GENESIS.iter().cloned()); transcript.check(state_service.clone()).await.unwrap(); diff --git a/zebra-rpc/src/methods/get_block_template_rpcs.rs b/zebra-rpc/src/methods/get_block_template_rpcs.rs index 5afc977c16b..5f0ff5ce274 100644 --- a/zebra-rpc/src/methods/get_block_template_rpcs.rs +++ b/zebra-rpc/src/methods/get_block_template_rpcs.rs @@ -273,7 +273,7 @@ pub struct GetBlockTemplateRpcImpl< latest_chain_tip: Tip, /// The chain verifier, used for submitting blocks. - router_verifier: BlockVerifierRouter, + block_verifier_router: BlockVerifierRouter, /// The chain sync status, used for checking if Zebra is likely close to the network chain tip. 
sync_status: SyncStatus, @@ -319,7 +319,7 @@ where mempool: Buffer, state: State, latest_chain_tip: Tip, - router_verifier: BlockVerifierRouter, + block_verifier_router: BlockVerifierRouter, sync_status: SyncStatus, address_book: AddressBook, ) -> Self { @@ -358,7 +358,7 @@ where mempool, state, latest_chain_tip, - router_verifier, + block_verifier_router, sync_status, address_book, } @@ -454,7 +454,7 @@ where .and_then(get_block_template::JsonParameters::block_proposal_data) { return validate_block_proposal( - self.router_verifier.clone(), + self.block_verifier_router.clone(), block_proposal_bytes, network, latest_chain_tip, @@ -737,7 +737,7 @@ where HexData(block_bytes): HexData, _parameters: Option, ) -> BoxFuture> { - let mut router_verifier = self.router_verifier.clone(); + let mut block_verifier_router = self.block_verifier_router.clone(); async move { let block: Block = match block_bytes.zcash_deserialize_into() { @@ -755,7 +755,7 @@ where .unwrap_or_else(|| "invalid coinbase height".to_string()); let block_hash = block.hash(); - let router_verifier_response = router_verifier + let block_verifier_router_response = block_verifier_router .ready() .await .map_err(|error| Error { @@ -766,7 +766,7 @@ where .call(zebra_consensus::Request::Commit(Arc::new(block))) .await; - let chain_error = match router_verifier_response { + let chain_error = match block_verifier_router_response { // Currently, this match arm returns `null` (Accepted) for blocks committed // to any chain, but Accepted is only for blocks in the best chain. // diff --git a/zebra-rpc/src/methods/get_block_template_rpcs/get_block_template.rs b/zebra-rpc/src/methods/get_block_template_rpcs/get_block_template.rs index f18242b7cdd..04a3fa9c8e1 100644 --- a/zebra-rpc/src/methods/get_block_template_rpcs/get_block_template.rs +++ b/zebra-rpc/src/methods/get_block_template_rpcs/get_block_template.rs @@ -98,7 +98,7 @@ pub fn check_miner_address( /// /// Returns a `getblocktemplate` [`Response`]. 
pub async fn validate_block_proposal( - mut router_verifier: BlockVerifierRouter, + mut block_verifier_router: BlockVerifierRouter, block_proposal_bytes: Vec, network: Network, latest_chain_tip: Tip, @@ -129,7 +129,7 @@ where } }; - let router_verifier_response = router_verifier + let block_verifier_router_response = block_verifier_router .ready() .await .map_err(|error| Error { @@ -140,12 +140,12 @@ where .call(zebra_consensus::Request::CheckProposal(Arc::new(block))) .await; - Ok(router_verifier_response + Ok(block_verifier_router_response .map(|_hash| ProposalResponse::Valid) .unwrap_or_else(|verify_chain_error| { tracing::info!( ?verify_chain_error, - "error response from router_verifier in CheckProposal request" + "error response from block_verifier_router in CheckProposal request" ); ProposalResponse::rejected("invalid proposal", verify_chain_error) diff --git a/zebra-rpc/src/methods/tests/snapshot/get_block_template_rpcs.rs b/zebra-rpc/src/methods/tests/snapshot/get_block_template_rpcs.rs index 05e12874d61..f941b7e2771 100644 --- a/zebra-rpc/src/methods/tests/snapshot/get_block_template_rpcs.rs +++ b/zebra-rpc/src/methods/tests/snapshot/get_block_template_rpcs.rs @@ -85,7 +85,7 @@ pub async fn test_responses( >::Future: Send, { let ( - router_verifier, + block_verifier_router, _transaction_verifier, _parameter_download_task_handle, _max_checkpoint_height, @@ -145,7 +145,7 @@ pub async fn test_responses( Buffer::new(mempool.clone(), 1), read_state, mock_chain_tip.clone(), - router_verifier.clone(), + block_verifier_router.clone(), mock_sync_status.clone(), mock_address_book, ); @@ -267,7 +267,7 @@ pub async fn test_responses( Buffer::new(mempool.clone(), 1), read_state.clone(), mock_chain_tip.clone(), - router_verifier, + block_verifier_router, mock_sync_status.clone(), MockAddressBookPeers::default(), ); @@ -365,16 +365,16 @@ pub async fn test_responses( snapshot_rpc_getblocktemplate("invalid-proposal", get_block_template, None, &settings); - // the 
following snapshots use a mock read_state and router_verifier + // the following snapshots use a mock read_state and block_verifier_router - let mut mock_router_verifier = MockService::build().for_unit_tests(); + let mut mock_block_verifier_router = MockService::build().for_unit_tests(); let get_block_template_rpc_mock_state_verifier = GetBlockTemplateRpcImpl::new( network, mining_config, Buffer::new(mempool.clone(), 1), read_state.clone(), mock_chain_tip, - mock_router_verifier.clone(), + mock_block_verifier_router.clone(), mock_sync_status, MockAddressBookPeers::default(), ); @@ -387,15 +387,17 @@ pub async fn test_responses( }), ); - let mock_router_verifier_request_handler = async move { - mock_router_verifier + let mock_block_verifier_router_request_handler = async move { + mock_block_verifier_router .expect_request_that(|req| matches!(req, zebra_consensus::Request::CheckProposal(_))) .await .respond(Hash::from([0; 32])); }; - let (get_block_template, ..) = - tokio::join!(get_block_template_fut, mock_router_verifier_request_handler,); + let (get_block_template, ..) 
= tokio::join!( + get_block_template_fut, + mock_block_verifier_router_request_handler, + ); let get_block_template = get_block_template.expect("unexpected error in getblocktemplate RPC call"); diff --git a/zebra-rpc/src/methods/tests/vectors.rs b/zebra-rpc/src/methods/tests/vectors.rs index 42cc3c59213..255acafd79f 100644 --- a/zebra-rpc/src/methods/tests/vectors.rs +++ b/zebra-rpc/src/methods/tests/vectors.rs @@ -840,7 +840,7 @@ async fn rpc_getblockcount() { zebra_state::populated_state(blocks.clone(), Mainnet).await; let ( - router_verifier, + block_verifier_router, _transaction_verifier, _parameter_download_task_handle, _max_checkpoint_height, @@ -859,7 +859,7 @@ async fn rpc_getblockcount() { Buffer::new(mempool.clone(), 1), read_state, latest_chain_tip.clone(), - router_verifier, + block_verifier_router, MockSyncStatus::default(), MockAddressBookPeers::default(), ); @@ -890,7 +890,7 @@ async fn rpc_getblockcount_empty_state() { zebra_state::init_test_services(Mainnet); let ( - router_verifier, + block_verifier_router, _transaction_verifier, _parameter_download_task_handle, _max_checkpoint_height, @@ -909,7 +909,7 @@ async fn rpc_getblockcount_empty_state() { Buffer::new(mempool.clone(), 1), read_state, latest_chain_tip.clone(), - router_verifier, + block_verifier_router, MockSyncStatus::default(), MockAddressBookPeers::default(), ); @@ -942,7 +942,7 @@ async fn rpc_getpeerinfo() { zebra_state::init_test_services(Mainnet); let ( - router_verifier, + block_verifier_router, _transaction_verifier, _parameter_download_task_handle, _max_checkpoint_height, @@ -975,7 +975,7 @@ async fn rpc_getpeerinfo() { Buffer::new(mempool.clone(), 1), read_state, latest_chain_tip.clone(), - router_verifier, + block_verifier_router, MockSyncStatus::default(), mock_address_book, ); @@ -1017,7 +1017,7 @@ async fn rpc_getblockhash() { zebra_state::populated_state(blocks.clone(), Mainnet).await; let ( - router_verifier, + block_verifier_router, _transaction_verifier, 
_parameter_download_task_handle, _max_checkpoint_height, @@ -1036,7 +1036,7 @@ async fn rpc_getblockhash() { Buffer::new(mempool.clone(), 1), read_state, latest_chain_tip.clone(), - tower::ServiceBuilder::new().service(router_verifier), + tower::ServiceBuilder::new().service(block_verifier_router), MockSyncStatus::default(), MockAddressBookPeers::default(), ); @@ -1205,7 +1205,7 @@ async fn rpc_getblocktemplate_mining_address(use_p2pkh: bool) { let mut mempool: MockService<_, _, _, BoxError> = MockService::build().for_unit_tests(); let read_state = MockService::build().for_unit_tests(); - let router_verifier = MockService::build().for_unit_tests(); + let block_verifier_router = MockService::build().for_unit_tests(); let mut mock_sync_status = MockSyncStatus::default(); mock_sync_status.set_is_close_to_tip(true); @@ -1246,7 +1246,7 @@ async fn rpc_getblocktemplate_mining_address(use_p2pkh: bool) { Buffer::new(mempool.clone(), 1), read_state.clone(), mock_chain_tip, - router_verifier, + block_verifier_router, mock_sync_status.clone(), MockAddressBookPeers::default(), ); @@ -1491,7 +1491,7 @@ async fn rpc_submitblock_errors() { // Init RPCs let ( - router_verifier, + block_verifier_router, _transaction_verifier, _parameter_download_task_handle, _max_checkpoint_height, @@ -1510,7 +1510,7 @@ async fn rpc_submitblock_errors() { Buffer::new(mempool.clone(), 1), read_state, latest_chain_tip.clone(), - router_verifier, + block_verifier_router, MockSyncStatus::default(), MockAddressBookPeers::default(), ); @@ -1658,7 +1658,7 @@ async fn rpc_getdifficulty() { let mempool: MockService<_, _, _, BoxError> = MockService::build().for_unit_tests(); let read_state = MockService::build().for_unit_tests(); - let router_verifier = MockService::build().for_unit_tests(); + let block_verifier_router = MockService::build().for_unit_tests(); let mut mock_sync_status = MockSyncStatus::default(); mock_sync_status.set_is_close_to_tip(true); @@ -1693,7 +1693,7 @@ async fn rpc_getdifficulty() { 
Buffer::new(mempool.clone(), 1), read_state.clone(), mock_chain_tip, - router_verifier, + block_verifier_router, mock_sync_status.clone(), MockAddressBookPeers::default(), ); diff --git a/zebra-rpc/src/server.rs b/zebra-rpc/src/server.rs index 3f02b64def6..d1bdd7ed920 100644 --- a/zebra-rpc/src/server.rs +++ b/zebra-rpc/src/server.rs @@ -105,7 +105,7 @@ impl RpcServer { mempool: Buffer, state: State, #[cfg_attr(not(feature = "getblocktemplate-rpcs"), allow(unused_variables))] - router_verifier: BlockVerifierRouter, + block_verifier_router: BlockVerifierRouter, #[cfg_attr(not(feature = "getblocktemplate-rpcs"), allow(unused_variables))] sync_status: SyncStatus, #[cfg_attr(not(feature = "getblocktemplate-rpcs"), allow(unused_variables))] @@ -171,7 +171,7 @@ impl RpcServer { mempool.clone(), state.clone(), latest_chain_tip.clone(), - router_verifier, + block_verifier_router, sync_status, address_book, ); diff --git a/zebra-rpc/src/server/tests/vectors.rs b/zebra-rpc/src/server/tests/vectors.rs index 91c6cffe861..2622b84ba86 100644 --- a/zebra-rpc/src/server/tests/vectors.rs +++ b/zebra-rpc/src/server/tests/vectors.rs @@ -52,7 +52,7 @@ fn rpc_server_spawn(parallel_cpu_threads: bool) { rt.block_on(async { let mut mempool: MockService<_, _, _, BoxError> = MockService::build().for_unit_tests(); let mut state: MockService<_, _, _, BoxError> = MockService::build().for_unit_tests(); - let mut router_verifier: MockService<_, _, _, BoxError> = + let mut block_verifier_router: MockService<_, _, _, BoxError> = MockService::build().for_unit_tests(); info!("spawning RPC server..."); @@ -64,7 +64,7 @@ fn rpc_server_spawn(parallel_cpu_threads: bool) { "RPC server test", Buffer::new(mempool.clone(), 1), Buffer::new(state.clone(), 1), - Buffer::new(router_verifier.clone(), 1), + Buffer::new(block_verifier_router.clone(), 1), MockSyncStatus::default(), MockAddressBookPeers::default(), NoChainTip, @@ -75,7 +75,7 @@ fn rpc_server_spawn(parallel_cpu_threads: bool) { 
mempool.expect_no_requests().await; state.expect_no_requests().await; - router_verifier.expect_no_requests().await; + block_verifier_router.expect_no_requests().await; // The server and queue tasks should continue without errors or panics let rpc_server_task_result = rpc_server_task_handle.now_or_never(); @@ -139,7 +139,7 @@ fn rpc_server_spawn_unallocated_port(parallel_cpu_threads: bool, do_shutdown: bo rt.block_on(async { let mut mempool: MockService<_, _, _, BoxError> = MockService::build().for_unit_tests(); let mut state: MockService<_, _, _, BoxError> = MockService::build().for_unit_tests(); - let mut router_verifier: MockService<_, _, _, BoxError> = + let mut block_verifier_router: MockService<_, _, _, BoxError> = MockService::build().for_unit_tests(); info!("spawning RPC server..."); @@ -151,7 +151,7 @@ fn rpc_server_spawn_unallocated_port(parallel_cpu_threads: bool, do_shutdown: bo "RPC server test", Buffer::new(mempool.clone(), 1), Buffer::new(state.clone(), 1), - Buffer::new(router_verifier.clone(), 1), + Buffer::new(block_verifier_router.clone(), 1), MockSyncStatus::default(), MockAddressBookPeers::default(), NoChainTip, @@ -162,7 +162,7 @@ fn rpc_server_spawn_unallocated_port(parallel_cpu_threads: bool, do_shutdown: bo mempool.expect_no_requests().await; state.expect_no_requests().await; - router_verifier.expect_no_requests().await; + block_verifier_router.expect_no_requests().await; if do_shutdown { rpc_server @@ -219,7 +219,7 @@ fn rpc_server_spawn_port_conflict() { let test_task_handle = rt.spawn(async { let mut mempool: MockService<_, _, _, BoxError> = MockService::build().for_unit_tests(); let mut state: MockService<_, _, _, BoxError> = MockService::build().for_unit_tests(); - let mut router_verifier: MockService<_, _, _, BoxError> = + let mut block_verifier_router: MockService<_, _, _, BoxError> = MockService::build().for_unit_tests(); info!("spawning RPC server 1..."); @@ -232,7 +232,7 @@ fn rpc_server_spawn_port_conflict() { "RPC server 1 test", 
Buffer::new(mempool.clone(), 1), Buffer::new(state.clone(), 1), - Buffer::new(router_verifier.clone(), 1), + Buffer::new(block_verifier_router.clone(), 1), MockSyncStatus::default(), MockAddressBookPeers::default(), NoChainTip, @@ -250,7 +250,7 @@ fn rpc_server_spawn_port_conflict() { "RPC server 2 conflict test", Buffer::new(mempool.clone(), 1), Buffer::new(state.clone(), 1), - Buffer::new(router_verifier.clone(), 1), + Buffer::new(block_verifier_router.clone(), 1), MockSyncStatus::default(), MockAddressBookPeers::default(), NoChainTip, @@ -261,7 +261,7 @@ fn rpc_server_spawn_port_conflict() { mempool.expect_no_requests().await; state.expect_no_requests().await; - router_verifier.expect_no_requests().await; + block_verifier_router.expect_no_requests().await; // Because there is a panic inside a multi-threaded executor, // we can't depend on the exact behaviour of the other tasks, @@ -329,7 +329,7 @@ fn rpc_server_spawn_port_conflict_parallel_auto() { let test_task_handle = rt.spawn(async { let mut mempool: MockService<_, _, _, BoxError> = MockService::build().for_unit_tests(); let mut state: MockService<_, _, _, BoxError> = MockService::build().for_unit_tests(); - let mut router_verifier: MockService<_, _, _, BoxError> = + let mut block_verifier_router: MockService<_, _, _, BoxError> = MockService::build().for_unit_tests(); info!("spawning parallel RPC server 1..."); @@ -342,7 +342,7 @@ fn rpc_server_spawn_port_conflict_parallel_auto() { "RPC server 1 test", Buffer::new(mempool.clone(), 1), Buffer::new(state.clone(), 1), - Buffer::new(router_verifier.clone(), 1), + Buffer::new(block_verifier_router.clone(), 1), MockSyncStatus::default(), MockAddressBookPeers::default(), NoChainTip, @@ -360,7 +360,7 @@ fn rpc_server_spawn_port_conflict_parallel_auto() { "RPC server 2 conflict test", Buffer::new(mempool.clone(), 1), Buffer::new(state.clone(), 1), - Buffer::new(router_verifier.clone(), 1), + Buffer::new(block_verifier_router.clone(), 1), MockSyncStatus::default(), 
MockAddressBookPeers::default(), NoChainTip, @@ -371,7 +371,7 @@ fn rpc_server_spawn_port_conflict_parallel_auto() { mempool.expect_no_requests().await; state.expect_no_requests().await; - router_verifier.expect_no_requests().await; + block_verifier_router.expect_no_requests().await; // Because there might be a panic inside a multi-threaded executor, // we can't depend on the exact behaviour of the other tasks, diff --git a/zebrad/src/commands/start.rs b/zebrad/src/commands/start.rs index 256c77ef09b..3e8e18230ec 100644 --- a/zebrad/src/commands/start.rs +++ b/zebrad/src/commands/start.rs @@ -158,7 +158,7 @@ impl StartCmd { .await; info!("initializing verifiers"); - let (router_verifier, tx_verifier, consensus_task_handles, max_checkpoint_height) = + let (block_verifier_router, tx_verifier, consensus_task_handles, max_checkpoint_height) = zebra_consensus::router::init( config.consensus.clone(), config.network.network, @@ -172,7 +172,7 @@ impl StartCmd { &config, max_checkpoint_height, peer_set.clone(), - router_verifier.clone(), + block_verifier_router.clone(), state.clone(), latest_chain_tip.clone(), ); @@ -197,7 +197,7 @@ impl StartCmd { let setup_data = InboundSetupData { address_book: address_book.clone(), block_download_peer_set: peer_set.clone(), - block_verifier: router_verifier.clone(), + block_verifier: block_verifier_router.clone(), mempool: mempool.clone(), state, latest_chain_tip: latest_chain_tip.clone(), @@ -219,7 +219,7 @@ impl StartCmd { user_agent(), mempool.clone(), read_only_state_service, - router_verifier, + block_verifier_router, sync_status.clone(), address_book, latest_chain_tip.clone(), diff --git a/zebrad/src/components/sync/tests/vectors.rs b/zebrad/src/components/sync/tests/vectors.rs index a9155184240..3a656904ef0 100644 --- a/zebrad/src/components/sync/tests/vectors.rs +++ b/zebrad/src/components/sync/tests/vectors.rs @@ -45,7 +45,7 @@ async fn sync_blocks_ok() -> Result<(), crate::BoxError> { let ( chain_sync_future, _sync_status, - 
mut router_verifier, + mut block_verifier_router, mut peer_set, mut state_service, _mock_chain_tip_sender, @@ -88,7 +88,7 @@ async fn sync_blocks_ok() -> Result<(), crate::BoxError> { .await .respond(zn::Response::Blocks(vec![Available(block0.clone())])); - router_verifier + block_verifier_router .expect_request(zebra_consensus::Request::Commit(block0)) .await .respond(block0_hash); @@ -96,7 +96,7 @@ async fn sync_blocks_ok() -> Result<(), crate::BoxError> { // Check that nothing unexpected happened. // We expect more requests to the state service, because the syncer keeps on running. peer_set.expect_no_requests().await; - router_verifier.expect_no_requests().await; + block_verifier_router.expect_no_requests().await; // State is checked for genesis again state_service @@ -144,7 +144,7 @@ async fn sync_blocks_ok() -> Result<(), crate::BoxError> { // Check that nothing unexpected happened. peer_set.expect_no_requests().await; - router_verifier.expect_no_requests().await; + block_verifier_router.expect_no_requests().await; // State is checked for all non-tip blocks (blocks 1 & 2) in response order state_service @@ -174,7 +174,7 @@ async fn sync_blocks_ok() -> Result<(), crate::BoxError> { .collect(); for _ in 1..=2 { - router_verifier + block_verifier_router .expect_request_that(|req| remaining_blocks.remove(&req.block().hash()).is_some()) .await .respond_with(|req| req.block().hash()); @@ -186,7 +186,7 @@ async fn sync_blocks_ok() -> Result<(), crate::BoxError> { ); // Check that nothing unexpected happened. - router_verifier.expect_no_requests().await; + block_verifier_router.expect_no_requests().await; state_service.expect_no_requests().await; // ChainSync::extend_tips @@ -217,7 +217,7 @@ async fn sync_blocks_ok() -> Result<(), crate::BoxError> { } // Check that nothing unexpected happened. 
- router_verifier.expect_no_requests().await; + block_verifier_router.expect_no_requests().await; state_service.expect_no_requests().await; // Blocks 3 & 4 are fetched in order, then verified concurrently @@ -238,7 +238,7 @@ async fn sync_blocks_ok() -> Result<(), crate::BoxError> { .collect(); for _ in 3..=4 { - router_verifier + block_verifier_router .expect_request_that(|req| remaining_blocks.remove(&req.block().hash()).is_some()) .await .respond_with(|req| req.block().hash()); @@ -250,7 +250,7 @@ async fn sync_blocks_ok() -> Result<(), crate::BoxError> { ); // Check that nothing unexpected happened. - router_verifier.expect_no_requests().await; + block_verifier_router.expect_no_requests().await; state_service.expect_no_requests().await; let chain_sync_result = chain_sync_task_handle.now_or_never(); @@ -272,7 +272,7 @@ async fn sync_blocks_duplicate_hashes_ok() -> Result<(), crate::BoxError> { let ( chain_sync_future, _sync_status, - mut router_verifier, + mut block_verifier_router, mut peer_set, mut state_service, _mock_chain_tip_sender, @@ -315,7 +315,7 @@ async fn sync_blocks_duplicate_hashes_ok() -> Result<(), crate::BoxError> { .await .respond(zn::Response::Blocks(vec![Available(block0.clone())])); - router_verifier + block_verifier_router .expect_request(zebra_consensus::Request::Commit(block0)) .await .respond(block0_hash); @@ -323,7 +323,7 @@ async fn sync_blocks_duplicate_hashes_ok() -> Result<(), crate::BoxError> { // Check that nothing unexpected happened. // We expect more requests to the state service, because the syncer keeps on running. peer_set.expect_no_requests().await; - router_verifier.expect_no_requests().await; + block_verifier_router.expect_no_requests().await; // State is checked for genesis again state_service @@ -373,7 +373,7 @@ async fn sync_blocks_duplicate_hashes_ok() -> Result<(), crate::BoxError> { // Check that nothing unexpected happened. 
peer_set.expect_no_requests().await; - router_verifier.expect_no_requests().await; + block_verifier_router.expect_no_requests().await; // State is checked for all non-tip blocks (blocks 1 & 2) in response order state_service @@ -403,7 +403,7 @@ async fn sync_blocks_duplicate_hashes_ok() -> Result<(), crate::BoxError> { .collect(); for _ in 1..=2 { - router_verifier + block_verifier_router .expect_request_that(|req| remaining_blocks.remove(&req.block().hash()).is_some()) .await .respond_with(|req| req.block().hash()); @@ -415,7 +415,7 @@ async fn sync_blocks_duplicate_hashes_ok() -> Result<(), crate::BoxError> { ); // Check that nothing unexpected happened. - router_verifier.expect_no_requests().await; + block_verifier_router.expect_no_requests().await; state_service.expect_no_requests().await; // ChainSync::extend_tips @@ -448,7 +448,7 @@ async fn sync_blocks_duplicate_hashes_ok() -> Result<(), crate::BoxError> { } // Check that nothing unexpected happened. - router_verifier.expect_no_requests().await; + block_verifier_router.expect_no_requests().await; state_service.expect_no_requests().await; // Blocks 3 & 4 are fetched in order, then verified concurrently @@ -469,7 +469,7 @@ async fn sync_blocks_duplicate_hashes_ok() -> Result<(), crate::BoxError> { .collect(); for _ in 3..=4 { - router_verifier + block_verifier_router .expect_request_that(|req| remaining_blocks.remove(&req.block().hash()).is_some()) .await .respond_with(|req| req.block().hash()); @@ -481,7 +481,7 @@ async fn sync_blocks_duplicate_hashes_ok() -> Result<(), crate::BoxError> { ); // Check that nothing unexpected happened. 
- router_verifier.expect_no_requests().await; + block_verifier_router.expect_no_requests().await; state_service.expect_no_requests().await; let chain_sync_result = chain_sync_task_handle.now_or_never(); @@ -500,7 +500,7 @@ async fn sync_block_lookahead_drop() -> Result<(), crate::BoxError> { let ( chain_sync_future, _sync_status, - mut router_verifier, + mut block_verifier_router, mut peer_set, mut state_service, _mock_chain_tip_sender, @@ -535,7 +535,7 @@ async fn sync_block_lookahead_drop() -> Result<(), crate::BoxError> { // Block is dropped because it is too far ahead of the tip. // We expect more requests to the state service, because the syncer keeps on running. peer_set.expect_no_requests().await; - router_verifier.expect_no_requests().await; + block_verifier_router.expect_no_requests().await; let chain_sync_result = chain_sync_task_handle.now_or_never(); assert!( @@ -555,7 +555,7 @@ async fn sync_block_too_high_obtain_tips() -> Result<(), crate::BoxError> { let ( chain_sync_future, _sync_status, - mut router_verifier, + mut block_verifier_router, mut peer_set, mut state_service, _mock_chain_tip_sender, @@ -597,7 +597,7 @@ async fn sync_block_too_high_obtain_tips() -> Result<(), crate::BoxError> { .await .respond(zn::Response::Blocks(vec![Available(block0.clone())])); - router_verifier + block_verifier_router .expect_request(zebra_consensus::Request::Commit(block0)) .await .respond(block0_hash); @@ -605,7 +605,7 @@ async fn sync_block_too_high_obtain_tips() -> Result<(), crate::BoxError> { // Check that nothing unexpected happened. // We expect more requests to the state service, because the syncer keeps on running. peer_set.expect_no_requests().await; - router_verifier.expect_no_requests().await; + block_verifier_router.expect_no_requests().await; // State is checked for genesis again state_service @@ -654,7 +654,7 @@ async fn sync_block_too_high_obtain_tips() -> Result<(), crate::BoxError> { // Check that nothing unexpected happened. 
peer_set.expect_no_requests().await; - router_verifier.expect_no_requests().await; + block_verifier_router.expect_no_requests().await; // State is checked for all non-tip blocks (blocks 982k, 1, 2) in response order state_service @@ -710,7 +710,7 @@ async fn sync_block_too_high_extend_tips() -> Result<(), crate::BoxError> { let ( chain_sync_future, _sync_status, - mut router_verifier, + mut block_verifier_router, mut peer_set, mut state_service, _mock_chain_tip_sender, @@ -758,7 +758,7 @@ async fn sync_block_too_high_extend_tips() -> Result<(), crate::BoxError> { .await .respond(zn::Response::Blocks(vec![Available(block0.clone())])); - router_verifier + block_verifier_router .expect_request(zebra_consensus::Request::Commit(block0)) .await .respond(block0_hash); @@ -766,7 +766,7 @@ async fn sync_block_too_high_extend_tips() -> Result<(), crate::BoxError> { // Check that nothing unexpected happened. // We expect more requests to the state service, because the syncer keeps on running. peer_set.expect_no_requests().await; - router_verifier.expect_no_requests().await; + block_verifier_router.expect_no_requests().await; // State is checked for genesis again state_service @@ -814,7 +814,7 @@ async fn sync_block_too_high_extend_tips() -> Result<(), crate::BoxError> { // Check that nothing unexpected happened. peer_set.expect_no_requests().await; - router_verifier.expect_no_requests().await; + block_verifier_router.expect_no_requests().await; // State is checked for all non-tip blocks (blocks 1 & 2) in response order state_service @@ -844,7 +844,7 @@ async fn sync_block_too_high_extend_tips() -> Result<(), crate::BoxError> { .collect(); for _ in 1..=2 { - router_verifier + block_verifier_router .expect_request_that(|req| remaining_blocks.remove(&req.block().hash()).is_some()) .await .respond_with(|req| req.block().hash()); @@ -856,7 +856,7 @@ async fn sync_block_too_high_extend_tips() -> Result<(), crate::BoxError> { ); // Check that nothing unexpected happened. 
- router_verifier.expect_no_requests().await; + block_verifier_router.expect_no_requests().await; state_service.expect_no_requests().await; // ChainSync::extend_tips @@ -888,7 +888,7 @@ async fn sync_block_too_high_extend_tips() -> Result<(), crate::BoxError> { } // Check that nothing unexpected happened. - router_verifier.expect_no_requests().await; + block_verifier_router.expect_no_requests().await; state_service.expect_no_requests().await; // Blocks 3, 4, 982k are fetched in order, then verified concurrently, @@ -951,7 +951,7 @@ fn setup() -> ( .with_max_request_delay(MAX_SERVICE_REQUEST_DELAY) .for_unit_tests(); - let router_verifier = MockService::build() + let block_verifier_router = MockService::build() .with_max_request_delay(MAX_SERVICE_REQUEST_DELAY) .for_unit_tests(); @@ -965,7 +965,7 @@ fn setup() -> ( &config, Height(0), peer_set.clone(), - router_verifier.clone(), + block_verifier_router.clone(), state_service.clone(), mock_chain_tip, ); @@ -975,7 +975,7 @@ fn setup() -> ( ( chain_sync_future, sync_status, - router_verifier, + block_verifier_router, peer_set, state_service, mock_chain_tip_sender, From ac4a34231b412399383398c33d72692601cd50bd Mon Sep 17 00:00:00 2001 From: teor Date: Tue, 20 Jun 2023 21:04:32 +1000 Subject: [PATCH 128/265] Change network upgrade wording in release-checklist.md (#7010) --- .github/PULL_REQUEST_TEMPLATE/release-checklist.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/PULL_REQUEST_TEMPLATE/release-checklist.md b/.github/PULL_REQUEST_TEMPLATE/release-checklist.md index 4a97b69104e..f18de3d5195 100644 --- a/.github/PULL_REQUEST_TEMPLATE/release-checklist.md +++ b/.github/PULL_REQUEST_TEMPLATE/release-checklist.md @@ -40,7 +40,7 @@ These release steps can be done a few days before the release, in the same PR: Zebra follows [semantic versioning](https://semver.org). 
Semantic versions look like: MAJOR.MINOR.PATCH[-TAG.PRE-RELEASE] Choose a release level for `zebrad` based on the changes in the release that users will see: -- mainnet network upgrades are `major` releases +- Mainnet Network Upgrades are `major` releases - new features, large changes, deprecations, and removals are `minor` releases - otherwise, it is a `patch` release From 067e32b3806c431ae53756dbaaa9f1a077ba95fa Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 20 Jun 2023 15:19:36 +0000 Subject: [PATCH 129/265] build(deps): bump tj-actions/changed-files from 36.3.0 to 36.4.0 (#7004) Bumps [tj-actions/changed-files](https://github.com/tj-actions/changed-files) from 36.3.0 to 36.4.0. - [Release notes](https://github.com/tj-actions/changed-files/releases) - [Changelog](https://github.com/tj-actions/changed-files/blob/main/HISTORY.md) - [Commits](https://github.com/tj-actions/changed-files/compare/v36.3.0...v36.4.0) --- updated-dependencies: - dependency-name: tj-actions/changed-files dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/lint.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 874bd5ac685..d2f56cbd1f9 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -37,7 +37,7 @@ jobs: - name: Rust files id: changed-files-rust - uses: tj-actions/changed-files@v36.3.0 + uses: tj-actions/changed-files@v36.4.0 with: files: | **/*.rs @@ -49,7 +49,7 @@ jobs: - name: Workflow files id: changed-files-workflows - uses: tj-actions/changed-files@v36.3.0 + uses: tj-actions/changed-files@v36.4.0 with: files: | .github/workflows/*.yml From 44b7a8bde5d008683c1838d5ad71a6be5db72261 Mon Sep 17 00:00:00 2001 From: Deirdre Connolly Date: Tue, 20 Jun 2023 18:36:00 -0400 Subject: [PATCH 130/265] change(rename): Update missed tower-batch-control renames (#7011) --- .github/CODEOWNERS | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index d53cdbc8b00..4275337d52a 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -14,7 +14,7 @@ /zebrad/src/commands/start.rs @ZcashFoundation/general-rust-reviewers # Network and Async Code -/tower-batch/ @ZcashFoundation/network-reviewers +/tower-batch-control/ @ZcashFoundation/network-reviewers /tower-fallback/ @ZcashFoundation/network-reviewers /zebra-network/ @ZcashFoundation/network-reviewers /zebra-node-services/ @ZcashFoundation/network-reviewers From d8c29809f4e3265907041697173ccaca01519f99 Mon Sep 17 00:00:00 2001 From: teor Date: Wed, 21 Jun 2023 20:17:26 +1000 Subject: [PATCH 131/265] Refactor terminal color checks, fix force_color on panic logs (#6997) --- zebrad/src/application.rs | 10 ++-------- zebrad/src/components/tracing.rs | 15 +++++++++++++++ zebrad/src/components/tracing/component.rs | 9 ++++----- 3 files changed, 21 insertions(+), 13 deletions(-) diff --git 
a/zebrad/src/application.rs b/zebrad/src/application.rs index 68134f7d878..d7fb3356dd1 100644 --- a/zebrad/src/application.rs +++ b/zebrad/src/application.rs @@ -166,11 +166,6 @@ pub struct ZebradApp { } impl ZebradApp { - /// Are standard output and standard error both connected to ttys? - fn outputs_are_ttys() -> bool { - atty::is(atty::Stream::Stdout) && atty::is(atty::Stream::Stderr) - } - /// Returns the git commit for this build, if available. /// /// @@ -215,8 +210,7 @@ impl Application for ZebradApp { // of the `color_eyre::install` part of `Terminal::new` without // ColorChoice::Never? - // The Tracing component uses stdout directly and will apply colors - // `if Self::outputs_are_ttys() && config.tracing.use_colors` + // The Tracing component uses stdout directly and will apply colors automatically. // // Note: It's important to use `ColorChoice::Never` here to avoid panicking in // `register_components()` below if `color_eyre::install()` is called @@ -257,7 +251,7 @@ impl Application for ZebradApp { let config = command.process_config(config)?; - let theme = if Self::outputs_are_ttys() && config.tracing.use_color { + let theme = if config.tracing.use_color_stdout_and_stderr() { color_eyre::config::Theme::dark() } else { color_eyre::config::Theme::new() diff --git a/zebrad/src/components/tracing.rs b/zebrad/src/components/tracing.rs index 3aa27f4c756..d12491f87a7 100644 --- a/zebrad/src/components/tracing.rs +++ b/zebrad/src/components/tracing.rs @@ -131,6 +131,21 @@ pub struct Config { pub use_journald: bool, } +impl Config { + /// Returns `true` if standard output should use color escapes. + /// Automatically checks if Zebra is running in a terminal. + pub fn use_color_stdout(&self) -> bool { + self.force_use_color || (self.use_color && atty::is(atty::Stream::Stdout)) + } + + /// Returns `true` if output that could go to standard output or standard error + /// should use color escapes. Automatically checks if Zebra is running in a terminal. 
+ pub fn use_color_stdout_and_stderr(&self) -> bool { + self.force_use_color + || (self.use_color && atty::is(atty::Stream::Stdout) && atty::is(atty::Stream::Stderr)) + } +} + impl Default for Config { fn default() -> Self { #[cfg(feature = "progress-bar")] diff --git a/zebrad/src/components/tracing/component.rs b/zebrad/src/components/tracing/component.rs index 8f32e0943a5..4a3a4560eb3 100644 --- a/zebrad/src/components/tracing/component.rs +++ b/zebrad/src/components/tracing/component.rs @@ -54,6 +54,10 @@ impl Tracing { /// Try to create a new [`Tracing`] component with the given `filter`. #[allow(clippy::print_stdout, clippy::print_stderr)] pub fn new(config: Config) -> Result { + // Only use color if tracing output is being sent to a terminal or if it was explicitly + // forced to. + let use_color = config.use_color_stdout(); + let filter = config.filter.unwrap_or_default(); let flame_root = &config.flamegraph; @@ -98,11 +102,6 @@ impl Tracing { .buffered_lines_limit(config.buffer_limit.max(100)) .finish(writer); - // Only use color if tracing output is being sent to a terminal or if it was explicitly - // forced to. - let use_color = - config.force_use_color || (config.use_color && atty::is(atty::Stream::Stdout)); - // Construct a format subscriber with the supplied global logging filter, // and optionally enable reloading. 
// From 17e14d9349a2196598c45128cc8db886eb251ad1 Mon Sep 17 00:00:00 2001 From: teor Date: Wed, 21 Jun 2023 22:38:35 +1000 Subject: [PATCH 132/265] Use correct cargo release manifest key name (#7028) Avoids an "unused manifest key: metadata" warning --- zebrad/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/zebrad/Cargo.toml b/zebrad/Cargo.toml index 9fb96712c70..cfeef6df271 100644 --- a/zebrad/Cargo.toml +++ b/zebrad/Cargo.toml @@ -28,7 +28,7 @@ rust-version = "1.66" default-run = "zebrad" # `cargo release` settings -[metadata.release] +[package.metadata.release] pre-release-replacements = [ {file="../book/src/user/install.md", search="git checkout [a-z0-9\\.-]+", replace="git checkout {{version}}"}, {file="../book/src/user/install.md", search="--tag [a-z0-9\\.-]+", replace="--tag {{version}}"}, From 343a683ceaf5f409f6fbfcfef51fbcf0debb77cd Mon Sep 17 00:00:00 2001 From: teor Date: Thu, 22 Jun 2023 01:02:05 +1000 Subject: [PATCH 133/265] cleanup(test): Make test debugging output more readable (#7027) * Fix some debug impls to use hex rather than u8 arrays * Hide extremely long debug data in proptests --- zebra-chain/src/fmt.rs | 2 +- zebra-chain/src/sprout/joinsplit.rs | 21 +++++++++++++++++++-- zebra-chain/src/transaction/joinsplit.rs | 14 +++++++++++++- zebrad/src/components/mempool/tests/prop.rs | 4 ++-- 4 files changed, 35 insertions(+), 6 deletions(-) diff --git a/zebra-chain/src/fmt.rs b/zebra-chain/src/fmt.rs index 800663147b6..98923446c99 100644 --- a/zebra-chain/src/fmt.rs +++ b/zebra-chain/src/fmt.rs @@ -162,7 +162,7 @@ where } /// Wrapper to override `Debug`, redirecting it to hex-encode the type. -/// The type must be hex-encodable. +/// The type must implement `AsRef<[u8]>`. 
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)] #[cfg_attr(any(test, feature = "proptest-impl"), derive(Arbitrary))] #[serde(transparent)] diff --git a/zebra-chain/src/sprout/joinsplit.rs b/zebra-chain/src/sprout/joinsplit.rs index 059ac4be5a0..ca891e5f892 100644 --- a/zebra-chain/src/sprout/joinsplit.rs +++ b/zebra-chain/src/sprout/joinsplit.rs @@ -1,6 +1,6 @@ //! Sprout funds transfers using [`JoinSplit`]s. -use std::io; +use std::{fmt, io}; use serde::{Deserialize, Serialize}; @@ -49,7 +49,7 @@ impl From<&RandomSeed> for [u8; 32] { /// A _JoinSplit Description_, as described in [protocol specification §7.2][ps]. /// /// [ps]: https://zips.z.cash/protocol/protocol.pdf#joinsplitencoding -#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)] +#[derive(PartialEq, Eq, Clone, Serialize, Deserialize)] pub struct JoinSplit { /// A value that the JoinSplit transfer removes from the transparent value /// pool. @@ -81,6 +81,23 @@ pub struct JoinSplit { pub enc_ciphertexts: [note::EncryptedNote; 2], } +impl fmt::Debug for JoinSplit

{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("JoinSplit") + .field("vpub_old", &self.vpub_old) + .field("vpub_new", &self.vpub_new) + .field("anchor", &self.anchor) + .field("nullifiers", &self.nullifiers) + .field("commitments", &self.commitments) + .field("ephemeral_key", &HexDebug(self.ephemeral_key.as_bytes())) + .field("random_seed", &self.random_seed) + .field("vmacs", &self.vmacs) + .field("zkproof", &self.zkproof) + .field("enc_ciphertexts", &self.enc_ciphertexts) + .finish() + } +} + impl ZcashSerialize for JoinSplit

{ fn zcash_serialize(&self, mut writer: W) -> Result<(), io::Error> { self.vpub_old.zcash_serialize(&mut writer)?; diff --git a/zebra-chain/src/transaction/joinsplit.rs b/zebra-chain/src/transaction/joinsplit.rs index 0735bb6e3bc..80103b16e5e 100644 --- a/zebra-chain/src/transaction/joinsplit.rs +++ b/zebra-chain/src/transaction/joinsplit.rs @@ -4,6 +4,7 @@ use serde::{Deserialize, Serialize}; use crate::{ amount::{self, Amount, NegativeAllowed}, + fmt::HexDebug, primitives::{ed25519, ZkSnarkProof}, sprout::{self, JoinSplit, Nullifier}, }; @@ -16,7 +17,7 @@ use crate::{ /// description with the required signature data, so that an /// `Option` correctly models the presence or absence of any /// JoinSplit data. -#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +#[derive(Clone, PartialEq, Eq, Serialize, Deserialize)] pub struct JoinSplitData { /// The first JoinSplit description in the transaction, /// using proofs of type `P`. @@ -48,6 +49,17 @@ pub struct JoinSplitData { pub sig: ed25519::Signature, } +impl fmt::Debug for JoinSplitData

{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("JoinSplitData") + .field("first", &self.first) + .field("rest", &self.rest) + .field("pub_key", &self.pub_key) + .field("sig", &HexDebug(&self.sig.to_bytes())) + .finish() + } +} + impl fmt::Display for JoinSplitData

{ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let mut fmter = diff --git a/zebrad/src/components/mempool/tests/prop.rs b/zebrad/src/components/mempool/tests/prop.rs index 46e85b35e14..0b0a35cd19e 100644 --- a/zebrad/src/components/mempool/tests/prop.rs +++ b/zebrad/src/components/mempool/tests/prop.rs @@ -11,7 +11,7 @@ use tower::{buffer::Buffer, util::BoxService}; use zebra_chain::{ block::{self, Block}, - fmt::DisplayToDebug, + fmt::{DisplayToDebug, TypeNameToDebug}, parameters::{Network, NetworkUpgrade}, serialization::ZcashDeserializeInto, transaction::VerifiedUnminedTx, @@ -103,7 +103,7 @@ proptest! { network in any::(), mut previous_chain_tip in any::>(), mut transactions in vec(any::>(), 0..CHAIN_LENGTH), - fake_chain_tips in vec(any::>(), 0..CHAIN_LENGTH), + fake_chain_tips in vec(any::>(), 0..CHAIN_LENGTH), ) { let (runtime, _init_guard) = zebra_test::init_async(); From 006c2ae42b446201ae3920e975bfd98b34c461e1 Mon Sep 17 00:00:00 2001 From: Marek Date: Wed, 21 Jun 2023 18:58:11 +0200 Subject: [PATCH 134/265] change(state): Refactor the structure of verified blocks (#7025) * Refactor `CheckpointVerifiedBlock` This commit turns `CheckpointVerifiedBlock` into a wrapper of `SemanticallyVerifiedBlock` since both structs have the same fields. * Refactor `ContextuallyVerifiedBlockWithTrees` This commit uses `SemanticallyVerifiedBlock` in `ContextuallyVerifiedBlockWithTrees` instead of `CheckpointVerifiedBlock`. 
--- zebra-state/src/arbitrary.rs | 4 +- zebra-state/src/request.rs | 113 ++++++++++-------- zebra-state/src/service/chain_tip.rs | 6 +- zebra-state/src/service/finalized_state.rs | 2 +- .../service/finalized_state/zebra_db/block.rs | 17 ++- .../service/finalized_state/zebra_db/chain.rs | 11 +- .../finalized_state/zebra_db/shielded.rs | 15 ++- .../finalized_state/zebra_db/transparent.rs | 6 +- .../non_finalized_state/tests/vectors.rs | 4 +- 9 files changed, 95 insertions(+), 83 deletions(-) diff --git a/zebra-state/src/arbitrary.rs b/zebra-state/src/arbitrary.rs index 2a8eaa5eae8..9f87c749c98 100644 --- a/zebra-state/src/arbitrary.rs +++ b/zebra-state/src/arbitrary.rs @@ -186,12 +186,12 @@ impl CheckpointVerifiedBlock { let new_outputs = transparent::new_ordered_outputs_with_height(&block, height, &transaction_hashes); - Self { + Self(SemanticallyVerifiedBlock { block, hash, height, new_outputs, transaction_hashes, - } + }) } } diff --git a/zebra-state/src/request.rs b/zebra-state/src/request.rs index dbff91022e6..5c1516886e8 100644 --- a/zebra-state/src/request.rs +++ b/zebra-state/src/request.rs @@ -2,7 +2,7 @@ use std::{ collections::{HashMap, HashSet}, - ops::RangeInclusive, + ops::{Deref, DerefMut, RangeInclusive}, sync::Arc, }; @@ -162,6 +162,17 @@ pub struct SemanticallyVerifiedBlock { pub transaction_hashes: Arc<[transaction::Hash]>, } +/// A block ready to be committed directly to the finalized state with +/// no checks. +/// +/// This is exposed for use in checkpointing. +/// +/// Note: The difference between a `CheckpointVerifiedBlock` and a `ContextuallyVerifiedBlock` is +/// that the `CheckpointVerifier` doesn't bind the transaction authorizing data to the +/// `ChainHistoryBlockTxAuthCommitmentHash`, but the `NonFinalizedState` and `FinalizedState` do. 
+#[derive(Clone, Debug, PartialEq, Eq)] +pub struct CheckpointVerifiedBlock(pub(crate) SemanticallyVerifiedBlock); + // Some fields are pub(crate), so we can add whatever db-format-dependent // precomputation we want here without leaking internal details. @@ -211,36 +222,6 @@ pub struct ContextuallyVerifiedBlock { pub(crate) chain_value_pool_change: ValueBalance, } -/// A block ready to be committed directly to the finalized state with -/// no checks. -/// -/// This is exposed for use in checkpointing. -/// -/// Note: The difference between a `CheckpointVerifiedBlock` and a `ContextuallyVerifiedBlock` is -/// that the `CheckpointVerifier` doesn't bind the transaction authorizing data to the -/// `ChainHistoryBlockTxAuthCommitmentHash`, but the `NonFinalizedState` and `FinalizedState` do. -#[derive(Clone, Debug, PartialEq, Eq)] -pub struct CheckpointVerifiedBlock { - /// The block to commit to the state. - pub block: Arc, - /// The hash of the block. - pub hash: block::Hash, - /// The height of the block. - pub height: block::Height, - /// New transparent outputs created in this block, indexed by - /// [`OutPoint`](transparent::OutPoint). - /// - /// Note: although these transparent outputs are newly created, they may not - /// be unspent, since a later transaction in a block can spend outputs of an - /// earlier transaction. - /// - /// This field can also contain unrelated outputs, which are ignored. - pub(crate) new_outputs: HashMap, - /// A precomputed list of the hashes of the transactions in this block, - /// in the same order as `block.transactions`. - pub transaction_hashes: Arc<[transaction::Hash]>, -} - /// Wraps note commitment trees and the history tree together. pub struct Treestate { /// Note commitment trees. @@ -273,20 +254,18 @@ impl Treestate { /// Zebra's non-finalized state passes this `struct` over to the finalized state /// when committing a block. 
The associated treestate is passed so that the /// finalized state does not have to retrieve the previous treestate from the -/// database and recompute the new one. +/// database and recompute a new one. pub struct ContextuallyVerifiedBlockWithTrees { /// A block ready to be committed. - pub checkpoint_verified: CheckpointVerifiedBlock, + pub block: SemanticallyVerifiedBlock, /// The tresstate associated with the block. pub treestate: Option, } impl ContextuallyVerifiedBlockWithTrees { - pub fn new(block: ContextuallyVerifiedBlock, treestate: Treestate) -> Self { - let checkpoint_verified = CheckpointVerifiedBlock::from(block); - + pub fn new(contextually_verified: ContextuallyVerifiedBlock, treestate: Treestate) -> Self { Self { - checkpoint_verified, + block: SemanticallyVerifiedBlock::from(contextually_verified), treestate: Some(treestate), } } @@ -294,14 +273,23 @@ impl ContextuallyVerifiedBlockWithTrees { impl From> for ContextuallyVerifiedBlockWithTrees { fn from(block: Arc) -> Self { - Self::from(CheckpointVerifiedBlock::from(block)) + Self::from(SemanticallyVerifiedBlock::from(block)) + } +} + +impl From for ContextuallyVerifiedBlockWithTrees { + fn from(semantically_verified: SemanticallyVerifiedBlock) -> Self { + Self { + block: semantically_verified, + treestate: None, + } } } impl From for ContextuallyVerifiedBlockWithTrees { - fn from(block: CheckpointVerifiedBlock) -> Self { + fn from(checkpoint_verified: CheckpointVerifiedBlock) -> Self { Self { - checkpoint_verified: block, + block: checkpoint_verified.0, treestate: None, } } @@ -358,20 +346,15 @@ impl ContextuallyVerifiedBlock { } } -impl CheckpointVerifiedBlock { - /// Create a block that's ready to be committed to the finalized state, - /// using a precalculated [`block::Hash`]. - /// - /// Note: a [`CheckpointVerifiedBlock`] isn't actually finalized - /// until [`Request::CommitCheckpointVerifiedBlock`] returns success. 
- pub fn with_hash(block: Arc, hash: block::Hash) -> Self { +impl SemanticallyVerifiedBlock { + fn with_hash(block: Arc, hash: block::Hash) -> Self { let height = block .coinbase_height() .expect("coinbase height was already checked"); let transaction_hashes: Arc<[_]> = block.transactions.iter().map(|tx| tx.hash()).collect(); let new_outputs = transparent::new_ordered_outputs(&block, &transaction_hashes); - Self { + SemanticallyVerifiedBlock { block, hash, height, @@ -381,6 +364,17 @@ impl CheckpointVerifiedBlock { } } +impl CheckpointVerifiedBlock { + /// Create a block that's ready to be committed to the finalized state, + /// using a precalculated [`block::Hash`]. + /// + /// Note: a [`CheckpointVerifiedBlock`] isn't actually finalized + /// until [`Request::CommitCheckpointVerifiedBlock`] returns success. + pub fn with_hash(block: Arc, hash: block::Hash) -> Self { + Self(SemanticallyVerifiedBlock::with_hash(block, hash)) + } +} + impl From> for CheckpointVerifiedBlock { fn from(block: Arc) -> Self { let hash = block.hash(); @@ -389,7 +383,15 @@ impl From> for CheckpointVerifiedBlock { } } -impl From for CheckpointVerifiedBlock { +impl From> for SemanticallyVerifiedBlock { + fn from(block: Arc) -> Self { + let hash = block.hash(); + + SemanticallyVerifiedBlock::with_hash(block, hash) + } +} + +impl From for SemanticallyVerifiedBlock { fn from(contextually_valid: ContextuallyVerifiedBlock) -> Self { let ContextuallyVerifiedBlock { block, @@ -411,6 +413,19 @@ impl From for CheckpointVerifiedBlock { } } +impl Deref for CheckpointVerifiedBlock { + type Target = SemanticallyVerifiedBlock; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} +impl DerefMut for CheckpointVerifiedBlock { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } +} + #[derive(Clone, Debug, PartialEq, Eq)] /// A query about or modification to the chain state, via the /// [`StateService`](crate::service::StateService). 
diff --git a/zebra-state/src/service/chain_tip.rs b/zebra-state/src/service/chain_tip.rs index c08571c76e3..76f57bfabe7 100644 --- a/zebra-state/src/service/chain_tip.rs +++ b/zebra-state/src/service/chain_tip.rs @@ -21,7 +21,7 @@ use zebra_chain::{ use crate::{ request::ContextuallyVerifiedBlock, service::watch_receiver::WatchReceiver, - CheckpointVerifiedBlock, + CheckpointVerifiedBlock, SemanticallyVerifiedBlock, }; use TipAction::*; @@ -109,13 +109,13 @@ impl From for ChainTipBlock { impl From for ChainTipBlock { fn from(finalized: CheckpointVerifiedBlock) -> Self { - let CheckpointVerifiedBlock { + let CheckpointVerifiedBlock(SemanticallyVerifiedBlock { block, hash, height, transaction_hashes, .. - } = finalized; + }) = finalized; Self { hash, diff --git a/zebra-state/src/service/finalized_state.rs b/zebra-state/src/service/finalized_state.rs index c6ca264f38e..ca1f5887051 100644 --- a/zebra-state/src/service/finalized_state.rs +++ b/zebra-state/src/service/finalized_state.rs @@ -228,7 +228,7 @@ impl FinalizedState { contextually_verified_with_trees: ContextuallyVerifiedBlockWithTrees, source: &str, ) -> Result { - let finalized = contextually_verified_with_trees.checkpoint_verified; + let finalized = contextually_verified_with_trees.block; let committed_tip_hash = self.db.finalized_tip_hash(); let committed_tip_height = self.db.finalized_tip_height(); diff --git a/zebra-state/src/service/finalized_state/zebra_db/block.rs b/zebra-state/src/service/finalized_state/zebra_db/block.rs index 61e19100a0d..aad9f2272bd 100644 --- a/zebra-state/src/service/finalized_state/zebra_db/block.rs +++ b/zebra-state/src/service/finalized_state/zebra_db/block.rs @@ -38,9 +38,8 @@ use crate::{ transparent::{AddressBalanceLocation, OutputLocation}, }, zebra_db::{metrics::block_precommit_metrics, ZebraDb}, - CheckpointVerifiedBlock, }, - BoxError, HashOrHeight, + BoxError, HashOrHeight, SemanticallyVerifiedBlock, }; #[cfg(test)] @@ -282,7 +281,7 @@ impl ZebraDb { /// - Propagates 
any errors from updating history and note commitment trees pub(in super::super) fn write_block( &mut self, - finalized: CheckpointVerifiedBlock, + finalized: SemanticallyVerifiedBlock, history_tree: Arc, note_commitment_trees: NoteCommitmentTrees, network: Network, @@ -430,7 +429,7 @@ impl DiskWriteBatch { pub fn prepare_block_batch( &mut self, db: &DiskDb, - finalized: CheckpointVerifiedBlock, + finalized: SemanticallyVerifiedBlock, new_outputs_by_out_loc: BTreeMap, spent_utxos_by_outpoint: HashMap, spent_utxos_by_out_loc: BTreeMap, @@ -439,7 +438,7 @@ impl DiskWriteBatch { note_commitment_trees: NoteCommitmentTrees, value_pool: ValueBalance, ) -> Result<(), BoxError> { - let CheckpointVerifiedBlock { + let SemanticallyVerifiedBlock { block, hash, height, @@ -495,7 +494,7 @@ impl DiskWriteBatch { pub fn prepare_block_header_and_transaction_data_batch( &mut self, db: &DiskDb, - finalized: &CheckpointVerifiedBlock, + finalized: &SemanticallyVerifiedBlock, ) -> Result<(), BoxError> { // Blocks let block_header_by_height = db.cf_handle("block_header_by_height").unwrap(); @@ -507,7 +506,7 @@ impl DiskWriteBatch { let hash_by_tx_loc = db.cf_handle("hash_by_tx_loc").unwrap(); let tx_loc_by_hash = db.cf_handle("tx_loc_by_hash").unwrap(); - let CheckpointVerifiedBlock { + let SemanticallyVerifiedBlock { block, hash, height, @@ -554,9 +553,9 @@ impl DiskWriteBatch { pub fn prepare_genesis_batch( &mut self, db: &DiskDb, - finalized: &CheckpointVerifiedBlock, + finalized: &SemanticallyVerifiedBlock, ) -> bool { - let CheckpointVerifiedBlock { block, .. } = finalized; + let SemanticallyVerifiedBlock { block, .. 
} = finalized; if block.header.previous_block_hash == GENESIS_PREVIOUS_BLOCK_HASH { self.prepare_genesis_note_commitment_tree_batch(db, finalized); diff --git a/zebra-state/src/service/finalized_state/zebra_db/chain.rs b/zebra-state/src/service/finalized_state/zebra_db/chain.rs index b8db8d717d3..590f609d824 100644 --- a/zebra-state/src/service/finalized_state/zebra_db/chain.rs +++ b/zebra-state/src/service/finalized_state/zebra_db/chain.rs @@ -24,9 +24,8 @@ use crate::{ service::finalized_state::{ disk_db::{DiskDb, DiskWriteBatch, ReadDisk, WriteDisk}, zebra_db::ZebraDb, - CheckpointVerifiedBlock, }, - BoxError, + BoxError, SemanticallyVerifiedBlock, }; impl ZebraDb { @@ -70,12 +69,12 @@ impl DiskWriteBatch { pub fn prepare_history_batch( &mut self, db: &DiskDb, - finalized: &CheckpointVerifiedBlock, + finalized: &SemanticallyVerifiedBlock, history_tree: Arc, ) -> Result<(), BoxError> { let history_tree_cf = db.cf_handle("history_tree").unwrap(); - let CheckpointVerifiedBlock { height, .. } = finalized; + let SemanticallyVerifiedBlock { height, .. } = finalized; // Update the tree in state let current_tip_height = *height - 1; @@ -108,13 +107,13 @@ impl DiskWriteBatch { pub fn prepare_chain_value_pools_batch( &mut self, db: &DiskDb, - finalized: &CheckpointVerifiedBlock, + finalized: &SemanticallyVerifiedBlock, utxos_spent_by_block: HashMap, value_pool: ValueBalance, ) -> Result<(), BoxError> { let tip_chain_value_pool = db.cf_handle("tip_chain_value_pool").unwrap(); - let CheckpointVerifiedBlock { block, .. } = finalized; + let SemanticallyVerifiedBlock { block, .. 
} = finalized; let new_pool = value_pool.add_block(block.borrow(), &utxos_spent_by_block)?; self.zs_insert(&tip_chain_value_pool, (), new_pool); diff --git a/zebra-state/src/service/finalized_state/zebra_db/shielded.rs b/zebra-state/src/service/finalized_state/zebra_db/shielded.rs index 83a4d36f67f..ac306bdfe1b 100644 --- a/zebra-state/src/service/finalized_state/zebra_db/shielded.rs +++ b/zebra-state/src/service/finalized_state/zebra_db/shielded.rs @@ -23,9 +23,8 @@ use crate::{ service::finalized_state::{ disk_db::{DiskDb, DiskWriteBatch, ReadDisk, WriteDisk}, zebra_db::ZebraDb, - CheckpointVerifiedBlock, }, - BoxError, + BoxError, SemanticallyVerifiedBlock, }; impl ZebraDb { @@ -210,9 +209,9 @@ impl DiskWriteBatch { pub fn prepare_shielded_transaction_batch( &mut self, db: &DiskDb, - finalized: &CheckpointVerifiedBlock, + finalized: &SemanticallyVerifiedBlock, ) -> Result<(), BoxError> { - let CheckpointVerifiedBlock { block, .. } = finalized; + let SemanticallyVerifiedBlock { block, .. } = finalized; // Index each transaction's shielded data for transaction in &block.transactions { @@ -265,7 +264,7 @@ impl DiskWriteBatch { pub fn prepare_note_commitment_batch( &mut self, db: &DiskDb, - finalized: &CheckpointVerifiedBlock, + finalized: &SemanticallyVerifiedBlock, note_commitment_trees: NoteCommitmentTrees, history_tree: Arc, ) -> Result<(), BoxError> { @@ -277,7 +276,7 @@ impl DiskWriteBatch { let sapling_note_commitment_tree_cf = db.cf_handle("sapling_note_commitment_tree").unwrap(); let orchard_note_commitment_tree_cf = db.cf_handle("orchard_note_commitment_tree").unwrap(); - let CheckpointVerifiedBlock { height, .. } = finalized; + let SemanticallyVerifiedBlock { height, .. } = finalized; // Use the cached values that were previously calculated in parallel. 
let sprout_root = note_commitment_trees.sprout.root(); @@ -328,13 +327,13 @@ impl DiskWriteBatch { pub fn prepare_genesis_note_commitment_tree_batch( &mut self, db: &DiskDb, - finalized: &CheckpointVerifiedBlock, + finalized: &SemanticallyVerifiedBlock, ) { let sprout_note_commitment_tree_cf = db.cf_handle("sprout_note_commitment_tree").unwrap(); let sapling_note_commitment_tree_cf = db.cf_handle("sapling_note_commitment_tree").unwrap(); let orchard_note_commitment_tree_cf = db.cf_handle("orchard_note_commitment_tree").unwrap(); - let CheckpointVerifiedBlock { height, .. } = finalized; + let SemanticallyVerifiedBlock { height, .. } = finalized; // Insert empty note commitment trees. Note that these can't be // used too early (e.g. the Orchard tree before Nu5 activates) diff --git a/zebra-state/src/service/finalized_state/zebra_db/transparent.rs b/zebra-state/src/service/finalized_state/zebra_db/transparent.rs index 91509631d26..9eda37a8888 100644 --- a/zebra-state/src/service/finalized_state/zebra_db/transparent.rs +++ b/zebra-state/src/service/finalized_state/zebra_db/transparent.rs @@ -35,7 +35,7 @@ use crate::{ }, zebra_db::ZebraDb, }, - BoxError, CheckpointVerifiedBlock, + BoxError, SemanticallyVerifiedBlock, }; impl ZebraDb { @@ -369,13 +369,13 @@ impl DiskWriteBatch { pub fn prepare_transparent_transaction_batch( &mut self, db: &DiskDb, - finalized: &CheckpointVerifiedBlock, + finalized: &SemanticallyVerifiedBlock, new_outputs_by_out_loc: &BTreeMap, spent_utxos_by_outpoint: &HashMap, spent_utxos_by_out_loc: &BTreeMap, mut address_balances: HashMap, ) -> Result<(), BoxError> { - let CheckpointVerifiedBlock { block, height, .. } = finalized; + let SemanticallyVerifiedBlock { block, height, .. 
} = finalized; // Update created and spent transparent outputs self.prepare_new_transparent_outputs_batch( diff --git a/zebra-state/src/service/non_finalized_state/tests/vectors.rs b/zebra-state/src/service/non_finalized_state/tests/vectors.rs index a8e61e7c060..a7e008bcf57 100644 --- a/zebra-state/src/service/non_finalized_state/tests/vectors.rs +++ b/zebra-state/src/service/non_finalized_state/tests/vectors.rs @@ -214,11 +214,11 @@ fn finalize_pops_from_best_chain_for_network(network: Network) -> Result<()> { state.commit_block(child.prepare(), &finalized_state)?; let finalized_with_trees = state.finalize(); - let finalized = finalized_with_trees.checkpoint_verified; + let finalized = finalized_with_trees.block; assert_eq!(block1, finalized.block); let finalized_with_trees = state.finalize(); - let finalized = finalized_with_trees.checkpoint_verified; + let finalized = finalized_with_trees.block; assert_eq!(block2, finalized.block); assert!(state.best_chain().is_none()); From 3d2c5ef29097f25558741e145772e805cfbe668f Mon Sep 17 00:00:00 2001 From: teor Date: Thu, 22 Jun 2023 06:44:53 +1000 Subject: [PATCH 135/265] fix(concurrency): Use Arc::into_inner() to avoid potential concurrency issues, needs Rust 1.70 (#7032) * Use Arc::into_inner() to avoid potential concurrency issues * Remove some outdated clippy lint workarounds (fixed in Rust 1.66) * Update the required Rust version to 1.70 --- zebra-chain/src/orchard/shielded_data.rs | 3 --- zebra-chain/src/transaction/serialize.rs | 3 --- zebra-consensus/src/block.rs | 2 +- zebra-network/src/protocol/external/codec.rs | 2 -- zebra-state/src/service.rs | 12 ++++-------- zebra-state/src/service/non_finalized_state.rs | 2 ++ zebra-utils/Cargo.toml | 4 ++++ zebrad/Cargo.toml | 2 +- 8 files changed, 12 insertions(+), 18 deletions(-) diff --git a/zebra-chain/src/orchard/shielded_data.rs b/zebra-chain/src/orchard/shielded_data.rs index dc55d19a8f7..3a034c05f0f 100644 --- a/zebra-chain/src/orchard/shielded_data.rs +++ 
b/zebra-chain/src/orchard/shielded_data.rs @@ -269,9 +269,6 @@ impl ZcashDeserialize for Flags { // Consensus rule: "In a version 5 transaction, // the reserved bits 2..7 of the flagsOrchard field MUST be zero." // https://zips.z.cash/protocol/protocol.pdf#txnencodingandconsensus - // - // Clippy 1.64 is wrong here, this lazy evaluation is necessary, constructors are functions. This is fixed in 1.66. - #[allow(clippy::unnecessary_lazy_evaluations)] Flags::from_bits(reader.read_u8()?) .ok_or_else(|| SerializationError::Parse("invalid reserved orchard flags")) } diff --git a/zebra-chain/src/transaction/serialize.rs b/zebra-chain/src/transaction/serialize.rs index da6a3770bf1..f79244da6ea 100644 --- a/zebra-chain/src/transaction/serialize.rs +++ b/zebra-chain/src/transaction/serialize.rs @@ -884,9 +884,6 @@ impl ZcashDeserialize for Transaction { } // Denoted as `nConsensusBranchId` in the spec. // Convert it to a NetworkUpgrade - // - // Clippy 1.64 is wrong here, this lazy evaluation is necessary, constructors are functions. This is fixed in 1.66. - #[allow(clippy::unnecessary_lazy_evaluations)] let network_upgrade = NetworkUpgrade::from_branch_id(limited_reader.read_u32::()?) .ok_or_else(|| { diff --git a/zebra-consensus/src/block.rs b/zebra-consensus/src/block.rs index 3b694ac6773..970cf4118aa 100644 --- a/zebra-consensus/src/block.rs +++ b/zebra-consensus/src/block.rs @@ -280,7 +280,7 @@ where check::miner_fees_are_valid(&block, network, block_miner_fees)?; // Finally, submit the block for contextual verification. 
- let new_outputs = Arc::try_unwrap(known_utxos) + let new_outputs = Arc::into_inner(known_utxos) .expect("all verification tasks using known_utxos are complete"); let prepared_block = zs::SemanticallyVerifiedBlock { diff --git a/zebra-network/src/protocol/external/codec.rs b/zebra-network/src/protocol/external/codec.rs index 7aee299dafa..6a4ae0585eb 100644 --- a/zebra-network/src/protocol/external/codec.rs +++ b/zebra-network/src/protocol/external/codec.rs @@ -500,8 +500,6 @@ impl Codec { /// Note: zcashd only requires fields up to `address_recv`, but everything up to `relay` is required in Zebra. /// see fn read_version(&self, mut reader: R) -> Result { - // Clippy 1.64 is wrong here, this lazy evaluation is necessary, constructors are functions. This is fixed in 1.66. - #[allow(clippy::unnecessary_lazy_evaluations)] Ok(VersionMessage { version: Version(reader.read_u32::()?), // Use from_bits_truncate to discard unknown service bits. diff --git a/zebra-state/src/service.rs b/zebra-state/src/service.rs index f88cd281128..0e7c96d1748 100644 --- a/zebra-state/src/service.rs +++ b/zebra-state/src/service.rs @@ -269,7 +269,7 @@ impl Drop for ReadStateService { // so dropping it should check if we can shut down. if let Some(block_write_task) = self.block_write_task.take() { - if let Ok(block_write_task_handle) = Arc::try_unwrap(block_write_task) { + if let Some(block_write_task_handle) = Arc::into_inner(block_write_task) { // We're the last database user, so we can tell it to shut down (blocking): // - flushes the database to disk, and // - drops the database, which cleans up any database tasks correctly. 
@@ -1165,15 +1165,11 @@ impl Service for ReadStateService { if let Some(block_write_task) = block_write_task { if block_write_task.is_finished() { - match Arc::try_unwrap(block_write_task) { + if let Some(block_write_task) = Arc::into_inner(block_write_task) { // We are the last state with a reference to this task, so we can propagate any panics - Ok(block_write_task_handle) => { - if let Err(thread_panic) = block_write_task_handle.join() { - std::panic::resume_unwind(thread_panic); - } + if let Err(thread_panic) = block_write_task.join() { + std::panic::resume_unwind(thread_panic); } - // We're not the last state, so we need to put it back - Err(arc_block_write_task) => self.block_write_task = Some(arc_block_write_task), } } else { // It hasn't finished, so we need to put it back diff --git a/zebra-state/src/service/non_finalized_state.rs b/zebra-state/src/service/non_finalized_state.rs index bdcb1c10eb2..6cb9a2d447e 100644 --- a/zebra-state/src/service/non_finalized_state.rs +++ b/zebra-state/src/service/non_finalized_state.rs @@ -399,6 +399,8 @@ impl NonFinalizedState { // Pushing a block onto a Chain can launch additional parallel batches. // TODO: should we pass _scope into Chain::push()? scope.spawn_fifo(|_scope| { + // TODO: Replace with Arc::unwrap_or_clone() when it stabilises: + // https://github.com/rust-lang/rust/issues/93610 let new_chain = Arc::try_unwrap(new_chain) .unwrap_or_else(|shared_chain| (*shared_chain).clone()); chain_push_result = Some(new_chain.push(contextual).map(Arc::new)); diff --git a/zebra-utils/Cargo.toml b/zebra-utils/Cargo.toml index 98e3b042c0e..9ce5fe07654 100644 --- a/zebra-utils/Cargo.toml +++ b/zebra-utils/Cargo.toml @@ -15,6 +15,10 @@ keywords = ["zebra", "zcash"] # Must be one of categories = ["command-line-utilities", "cryptography::cryptocurrencies"] +# Zebra is only supported on the latest stable Rust version. See the README for details. +# Any Zebra release can break compatibility with older Rust versions. 
+rust-version = "1.70" + [[bin]] name = "zebra-checkpoints" # this setting is required for Zebra's Docker build caches diff --git a/zebrad/Cargo.toml b/zebrad/Cargo.toml index cfeef6df271..3ec95f01b2f 100644 --- a/zebrad/Cargo.toml +++ b/zebrad/Cargo.toml @@ -19,7 +19,7 @@ edition = "2021" # Zebra is only supported on the latest stable Rust version. See the README for details. # Any Zebra release can break compatibility with older Rust versions. -rust-version = "1.66" +rust-version = "1.70" # Settings that impact runtime behaviour From 53cb0a7fa26a91942c4a29054c509cc180b702e3 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 21 Jun 2023 22:19:32 +0000 Subject: [PATCH 136/265] build(deps): bump peter-evans/dockerhub-description from 3.4.1 to 3.4.2 (#7023) Bumps [peter-evans/dockerhub-description](https://github.com/peter-evans/dockerhub-description) from 3.4.1 to 3.4.2. - [Release notes](https://github.com/peter-evans/dockerhub-description/releases) - [Commits](https://github.com/peter-evans/dockerhub-description/compare/v3.4.1...v3.4.2) --- updated-dependencies: - dependency-name: peter-evans/dockerhub-description dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/dockerhub-description.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/dockerhub-description.yml b/.github/workflows/dockerhub-description.yml index 958a23bc8e8..55a02bb920f 100644 --- a/.github/workflows/dockerhub-description.yml +++ b/.github/workflows/dockerhub-description.yml @@ -22,7 +22,7 @@ jobs: persist-credentials: false - name: Docker Hub Description - uses: peter-evans/dockerhub-description@v3.4.1 + uses: peter-evans/dockerhub-description@v3.4.2 with: username: ${{ secrets.DOCKERHUB_USERNAME }} password: ${{ secrets.DOCKERHUB_PASSWORD }} From 8861de6a7c18d0120a01df671b682cafe551d364 Mon Sep 17 00:00:00 2001 From: teor Date: Thu, 22 Jun 2023 14:16:50 +1000 Subject: [PATCH 137/265] fix(docker): Stop resetting the `cargo-chef` cache in the Dockerfile (#6934) * Fix Dockerfile cache use * Remove cache-breaking COPY commands * Use git to only reset files modified by cargo-chef * Copy .git and sources before cargo chef cook * Update .dockerignore to include .git * Don't use .git * Use rsync instead of git * Maybe COPY is needed * Actually copy changed files using rsync * Actually copy the files using the correct rsync syntax * Remove ls commands from Dockerfile --- docker/Dockerfile | 32 +++++++++++++++++++++++++++----- 1 file changed, 27 insertions(+), 5 deletions(-) diff --git a/docker/Dockerfile b/docker/Dockerfile index 4f21f9aee5d..74224019637 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -29,7 +29,7 @@ FROM chef AS deps SHELL ["/bin/bash", "-xo", "pipefail", "-c"] COPY --from=planner /opt/zebrad/recipe.json recipe.json -# Install zebra build deps +# Install zebra build deps and Dockerfile deps RUN apt-get -qq update && \ apt-get -qq install -y --no-install-recommends \ llvm \ @@ -37,6 +37,7 @@ RUN apt-get -qq update && \ clang \ ca-certificates \ protobuf-compiler \ + 
rsync \ ; \ rm -rf /var/lib/apt/lists/* /tmp/* @@ -102,16 +103,28 @@ FROM deps AS tests COPY --from=us-docker.pkg.dev/zealous-zebra/zebra/zcash-params /root/.zcash-params /root/.zcash-params COPY --from=us-docker.pkg.dev/zealous-zebra/zebra/lightwalletd /opt/lightwalletd /usr/local/bin +# cargo uses timestamps for its cache, so they need to be in this order: +# unmodified source files < previous build cache < modified source files +COPY . . + # Re-hydrate the minimum project skeleton identified by `cargo chef prepare` in the planner stage, +# over the top of the original source files, # and build it to cache all possible sentry and test dependencies. # -# This is the caching Docker layer for Rust! +# This is the caching Docker layer for Rust tests! +# It creates fake empty test binaries so dependencies are built, but Zebra is not fully built. # # TODO: add --locked when cargo-chef supports it RUN cargo chef cook --tests --release --features "${TEST_FEATURES} ${FEATURES}" --workspace --recipe-path recipe.json -COPY . . -# Test Zebra +# Undo the source file changes made by cargo-chef. +# rsync invalidates the cargo cache for the changed files only, by updating their timestamps. +# This makes sure the fake empty binaries created by cargo-chef are rebuilt. +COPY --from=planner /opt/zebrad zebra-original +RUN rsync --recursive --checksum --itemize-changes --verbose zebra-original/ . +RUN rm -r zebra-original + +# Build Zebra test binaries, but don't run them RUN cargo test --locked --release --features "${TEST_FEATURES} ${FEATURES}" --workspace --no-run RUN cp /opt/zebrad/target/release/zebrad /usr/local/bin RUN cp /opt/zebrad/target/release/zebra-checkpoints /usr/local/bin @@ -129,10 +142,19 @@ ENTRYPOINT [ "/entrypoint.sh" ] # zebrad binary from this step. FROM deps AS release +COPY . . + +# This is the caching layer for Rust zebrad builds. +# It creates a fake empty zebrad binary, see above for details. 
+# # TODO: add --locked when cargo-chef supports it RUN cargo chef cook --release --features "${FEATURES}" --package zebrad --bin zebrad --recipe-path recipe.json -COPY . . +# Undo the source file changes made by cargo-chef, so the fake empty zebrad binary is rebuilt. +COPY --from=planner /opt/zebrad zebra-original +RUN rsync --recursive --checksum --itemize-changes --verbose zebra-original/ . +RUN rm -r zebra-original + # Build zebrad RUN cargo build --locked --release --features "${FEATURES}" --package zebrad --bin zebrad From d3c2a9a31c8187e7651a875a73d95061211e391e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 22 Jun 2023 04:17:07 +0000 Subject: [PATCH 138/265] build(deps): bump tj-actions/changed-files from 36.4.0 to 36.4.1 (#7024) Bumps [tj-actions/changed-files](https://github.com/tj-actions/changed-files) from 36.4.0 to 36.4.1. - [Release notes](https://github.com/tj-actions/changed-files/releases) - [Changelog](https://github.com/tj-actions/changed-files/blob/main/HISTORY.md) - [Commits](https://github.com/tj-actions/changed-files/compare/v36.4.0...v36.4.1) --- updated-dependencies: - dependency-name: tj-actions/changed-files dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/lint.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index d2f56cbd1f9..9c43b4a128a 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -37,7 +37,7 @@ jobs: - name: Rust files id: changed-files-rust - uses: tj-actions/changed-files@v36.4.0 + uses: tj-actions/changed-files@v36.4.1 with: files: | **/*.rs @@ -49,7 +49,7 @@ jobs: - name: Workflow files id: changed-files-workflows - uses: tj-actions/changed-files@v36.4.0 + uses: tj-actions/changed-files@v36.4.1 with: files: | .github/workflows/*.yml From 31bc46bb2a736110d215de2fac5cdfd79bcbd058 Mon Sep 17 00:00:00 2001 From: teor Date: Thu, 22 Jun 2023 17:47:02 +1000 Subject: [PATCH 139/265] Add deny.toml update details to release-checklist.md (#7042) --- .github/PULL_REQUEST_TEMPLATE/release-checklist.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/PULL_REQUEST_TEMPLATE/release-checklist.md b/.github/PULL_REQUEST_TEMPLATE/release-checklist.md index f18de3d5195..774eda60df1 100644 --- a/.github/PULL_REQUEST_TEMPLATE/release-checklist.md +++ b/.github/PULL_REQUEST_TEMPLATE/release-checklist.md @@ -23,7 +23,8 @@ Sometimes `dependabot` misses some dependency updates, or we accidentally turned Here's how we make sure we got everything: - [ ] Run `cargo update` on the latest `main` branch, and keep the output -- [ ] If needed, update [deny.toml](https://github.com/ZcashFoundation/zebra/blob/main/book/src/dev/continuous-integration.md#fixing-duplicate-dependencies-in-check-denytoml-bans) +- [ ] If needed, [add duplicate dependency exceptions to deny.toml](https://github.com/ZcashFoundation/zebra/blob/main/book/src/dev/continuous-integration.md#fixing-duplicate-dependencies-in-check-denytoml-bans) +- [ ] If needed, remove resolved duplicate dependencies from 
`deny.toml` - [ ] Open a separate PR with the changes - [ ] Add the output of `cargo update` to that PR as a comment From 835e7a88f084739888958e0064288473e7be002c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 23 Jun 2023 01:00:27 +0000 Subject: [PATCH 140/265] build(deps): bump insta from 1.29.0 to 1.30.0 (#7051) Bumps [insta](https://github.com/mitsuhiko/insta) from 1.29.0 to 1.30.0. - [Changelog](https://github.com/mitsuhiko/insta/blob/master/CHANGELOG.md) - [Commits](https://github.com/mitsuhiko/insta/compare/1.29.0...1.30.0) --- updated-dependencies: - dependency-name: insta dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- zebra-rpc/Cargo.toml | 2 +- zebra-state/Cargo.toml | 2 +- zebra-test/Cargo.toml | 2 +- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index fdaf7eceba6..09100032d9a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2101,9 +2101,9 @@ dependencies = [ [[package]] name = "insta" -version = "1.29.0" +version = "1.30.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a28d25139df397cbca21408bb742cf6837e04cdbebf1b07b760caf971d6a972" +checksum = "28491f7753051e5704d4d0ae7860d45fae3238d7d235bc4289dcd45c48d3cec3" dependencies = [ "console", "lazy_static", diff --git a/zebra-rpc/Cargo.toml b/zebra-rpc/Cargo.toml index e9f6976e6ee..e9e4aa0e992 100644 --- a/zebra-rpc/Cargo.toml +++ b/zebra-rpc/Cargo.toml @@ -78,7 +78,7 @@ zebra-script = { path = "../zebra-script", version = "1.0.0-beta.26" } zebra-state = { path = "../zebra-state", version = "1.0.0-beta.26" } [dev-dependencies] -insta = { version = "1.29.0", features = ["redactions", "json", "ron"] } +insta = { version = "1.30.0", features = ["redactions", "json", "ron"] } proptest = "1.2.0" diff --git 
a/zebra-state/Cargo.toml b/zebra-state/Cargo.toml index 89ea428ab04..1fbc31f6707 100644 --- a/zebra-state/Cargo.toml +++ b/zebra-state/Cargo.toml @@ -90,7 +90,7 @@ once_cell = "1.18.0" spandoc = "0.2.2" hex = { version = "0.4.3", features = ["serde"] } -insta = { version = "1.29.0", features = ["ron"] } +insta = { version = "1.30.0", features = ["ron"] } proptest = "1.2.0" proptest-derive = "0.3.0" diff --git a/zebra-test/Cargo.toml b/zebra-test/Cargo.toml index d6edb5c5aa0..4c712e3beb3 100644 --- a/zebra-test/Cargo.toml +++ b/zebra-test/Cargo.toml @@ -18,7 +18,7 @@ categories = ["command-line-utilities", "cryptography::cryptocurrencies"] hex = "0.4.3" indexmap = "1.9.3" lazy_static = "1.4.0" -insta = "1.29.0" +insta = "1.30.0" proptest = "1.2.0" once_cell = "1.18.0" rand = { version = "0.8.5", package = "rand" } From 7e6162a043870939ac55b6d34024e5eca4fc135e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 23 Jun 2023 18:27:49 +0000 Subject: [PATCH 141/265] build(deps): bump itertools from 0.10.5 to 0.11.0 (#7050) * build(deps): bump itertools from 0.10.5 to 0.11.0 Bumps [itertools](https://github.com/rust-itertools/itertools) from 0.10.5 to 0.11.0. - [Changelog](https://github.com/rust-itertools/itertools/blob/master/CHANGELOG.md) - [Commits](https://github.com/rust-itertools/itertools/compare/v0.10.5...v0.11.0) --- updated-dependencies: - dependency-name: itertools dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] * add itertools as a duplicated dep --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Alfredo Garcia --- Cargo.lock | 25 +++++++++++++++++-------- deny.toml | 3 +++ zebra-chain/Cargo.toml | 2 +- zebra-network/Cargo.toml | 2 +- zebra-state/Cargo.toml | 2 +- zebra-utils/Cargo.toml | 2 +- 6 files changed, 24 insertions(+), 12 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 09100032d9a..89ab3fde2d1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -959,7 +959,7 @@ dependencies = [ "clap 4.3.4", "criterion-plot", "is-terminal", - "itertools", + "itertools 0.10.5", "num-traits", "once_cell", "oorandom", @@ -980,7 +980,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6b50826342786a51a89e2da3a28f1c32b06e387201bc2d19791f622c673706b1" dependencies = [ "cast", - "itertools", + "itertools 0.10.5", ] [[package]] @@ -2163,6 +2163,15 @@ dependencies = [ "either", ] +[[package]] +name = "itertools" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b1c173a5686ce8bfa551b3563d0c2170bf24ca44da99c7ca4bfdab5418c3fe57" +dependencies = [ + "either", +] + [[package]] name = "itoa" version = "1.0.6" @@ -3296,7 +3305,7 @@ checksum = "119533552c9a7ffacc21e099c24a0ac8bb19c2a2a3f363de84cd9b844feab270" dependencies = [ "bytes", "heck 0.4.1", - "itertools", + "itertools 0.10.5", "lazy_static", "log", "multimap", @@ -3317,7 +3326,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e5d2d8d10f3c6ded6da8b05b5fb3b8a5082514344d56c9f871412d29b4e075b4" dependencies = [ "anyhow", - "itertools", + "itertools 0.10.5", "proc-macro2 1.0.60", "quote 1.0.28", "syn 1.0.109", @@ -5669,7 +5678,7 @@ dependencies = [ "hex", "humantime", "incrementalmerkletree", - "itertools", + "itertools 0.11.0", "jubjub", "lazy_static", "num-integer", @@ -5765,7 +5774,7 @@ dependencies = [ 
"howudoin", "humantime-serde", "indexmap", - "itertools", + "itertools 0.11.0", "lazy_static", "metrics 0.21.0", "num-integer", @@ -5864,7 +5873,7 @@ dependencies = [ "howudoin", "indexmap", "insta", - "itertools", + "itertools 0.11.0", "jubjub", "lazy_static", "metrics 0.21.0", @@ -5923,7 +5932,7 @@ version = "1.0.0-beta.26" dependencies = [ "color-eyre", "hex", - "itertools", + "itertools 0.11.0", "regex", "reqwest", "serde_json", diff --git a/deny.toml b/deny.toml index 30c7a846928..2014f775cd5 100644 --- a/deny.toml +++ b/deny.toml @@ -44,6 +44,9 @@ skip-tree = [ # wait for prost-build to upgrade { name = "prettyplease", version = "=0.1.25" }, + # wait for criterion to upgrade + { name = "itertools", version = "=0.10.5" }, + # ZF crates # wait for zcashd and zcash_script to upgrade diff --git a/zebra-chain/Cargo.toml b/zebra-chain/Cargo.toml index 905b548efa2..7615d602c34 100644 --- a/zebra-chain/Cargo.toml +++ b/zebra-chain/Cargo.toml @@ -96,7 +96,7 @@ serde-big-array = "0.5.1" # Processing futures = "0.3.28" -itertools = "0.10.5" +itertools = "0.11.0" rayon = "1.7.0" # ZF deps diff --git a/zebra-network/Cargo.toml b/zebra-network/Cargo.toml index cd1029e6325..7bb8e14f0ce 100644 --- a/zebra-network/Cargo.toml +++ b/zebra-network/Cargo.toml @@ -48,7 +48,7 @@ dirs = "5.0.1" hex = "0.4.3" humantime-serde = "1.1.1" indexmap = { version = "1.9.3", features = ["serde"] } -itertools = "0.10.5" +itertools = "0.11.0" lazy_static = "1.4.0" num-integer = "0.1.45" ordered-map = "0.4.2" diff --git a/zebra-state/Cargo.toml b/zebra-state/Cargo.toml index 1fbc31f6707..e4d87463ce5 100644 --- a/zebra-state/Cargo.toml +++ b/zebra-state/Cargo.toml @@ -49,7 +49,7 @@ dirs = "5.0.1" futures = "0.3.28" hex = "0.4.3" indexmap = "1.9.3" -itertools = "0.10.5" +itertools = "0.11.0" lazy_static = "1.4.0" metrics = "0.21.0" mset = "0.1.1" diff --git a/zebra-utils/Cargo.toml b/zebra-utils/Cargo.toml index 9ce5fe07654..98b7fb50d20 100644 --- a/zebra-utils/Cargo.toml +++ 
b/zebra-utils/Cargo.toml @@ -81,7 +81,7 @@ zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.26" } zebra-rpc = { path = "../zebra-rpc", version = "1.0.0-beta.26", optional = true } # These crates are needed for the zebra-checkpoints binary -itertools = { version = "0.10.5", optional = true } +itertools = { version = "0.11.0", optional = true } # These crates are needed for the search-issue-refs binary regex = { version = "1.8.4", optional = true } From b234b681fed0c65e21d492f9c0ae6106e4cd6f23 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 23 Jun 2023 18:28:11 +0000 Subject: [PATCH 142/265] build(deps): bump clap from 4.3.4 to 4.3.5 (#7038) Bumps [clap](https://github.com/clap-rs/clap) from 4.3.4 to 4.3.5. - [Release notes](https://github.com/clap-rs/clap/releases) - [Changelog](https://github.com/clap-rs/clap/blob/master/CHANGELOG.md) - [Commits](https://github.com/clap-rs/clap/compare/v4.3.4...v4.3.5) --- updated-dependencies: - dependency-name: clap dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 14 +++++++------- zebrad/Cargo.toml | 2 +- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 89ab3fde2d1..6d808c73b21 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -12,7 +12,7 @@ dependencies = [ "arc-swap", "backtrace", "canonical-path", - "clap 4.3.4", + "clap 4.3.5", "color-eyre", "fs-err", "once_cell", @@ -773,9 +773,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.3.4" +version = "4.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "80672091db20273a15cf9fdd4e47ed43b5091ec9841bf4c6145c9dfbbcae09ed" +checksum = "2686c4115cb0810d9a984776e197823d08ec94f176549a89a9efded477c456dc" dependencies = [ "clap_builder", "clap_derive", @@ -784,9 +784,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.3.4" +version = "4.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1458a1df40e1e2afebb7ab60ce55c1fa8f431146205aa5f4887e0b111c27636" +checksum = "2e53afce1efce6ed1f633cf0e57612fe51db54a1ee4fd8f8503d078fe02d69ae" dependencies = [ "anstream", "anstyle", @@ -956,7 +956,7 @@ dependencies = [ "anes", "cast", "ciborium", - "clap 4.3.4", + "clap 4.3.5", "criterion-plot", "is-terminal", "itertools 0.10.5", @@ -5954,7 +5954,7 @@ dependencies = [ "abscissa_core", "atty", "chrono", - "clap 4.3.4", + "clap 4.3.5", "color-eyre", "console-subscriber", "dirs", diff --git a/zebrad/Cargo.toml b/zebrad/Cargo.toml index 3ec95f01b2f..af5f11ddac0 100644 --- a/zebrad/Cargo.toml +++ b/zebrad/Cargo.toml @@ -135,7 +135,7 @@ zebra-state = { path = "../zebra-state", version = "1.0.0-beta.26" } zebra-utils = { path = "../zebra-utils", version = "1.0.0-beta.26", optional = true } abscissa_core = "0.7.0" -clap = { version = "4.3.4", features = ["cargo"] } +clap = { version = "4.3.5", features = ["cargo"] } chrono = { version = "0.4.26", 
default-features = false, features = ["clock", "std"] } humantime-serde = "1.1.1" indexmap = "1.9.3" From 0e0ee8d1bd2c9b35c00078b4aed9f5f113100565 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 23 Jun 2023 22:17:36 +0000 Subject: [PATCH 143/265] build(deps): bump clap from 4.3.5 to 4.3.6 (#7059) Bumps [clap](https://github.com/clap-rs/clap) from 4.3.5 to 4.3.6. - [Release notes](https://github.com/clap-rs/clap/releases) - [Changelog](https://github.com/clap-rs/clap/blob/master/CHANGELOG.md) - [Commits](https://github.com/clap-rs/clap/compare/v4.3.5...v4.3.6) --- updated-dependencies: - dependency-name: clap dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 10 +++++----- zebrad/Cargo.toml | 2 +- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 6d808c73b21..1c984acaa25 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -12,7 +12,7 @@ dependencies = [ "arc-swap", "backtrace", "canonical-path", - "clap 4.3.5", + "clap 4.3.6", "color-eyre", "fs-err", "once_cell", @@ -773,9 +773,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.3.5" +version = "4.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2686c4115cb0810d9a984776e197823d08ec94f176549a89a9efded477c456dc" +checksum = "6320c6d1c98b6981da7bb2dcecbd0be9dc98d42165fa8326b21000f7dbfde6d0" dependencies = [ "clap_builder", "clap_derive", @@ -956,7 +956,7 @@ dependencies = [ "anes", "cast", "ciborium", - "clap 4.3.5", + "clap 4.3.6", "criterion-plot", "is-terminal", "itertools 0.10.5", @@ -5954,7 +5954,7 @@ dependencies = [ "abscissa_core", "atty", "chrono", - "clap 4.3.5", + "clap 4.3.6", "color-eyre", "console-subscriber", "dirs", diff --git a/zebrad/Cargo.toml b/zebrad/Cargo.toml index af5f11ddac0..9682bc185fd 100644 
--- a/zebrad/Cargo.toml +++ b/zebrad/Cargo.toml @@ -135,7 +135,7 @@ zebra-state = { path = "../zebra-state", version = "1.0.0-beta.26" } zebra-utils = { path = "../zebra-utils", version = "1.0.0-beta.26", optional = true } abscissa_core = "0.7.0" -clap = { version = "4.3.5", features = ["cargo"] } +clap = { version = "4.3.6", features = ["cargo"] } chrono = { version = "0.4.26", default-features = false, features = ["clock", "std"] } humantime-serde = "1.1.1" indexmap = "1.9.3" From 9229424ebb588071b1c57262451317ee0a03af74 Mon Sep 17 00:00:00 2001 From: teor Date: Mon, 26 Jun 2023 09:24:10 +1000 Subject: [PATCH 144/265] add(ci): Run release builds and production Docker image tests on pull requests (#7055) * Update the workflow run conditions for CI docker tests * Run release builds and release Docker image tests on pull requests * Remove the manual docker test from the release checklist * Fix workflow syntax * Use the right kind of quotes --- .../release-checklist.md | 5 +-- .../workflows/continous-delivery.patch.yml | 36 ++++++++++++++++ .github/workflows/continous-delivery.yml | 43 ++++++++++++++++++- .../continous-integration-docker.patch.yml | 2 + .../continous-integration-docker.yml | 3 +- 5 files changed, 83 insertions(+), 6 deletions(-) create mode 100644 .github/workflows/continous-delivery.patch.yml diff --git a/.github/PULL_REQUEST_TEMPLATE/release-checklist.md b/.github/PULL_REQUEST_TEMPLATE/release-checklist.md index 774eda60df1..1f010c13a07 100644 --- a/.github/PULL_REQUEST_TEMPLATE/release-checklist.md +++ b/.github/PULL_REQUEST_TEMPLATE/release-checklist.md @@ -158,10 +158,7 @@ The end of support height is calculated from the current blockchain height: and put the output in a comment on the PR. 
## Publish Docker Images -- [ ] Wait until [the Docker images have been published](https://github.com/ZcashFoundation/zebra/actions/workflows/release-binaries.yml) -- [ ] Test the Docker image using `docker run --tty --interactive zfnd/zebra:v1.0.0`, - and put the output in a comment on the PR. - (You can use [gcloud cloud shell](https://console.cloud.google.com/home/dashboard?cloudshell=true)) +- [ ] Wait for the [the Docker images to be published successfully](https://github.com/ZcashFoundation/zebra/actions/workflows/release-binaries.yml). - [ ] Un-freeze the [`batched` queue](https://dashboard.mergify.com/github/ZcashFoundation/repo/zebra/queues) using Mergify. ## Release Failures diff --git a/.github/workflows/continous-delivery.patch.yml b/.github/workflows/continous-delivery.patch.yml new file mode 100644 index 00000000000..4dbc98652a7 --- /dev/null +++ b/.github/workflows/continous-delivery.patch.yml @@ -0,0 +1,36 @@ +name: CD + +on: + # Only patch the Docker image test jobs + pull_request: + paths-ignore: + # code and tests + - '**/*.rs' + # hard-coded checkpoints and proptest regressions + - '**/*.txt' + # dependencies + - '**/Cargo.toml' + - '**/Cargo.lock' + # configuration files + - '.cargo/config.toml' + - '**/clippy.toml' + # workflow definitions + - 'docker/**' + - '.dockerignore' + - '.github/workflows/continous-delivery.yml' + - '.github/workflows/find-cached-disks.yml' + + +jobs: + # Also patched by continous-integration-docker.patch.yml, which has a different paths-ignore + build: + name: Build CI Docker / Build images + runs-on: ubuntu-latest + steps: + - run: 'echo "No build required"' + + test-configuration-file: + name: Test Zebra default Docker config file + runs-on: ubuntu-latest + steps: + - run: 'echo "No build required"' diff --git a/.github/workflows/continous-delivery.yml b/.github/workflows/continous-delivery.yml index 15c8c08a615..aee4e80e8b2 100644 --- a/.github/workflows/continous-delivery.yml +++ 
b/.github/workflows/continous-delivery.yml @@ -6,9 +6,12 @@ name: CD # # Since the different event types each use a different Managed Instance Group or instance, # we can run different event types concurrently. +# +# For pull requests, we only run the tests from this workflow, and don't do any deployments. +# So an in-progress pull request gets cancelled, just like other tests. concurrency: group: ${{ github.workflow }}-${{ github.event_name }}-${{ github.ref }} - cancel-in-progress: false + cancel-in-progress: ${{ github.event_name == 'pull_request' }} on: workflow_dispatch: @@ -25,14 +28,52 @@ on: required: false type: boolean default: false + # Temporarily disabled to reduce network load, see #6894. #push: # branches: # - main + # paths: + # # code and tests + # - '**/*.rs' + # # hard-coded checkpoints and proptest regressions + # - '**/*.txt' + # # dependencies + # - '**/Cargo.toml' + # - '**/Cargo.lock' + # # configuration files + # - '.cargo/config.toml' + # - '**/clippy.toml' + # # workflow definitions + # - 'docker/**' + # - '.dockerignore' + # - '.github/workflows/continous-delivery.yml' + # - '.github/workflows/build-docker-image.yml' + + # Only runs the Docker image tests, doesn't deploy any instances + pull_request: + paths: + # code and tests + - '**/*.rs' + # hard-coded checkpoints and proptest regressions + - '**/*.txt' + # dependencies + - '**/Cargo.toml' + - '**/Cargo.lock' + # configuration files + - '.cargo/config.toml' + - '**/clippy.toml' + # workflow definitions + - 'docker/**' + - '.dockerignore' + - '.github/workflows/continous-delivery.yml' + - '.github/workflows/find-cached-disks.yml' + release: types: - published + jobs: # If a release was made we want to extract the first part of the semver from the # tag_name diff --git a/.github/workflows/continous-integration-docker.patch.yml b/.github/workflows/continous-integration-docker.patch.yml index b3cf3a8b537..489d75b01b3 100644 --- a/.github/workflows/continous-integration-docker.patch.yml 
+++ b/.github/workflows/continous-integration-docker.patch.yml @@ -19,8 +19,10 @@ on: - '**/clippy.toml' # workflow definitions - 'docker/**' + - '.dockerignore' - '.github/workflows/continous-integration-docker.yml' - '.github/workflows/deploy-gcp-tests.yml' + - '.github/workflows/find-cached-disks.yml' - '.github/workflows/build-docker-image.yml' jobs: diff --git a/.github/workflows/continous-integration-docker.yml b/.github/workflows/continous-integration-docker.yml index 40d2313a894..902218ef266 100644 --- a/.github/workflows/continous-integration-docker.yml +++ b/.github/workflows/continous-integration-docker.yml @@ -80,10 +80,11 @@ on: - '**/clippy.toml' # workflow definitions - 'docker/**' + - '.dockerignore' - '.github/workflows/continous-integration-docker.yml' - '.github/workflows/deploy-gcp-tests.yml' - - '.github/workflows/build-docker-image.yml' - '.github/workflows/find-cached-disks.yml' + - '.github/workflows/build-docker-image.yml' jobs: # to also run a job on Mergify head branches, From f455baaa6e32971c4759705b47a27911ade1143b Mon Sep 17 00:00:00 2001 From: teor Date: Mon, 26 Jun 2023 11:12:29 +1000 Subject: [PATCH 145/265] Rename a CD job with the same name as a CI job (#7063) --- .github/workflows/continous-delivery.patch.yml | 2 +- .github/workflows/continous-delivery.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/continous-delivery.patch.yml b/.github/workflows/continous-delivery.patch.yml index 4dbc98652a7..53ecb144a9a 100644 --- a/.github/workflows/continous-delivery.patch.yml +++ b/.github/workflows/continous-delivery.patch.yml @@ -30,7 +30,7 @@ jobs: - run: 'echo "No build required"' test-configuration-file: - name: Test Zebra default Docker config file + name: Test Zebra CD Docker config file runs-on: ubuntu-latest steps: - run: 'echo "No build required"' diff --git a/.github/workflows/continous-delivery.yml b/.github/workflows/continous-delivery.yml index aee4e80e8b2..8cd334560fc 100644 --- 
a/.github/workflows/continous-delivery.yml +++ b/.github/workflows/continous-delivery.yml @@ -123,7 +123,7 @@ jobs: # Test that Zebra works using the default config with the latest Zebra version, # and test reconfiguring the docker image for testnet. test-configuration-file: - name: Test Zebra default Docker config file + name: Test Zebra CD Docker config file timeout-minutes: 15 runs-on: ubuntu-latest needs: build From 76a7ff45a990eb1137dc0f5c594534001c887b17 Mon Sep 17 00:00:00 2001 From: teor Date: Mon, 26 Jun 2023 15:44:19 +1000 Subject: [PATCH 146/265] fix(deps): Replace openssl with rustls in tests and experimental features (#7047) * Remove openssl dependency and prevent it coming back * Put the arguments in the right place * Put comment in the right place * Add a default-docker feature to zebrad and use it in workflows and Docker files * Fix a comment typo * Make sure that Docker production builds don't use openssl * Rename feature to default-release-binaries --- .github/workflows/build-docker-image.yml | 2 +- .../workflows/continous-integration-os.yml | 12 ++++++---- .github/workflows/release-binaries.yml | 2 +- deny.toml | 23 +++++++++++++++++-- docker/Dockerfile | 2 +- docker/zcash-params/Dockerfile | 4 ++-- zebra-node-services/Cargo.toml | 5 ++-- zebra-state/Cargo.toml | 3 ++- zebrad/Cargo.toml | 3 +++ 9 files changed, 41 insertions(+), 15 deletions(-) diff --git a/.github/workflows/build-docker-image.yml b/.github/workflows/build-docker-image.yml index 6ae6f28166d..95c187bec97 100644 --- a/.github/workflows/build-docker-image.yml +++ b/.github/workflows/build-docker-image.yml @@ -38,7 +38,7 @@ on: # https://github.com/ZcashFoundation/zebra/blob/main/docker/Dockerfile#L83 features: required: false - default: "sentry" + default: "default-release-binaries" type: string test_features: required: false diff --git a/.github/workflows/continous-integration-os.yml b/.github/workflows/continous-integration-os.yml index 93303977e95..3a189653a82 100644 --- 
a/.github/workflows/continous-integration-os.yml +++ b/.github/workflows/continous-integration-os.yml @@ -261,8 +261,8 @@ jobs: # We don't need to check `--no-default-features` here, because (except in very rare cases): # - disabling features isn't going to add duplicate dependencies # - disabling features isn't going to add more crate sources - features: ['', '--all-features'] - # We always want to run the --all-features job, because it gives accurate "skip tree root was not found" warnings + features: ['', '--features default-release-binaries', '--all-features'] + # Always run the --all-features job, to get accurate "skip tree root was not found" warnings fail-fast: false # Prevent sudden announcement of a new advisory from failing ci: @@ -274,12 +274,14 @@ jobs: persist-credentials: false - uses: r7kamura/rust-problem-matchers@v1.3.0 - # The --all-features job is the only job that gives accurate "skip tree root was not found" warnings. - # In other jobs, we expect some of these warnings, due to disabled features. - name: Check ${{ matrix.checks }} with features ${{ matrix.features }} uses: EmbarkStudios/cargo-deny-action@v1 with: - command: check ${{ matrix.checks }} + # --all-features spuriously activates openssl, but we want to ban that dependency in + # all of zebrad's production features for security reasons. But the --all-features job is + # the only job that gives accurate "skip tree root was not found" warnings. + # In other jobs, we expect some of these warnings, due to disabled features. 
+ command: check ${{ matrix.checks }} ${{ matrix.features == '--all-features' && '--allow banned' || '--allow unmatched-skip-root' }} arguments: --workspace ${{ matrix.features }} unused-deps: diff --git a/.github/workflows/release-binaries.yml b/.github/workflows/release-binaries.yml index 2ec7338b5d5..81ee18c7b18 100644 --- a/.github/workflows/release-binaries.yml +++ b/.github/workflows/release-binaries.yml @@ -44,7 +44,7 @@ jobs: tag_suffix: .experimental network: Testnet rpc_port: '18232' - features: "sentry getblocktemplate-rpcs" + features: "default-release-binaries getblocktemplate-rpcs" test_features: "" rust_backtrace: '1' zebra_skip_ipv6_tests: '1' diff --git a/deny.toml b/deny.toml index 2014f775cd5..900d08ef9e0 100644 --- a/deny.toml +++ b/deny.toml @@ -10,6 +10,13 @@ [bans] # Lint level for when multiple versions of the same crate are detected multiple-versions = "deny" + +# Don't allow wildcard ("any version") dependencies +wildcards = "deny" +# Allow private and dev wildcard dependencies. +# Switch this to `false` when #6924 is implemented. +allow-wildcard-paths = true + # The graph highlighting used when creating dotgraphs for crates # with multiple versions # * lowest-version - The path to the lowest versioned duplicate is highlighted @@ -17,12 +24,18 @@ multiple-versions = "deny" # * all - Both lowest-version and simplest-path are used highlight = "all" -# We don't use this for Zebra. -# # List of crates that are allowed. Use with care! #allow = [ #] +# List of crates that can never become Zebra dependencies. +deny = [ + # Often has memory safety vulnerabilities. + # Enabled by --all-features, use the `cargo hack` script in the deny.toml CI job instead. + { name = "openssl" }, + { name = "openssl-sys" }, +] + # We only use this for some `librustzcash` and `orchard` crates. # If we add a crate here, duplicate dependencies of that crate are still shown. 
# @@ -47,6 +60,9 @@ skip-tree = [ # wait for criterion to upgrade { name = "itertools", version = "=0.10.5" }, + # wait for backtrace and multiple dependents to upgrade + { name = "miniz_oxide", version = "=0.6.2" }, + # ZF crates # wait for zcashd and zcash_script to upgrade @@ -71,6 +87,9 @@ skip-tree = [ # wait for zcash_address to upgrade { name = "bs58", version = "=0.4.0" }, + # wait for minreq and zcash_proofs to upgrade + { name = "rustls", version = "=0.20.8" }, + # zebra-utils dependencies # wait for structopt upgrade (or upgrade to clap 4) diff --git a/docker/Dockerfile b/docker/Dockerfile index 74224019637..3b636339797 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -81,7 +81,7 @@ ENV ZEBRA_SKIP_IPV6_TESTS ${ZEBRA_SKIP_IPV6_TESTS:-1} # Build zebrad with these features # Keep these in sync with: # https://github.com/ZcashFoundation/zebra/blob/main/.github/workflows/build-docker-image.yml#L42 -ARG FEATURES="sentry" +ARG FEATURES="default-release-binaries" ARG TEST_FEATURES="lightwalletd-grpc-tests zebra-checkpoints" # Use ENTRYPOINT_FEATURES to override the specific features used to run tests in entrypoint.sh, # separately from the test and production image builds. diff --git a/docker/zcash-params/Dockerfile b/docker/zcash-params/Dockerfile index 1036a2be40b..a9bea7233a6 100644 --- a/docker/zcash-params/Dockerfile +++ b/docker/zcash-params/Dockerfile @@ -23,7 +23,7 @@ RUN apt-get -qq update && \ ENV CARGO_HOME /opt/zebrad/.cargo/ # Build dependencies - this is the caching Docker layer! -RUN cargo chef cook --release --features sentry --package zebrad --recipe-path recipe.json +RUN cargo chef cook --release --features default-release-binaries --package zebrad --recipe-path recipe.json ARG RUST_BACKTRACE=0 ENV RUST_BACKTRACE ${RUST_BACKTRACE} @@ -36,4 +36,4 @@ ENV COLORBT_SHOW_HIDDEN ${COLORBT_SHOW_HIDDEN} COPY . . 
# Pre-download Zcash Sprout and Sapling parameters -RUN cargo run --locked --release --features sentry --package zebrad --bin zebrad download +RUN cargo run --locked --release --features default-release-binaries --package zebrad --bin zebrad download diff --git a/zebra-node-services/Cargo.toml b/zebra-node-services/Cargo.toml index 70b469727ea..9d9b3885a50 100644 --- a/zebra-node-services/Cargo.toml +++ b/zebra-node-services/Cargo.toml @@ -42,7 +42,8 @@ zebra-chain = { path = "../zebra-chain" , version = "1.0.0-beta.26"} # Tool and test feature rpc-client color-eyre = { version = "0.6.2", optional = true } jsonrpc-core = { version = "18.0.0", optional = true } -reqwest = { version = "0.11.18", optional = true } +# Security: avoid default dependency on openssl +reqwest = { version = "0.11.18", default-features = false, features = ["rustls-tls"], optional = true } serde = { version = "1.0.164", optional = true } serde_json = { version = "1.0.97", optional = true } @@ -50,6 +51,6 @@ serde_json = { version = "1.0.97", optional = true } color-eyre = "0.6.2" jsonrpc-core = "18.0.0" -reqwest = "0.11.18" +reqwest = { version = "0.11.18", default-features = false, features = ["rustls-tls"] } serde = "1.0.164" serde_json = "1.0.97" diff --git a/zebra-state/Cargo.toml b/zebra-state/Cargo.toml index e4d87463ce5..e4dcda077a9 100644 --- a/zebra-state/Cargo.toml +++ b/zebra-state/Cargo.toml @@ -67,7 +67,8 @@ tower = { version = "0.4.13", features = ["buffer", "util"] } tracing = "0.1.37" # elasticsearch specific dependencies. 
-elasticsearch = { version = "8.5.0-alpha.1", package = "elasticsearch", optional = true } +# Security: avoid default dependency on openssl +elasticsearch = { version = "8.5.0-alpha.1", default-features = false, features = ["rustls-tls"], optional = true } serde_json = { version = "1.0.97", package = "serde_json", optional = true } zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.26" } diff --git a/zebrad/Cargo.toml b/zebrad/Cargo.toml index 9682bc185fd..e1ac083e1af 100644 --- a/zebrad/Cargo.toml +++ b/zebrad/Cargo.toml @@ -39,6 +39,9 @@ pre-release-replacements = [ # In release builds, don't compile debug logging code, to improve performance. default = ["release_max_level_info"] +# Default features for official ZF binary release builds +default-release-binaries = ["default", "sentry"] + # Production features that activate extra dependencies, or extra features in dependencies # Experimental mining RPC support From a6f35afe3700c1ba0a19905794143e20b694f2bf Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 26 Jun 2023 19:43:29 +0000 Subject: [PATCH 147/265] build(deps): bump clap from 4.3.6 to 4.3.8 (#7066) Bumps [clap](https://github.com/clap-rs/clap) from 4.3.6 to 4.3.8. - [Release notes](https://github.com/clap-rs/clap/releases) - [Changelog](https://github.com/clap-rs/clap/blob/master/CHANGELOG.md) - [Commits](https://github.com/clap-rs/clap/compare/v4.3.6...v4.3.8) --- updated-dependencies: - dependency-name: clap dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 14 +++++++------- zebrad/Cargo.toml | 2 +- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 1c984acaa25..63195c9fd51 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -12,7 +12,7 @@ dependencies = [ "arc-swap", "backtrace", "canonical-path", - "clap 4.3.6", + "clap 4.3.8", "color-eyre", "fs-err", "once_cell", @@ -773,9 +773,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.3.6" +version = "4.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6320c6d1c98b6981da7bb2dcecbd0be9dc98d42165fa8326b21000f7dbfde6d0" +checksum = "d9394150f5b4273a1763355bd1c2ec54cc5a2593f790587bcd6b2c947cfa9211" dependencies = [ "clap_builder", "clap_derive", @@ -784,9 +784,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.3.5" +version = "4.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e53afce1efce6ed1f633cf0e57612fe51db54a1ee4fd8f8503d078fe02d69ae" +checksum = "9a78fbdd3cc2914ddf37ba444114bc7765bbdcb55ec9cbe6fa054f0137400717" dependencies = [ "anstream", "anstyle", @@ -956,7 +956,7 @@ dependencies = [ "anes", "cast", "ciborium", - "clap 4.3.6", + "clap 4.3.8", "criterion-plot", "is-terminal", "itertools 0.10.5", @@ -5954,7 +5954,7 @@ dependencies = [ "abscissa_core", "atty", "chrono", - "clap 4.3.6", + "clap 4.3.8", "color-eyre", "console-subscriber", "dirs", diff --git a/zebrad/Cargo.toml b/zebrad/Cargo.toml index e1ac083e1af..56c034e2658 100644 --- a/zebrad/Cargo.toml +++ b/zebrad/Cargo.toml @@ -138,7 +138,7 @@ zebra-state = { path = "../zebra-state", version = "1.0.0-beta.26" } zebra-utils = { path = "../zebra-utils", version = "1.0.0-beta.26", optional = true } abscissa_core = "0.7.0" -clap = { version = "4.3.6", features = ["cargo"] } +clap = { version = "4.3.8", features = ["cargo"] } chrono = { version = "0.4.26", 
default-features = false, features = ["clock", "std"] } humantime-serde = "1.1.1" indexmap = "1.9.3" From 015a970e16a6c4913c6d1092633dac836570a5b7 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 26 Jun 2023 21:30:10 +0000 Subject: [PATCH 148/265] build(deps): bump tj-actions/changed-files from 36.4.1 to 37.0.3 (#7065) Bumps [tj-actions/changed-files](https://github.com/tj-actions/changed-files) from 36.4.1 to 37.0.3. - [Release notes](https://github.com/tj-actions/changed-files/releases) - [Changelog](https://github.com/tj-actions/changed-files/blob/main/HISTORY.md) - [Commits](https://github.com/tj-actions/changed-files/compare/v36.4.1...v37.0.3) --- updated-dependencies: - dependency-name: tj-actions/changed-files dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/lint.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 9c43b4a128a..d3427fd19a3 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -37,7 +37,7 @@ jobs: - name: Rust files id: changed-files-rust - uses: tj-actions/changed-files@v36.4.1 + uses: tj-actions/changed-files@v37.0.3 with: files: | **/*.rs @@ -49,7 +49,7 @@ jobs: - name: Workflow files id: changed-files-workflows - uses: tj-actions/changed-files@v36.4.1 + uses: tj-actions/changed-files@v37.0.3 with: files: | .github/workflows/*.yml From 3af03c3971ec7e94c4fe04193d3055c58057d3f3 Mon Sep 17 00:00:00 2001 From: Conrado Gouvea Date: Tue, 27 Jun 2023 00:35:07 -0300 Subject: [PATCH 149/265] print a Zebra logo and some text if stderr is terminal (#6945) * print a Zebra logo and some text in progress bar mode * add network to printed line, add heart to logo * print logo and message regardless of progress-bar; document 
how logo was generated --- zebrad/src/application.rs | 5 ++- zebrad/src/components/tracing.rs | 6 +++ zebrad/src/components/tracing/component.rs | 43 +++++++++++++++++++--- zebrad/src/components/tracing/zebra.utf8 | 20 ++++++++++ 4 files changed, 67 insertions(+), 7 deletions(-) create mode 100644 zebrad/src/components/tracing/zebra.utf8 diff --git a/zebrad/src/application.rs b/zebrad/src/application.rs index d7fb3356dd1..47f324a2e96 100644 --- a/zebrad/src/application.rs +++ b/zebrad/src/application.rs @@ -419,7 +419,10 @@ impl Application for ZebradApp { tracing_config.filter = Some(default_filter.to_owned()); tracing_config.flamegraph = None; } - components.push(Box::new(Tracing::new(tracing_config)?)); + components.push(Box::new(Tracing::new( + config.network.network, + tracing_config, + )?)); // Log git metadata and platform info when zebrad starts up if is_server { diff --git a/zebrad/src/components/tracing.rs b/zebrad/src/components/tracing.rs index d12491f87a7..439c5052e49 100644 --- a/zebrad/src/components/tracing.rs +++ b/zebrad/src/components/tracing.rs @@ -138,6 +138,12 @@ impl Config { self.force_use_color || (self.use_color && atty::is(atty::Stream::Stdout)) } + /// Returns `true` if standard error should use color escapes. + /// Automatically checks if Zebra is running in a terminal. + pub fn use_color_stderr(&self) -> bool { + self.force_use_color || (self.use_color && atty::is(atty::Stream::Stderr)) + } + /// Returns `true` if output that could go to standard output or standard error /// should use color escapes. Automatically checks if Zebra is running in a terminal. 
pub fn use_color_stdout_and_stderr(&self) -> bool { diff --git a/zebrad/src/components/tracing/component.rs b/zebrad/src/components/tracing/component.rs index 4a3a4560eb3..4498b1a1054 100644 --- a/zebrad/src/components/tracing/component.rs +++ b/zebrad/src/components/tracing/component.rs @@ -15,12 +15,26 @@ use tracing_subscriber::{ util::SubscriberInitExt, EnvFilter, }; +use zebra_chain::parameters::Network; use crate::{application::build_version, components::tracing::Config}; #[cfg(feature = "flamegraph")] use super::flame; +// Art generated with these two images. +// Zebra logo: book/theme/favicon.png +// Heart image: https://commons.wikimedia.org/wiki/File:Heart_coraz%C3%B3n.svg +// (License: CC BY-SA 3.0) +// +// How to render +// +// Convert heart image to PNG (2000px) and run: +// img2txt -W 40 -H 20 -f utf8 -d none Heart_corazón.svg.png > heart.utf8 +// img2txt -W 40 -H 20 -f utf8 -d none favicon.png > logo.utf8 +// paste favicon.utf8 heart.utf8 > zebra.utf8 +static ZEBRA_ART: [u8; include_bytes!("zebra.utf8").len()] = *include_bytes!("zebra.utf8"); + /// A type-erased boxed writer that can be sent between threads safely. pub type BoxWrite = Box; @@ -52,15 +66,36 @@ pub struct Tracing { impl Tracing { /// Try to create a new [`Tracing`] component with the given `filter`. - #[allow(clippy::print_stdout, clippy::print_stderr)] - pub fn new(config: Config) -> Result { + #[allow(clippy::print_stdout, clippy::print_stderr, clippy::unwrap_in_result)] + pub fn new(network: Network, config: Config) -> Result { // Only use color if tracing output is being sent to a terminal or if it was explicitly // forced to. 
let use_color = config.use_color_stdout(); + let use_color_stderr = config.use_color_stderr(); let filter = config.filter.unwrap_or_default(); let flame_root = &config.flamegraph; + // If it's a terminal and color escaping is enabled: clear screen and + // print Zebra logo (here `use_color` is being interpreted as + // "use escape codes") + if use_color_stderr { + // Clear screen + eprint!("\x1B[2J"); + eprintln!( + "{}", + std::str::from_utf8(&ZEBRA_ART) + .expect("should always work on a UTF-8 encoded constant") + ); + } + + eprintln!( + "Thank you for running a {} zebrad {} node!", + network.lowercase_name(), + build_version() + ); + eprintln!("You're helping to strengthen the network and contributing to a social good :)"); + let writer = if let Some(log_file) = config.log_file.as_ref() { println!("running zebra"); @@ -263,10 +298,6 @@ impl Tracing { howudoin::init(terminal_consumer); info!("activated progress bar"); - - if config.log_file.is_some() { - eprintln!("waiting for initial progress reports..."); - } } Ok(Self { diff --git a/zebrad/src/components/tracing/zebra.utf8 b/zebrad/src/components/tracing/zebra.utf8 new file mode 100644 index 00000000000..bf620ddea8d --- /dev/null +++ b/zebrad/src/components/tracing/zebra.utf8 @@ -0,0 +1,20 @@ + X@8:::::::@X + @::X:;SX;%8S8@@:X::8 + @:;@8@.S8 ;;t;. XX.;@@;:@ 8; %X X% ;8 + 8:@88 .%@.S@.XS: 888:8 X :: .: X + :;SX XS8;::@X@8::;8t8: 8;;:@ % SS % + @::@ 88:::8 8::88::::::X@ 8::8 S X + @:XS8 ;::::::8.88%8@S88::::8: 8S8:8 X S + ::;8 t8S8::::@St 88SX @SS@8:8S;8S @@:: 8 8 +8:XX %%@@::X% 8: X@ t:8t X ..%t8 ;8:8   +::8t: tX:X8tS 8: :@ ;.8@.X tt888: @8:: @ X +::8S: @:8Xtt% 8: S 8 8S.   .8;::. 88:: 8 8 +::X8 S@8 8:8.@8@8Xt S  .%8;S %8:: : : + ::@8 S .  @:::::::X;@ 8.88S8 @8;:X :: :; + 8:X8X .@:X88:::::::::888 @;:X %88:: t t + @::S 888@8:::::::::8X; 8 8::8 8 8 + X:;S@ tXX8;::::::::S::@8 8;S:8 X. 
.X + ::8S@ :S88XX@8X: @XX:: :: :: + 8:;%X8.:S t; X8%S:8X tt + X8::8SS8SX8@@X@8;8S:8X + X@8:::::::@XS From 941be2965cbda4b3b49396d93c8d2c1c1caea0dd Mon Sep 17 00:00:00 2001 From: teor Date: Tue, 27 Jun 2023 16:50:35 +1000 Subject: [PATCH 150/265] Replace a chain length assertion with a NotReadyToBeCommitted error (#7072) --- zebra-state/src/error.rs | 2 +- zebra-state/src/service/check.rs | 26 ++++++++++++++++---------- 2 files changed, 17 insertions(+), 11 deletions(-) diff --git a/zebra-state/src/error.rs b/zebra-state/src/error.rs index f4cd7213e47..cf495311efb 100644 --- a/zebra-state/src/error.rs +++ b/zebra-state/src/error.rs @@ -51,7 +51,7 @@ pub struct CommitSemanticallyVerifiedError(#[from] ValidateContextError); #[non_exhaustive] #[allow(missing_docs)] pub enum ValidateContextError { - #[error("block parent not found in any chain")] + #[error("block parent not found in any chain, or not enough blocks in chain")] #[non_exhaustive] NotReadyToBeCommitted, diff --git a/zebra-state/src/service/check.rs b/zebra-state/src/service/check.rs index 07abce9895b..f1e45010194 100644 --- a/zebra-state/src/service/check.rs +++ b/zebra-state/src/service/check.rs @@ -80,19 +80,25 @@ where .expect("valid blocks have a coinbase height"); check::height_one_more_than_parent_height(parent_height, prepared.height)?; - // skip this check during tests if we don't have enough blocks in the chain - #[cfg(test)] if relevant_chain.len() < POW_ADJUSTMENT_BLOCK_SPAN { + // skip this check during tests if we don't have enough blocks in the chain + // process_queued also checks the chain length, so we can skip this assertion during testing + // (tests that want to check this code should use the correct number of blocks) + // + // TODO: accept a NotReadyToBeCommitted error in those tests instead + #[cfg(test)] return Ok(()); + + // In production, blocks without enough context are invalid. 
+ // + // The BlockVerifierRouter makes sure that the first 1 million blocks (or more) are + // checkpoint verified. The state queues and block write task make sure that blocks are + // committed in strict height order. But this function is only called on semantically + // verified blocks, so there will be at least 1 million blocks in the state when it is + // called. So this error should never happen. + #[cfg(not(test))] + return Err(ValidateContextError::NotReadyToBeCommitted); } - // process_queued also checks the chain length, so we can skip this assertion during testing - // (tests that want to check this code should use the correct number of blocks) - assert_eq!( - relevant_chain.len(), - POW_ADJUSTMENT_BLOCK_SPAN, - "state must contain enough blocks to do proof of work contextual validation, \ - and validation must receive the exact number of required blocks" - ); let relevant_data = relevant_chain.iter().map(|block| { ( From 1f1d04b547cd2690f8703f1098f023fc4297809a Mon Sep 17 00:00:00 2001 From: Marek Date: Tue, 27 Jun 2023 10:58:14 +0200 Subject: [PATCH 151/265] change(state): Refactor the structure of finalizable blocks (#7035) * Add and use `FinalizableBlock` This commit adds `FinalizableBlock`, and uses it instead of `ContextuallyVerifiedBlockWithTrees` in `commit_finalized_direct()` * Use `ContextuallyVerifiedBlockWithTrees` This commit passes `ContextuallyVerifiedBlockWithTrees` instead of passing separate `finalized`, `history_tree` and `note_commitment_trees` when storing blocks in the finalized state. 
* Apply suggestions from code review Co-authored-by: teor * add docs to new methods * fix existing doc * rename `ContextuallyVerifiedBlockWithTrees` to `SemanticallyVerifiedBlockWithTrees` * Refactor docs * Refactor comments * Add missing docs, fix typo * Fix rustfmt --------- Co-authored-by: teor Co-authored-by: Alfredo Garcia Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com> --- zebra-state/src/request.rs | 69 ++++++--- zebra-state/src/service/finalized_state.rs | 136 +++++++++--------- .../service/finalized_state/zebra_db/block.rs | 58 ++++---- .../service/finalized_state/zebra_db/chain.rs | 10 +- .../finalized_state/zebra_db/shielded.rs | 16 +-- .../src/service/non_finalized_state.rs | 6 +- .../non_finalized_state/tests/vectors.rs | 11 +- 7 files changed, 167 insertions(+), 139 deletions(-) diff --git a/zebra-state/src/request.rs b/zebra-state/src/request.rs index 5c1516886e8..5a942d99f7e 100644 --- a/zebra-state/src/request.rs +++ b/zebra-state/src/request.rs @@ -255,43 +255,62 @@ impl Treestate { /// when committing a block. The associated treestate is passed so that the /// finalized state does not have to retrieve the previous treestate from the /// database and recompute a new one. -pub struct ContextuallyVerifiedBlockWithTrees { +pub struct SemanticallyVerifiedBlockWithTrees { /// A block ready to be committed. - pub block: SemanticallyVerifiedBlock, + pub verified: SemanticallyVerifiedBlock, /// The tresstate associated with the block. - pub treestate: Option, + pub treestate: Treestate, } -impl ContextuallyVerifiedBlockWithTrees { +/// Contains a block ready to be committed. +/// +/// Zebra's state service passes this `enum` over to the finalized state +/// when committing a block. 
+pub enum FinalizableBlock { + Checkpoint { + checkpoint_verified: CheckpointVerifiedBlock, + }, + Contextual { + contextually_verified: ContextuallyVerifiedBlock, + treestate: Treestate, + }, +} + +impl FinalizableBlock { + /// Create a new [`FinalizableBlock`] given a [`ContextuallyVerifiedBlock`]. pub fn new(contextually_verified: ContextuallyVerifiedBlock, treestate: Treestate) -> Self { - Self { - block: SemanticallyVerifiedBlock::from(contextually_verified), - treestate: Some(treestate), + Self::Contextual { + contextually_verified, + treestate, } } -} -impl From> for ContextuallyVerifiedBlockWithTrees { - fn from(block: Arc) -> Self { - Self::from(SemanticallyVerifiedBlock::from(block)) + #[cfg(test)] + /// Extract a [`Block`] from a [`FinalizableBlock`] variant. + pub fn inner_block(&self) -> Arc { + match self { + FinalizableBlock::Checkpoint { + checkpoint_verified, + } => checkpoint_verified.block.clone(), + FinalizableBlock::Contextual { + contextually_verified, + .. + } => contextually_verified.block.clone(), + } } } -impl From for ContextuallyVerifiedBlockWithTrees { - fn from(semantically_verified: SemanticallyVerifiedBlock) -> Self { - Self { - block: semantically_verified, - treestate: None, +impl From for FinalizableBlock { + fn from(checkpoint_verified: CheckpointVerifiedBlock) -> Self { + Self::Checkpoint { + checkpoint_verified, } } } -impl From for ContextuallyVerifiedBlockWithTrees { - fn from(checkpoint_verified: CheckpointVerifiedBlock) -> Self { - Self { - block: checkpoint_verified.0, - treestate: None, - } +impl From> for FinalizableBlock { + fn from(block: Arc) -> Self { + Self::from(CheckpointVerifiedBlock::from(block)) } } @@ -413,6 +432,12 @@ impl From for SemanticallyVerifiedBlock { } } +impl From for SemanticallyVerifiedBlock { + fn from(checkpoint_verified: CheckpointVerifiedBlock) -> Self { + checkpoint_verified.0 + } +} + impl Deref for CheckpointVerifiedBlock { type Target = SemanticallyVerifiedBlock; diff --git 
a/zebra-state/src/service/finalized_state.rs b/zebra-state/src/service/finalized_state.rs index ca1f5887051..74ae8dd54ba 100644 --- a/zebra-state/src/service/finalized_state.rs +++ b/zebra-state/src/service/finalized_state.rs @@ -23,7 +23,7 @@ use std::{ use zebra_chain::{block, parameters::Network}; use crate::{ - request::ContextuallyVerifiedBlockWithTrees, + request::{FinalizableBlock, SemanticallyVerifiedBlockWithTrees, Treestate}, service::{check, QueuedCheckpointVerified}, BoxError, CheckpointVerifiedBlock, CloneError, Config, }; @@ -225,53 +225,25 @@ impl FinalizedState { #[allow(clippy::unwrap_in_result)] pub fn commit_finalized_direct( &mut self, - contextually_verified_with_trees: ContextuallyVerifiedBlockWithTrees, + finalizable_block: FinalizableBlock, source: &str, ) -> Result { - let finalized = contextually_verified_with_trees.block; - let committed_tip_hash = self.db.finalized_tip_hash(); - let committed_tip_height = self.db.finalized_tip_height(); - - // Assert that callers (including unit tests) get the chain order correct - if self.db.is_empty() { - assert_eq!( - committed_tip_hash, finalized.block.header.previous_block_hash, - "the first block added to an empty state must be a genesis block, source: {source}", - ); - assert_eq!( - block::Height(0), - finalized.height, - "cannot commit genesis: invalid height, source: {source}", - ); - } else { - assert_eq!( - committed_tip_height.expect("state must have a genesis block committed") + 1, - Some(finalized.height), - "committed block height must be 1 more than the finalized tip height, source: {source}", - ); - - assert_eq!( - committed_tip_hash, finalized.block.header.previous_block_hash, - "committed block must be a child of the finalized tip, source: {source}", - ); - } - - let (history_tree, note_commitment_trees) = match contextually_verified_with_trees.treestate - { - // If the treestate associated with the block was supplied, use it - // without recomputing it. 
- Some(ref treestate) => ( - treestate.history_tree.clone(), - treestate.note_commitment_trees.clone(), - ), - // If the treestate was not supplied, retrieve a previous treestate - // from the database, and update it for the block being committed. - None => { + let (height, hash, finalized) = match finalizable_block { + FinalizableBlock::Checkpoint { + checkpoint_verified, + } => { + // Checkpoint-verified blocks don't have an associated treestate, so we retrieve the + // treestate of the finalized tip from the database and update it for the block + // being committed, assuming the retrieved treestate is the parent block's + // treestate. Later on, this function proves this assumption by asserting that the + // finalized tip is the parent block of the block being committed. + + let block = checkpoint_verified.block.clone(); let mut history_tree = self.db.history_tree(); let mut note_commitment_trees = self.db.note_commitment_trees(); // Update the note commitment trees. - note_commitment_trees.update_trees_parallel(&finalized.block)?; + note_commitment_trees.update_trees_parallel(&block)?; // Check the block commitment if the history tree was not // supplied by the non-finalized state. 
Note that we don't do @@ -291,7 +263,7 @@ impl FinalizedState { // TODO: run this CPU-intensive cryptography in a parallel rayon // thread, if it shows up in profiles check::block_commitment_is_valid_for_chain_history( - finalized.block.clone(), + block.clone(), self.network, &history_tree, )?; @@ -303,30 +275,64 @@ impl FinalizedState { let history_tree_mut = Arc::make_mut(&mut history_tree); let sapling_root = note_commitment_trees.sapling.root(); let orchard_root = note_commitment_trees.orchard.root(); - history_tree_mut.push( - self.network(), - finalized.block.clone(), - sapling_root, - orchard_root, - )?; - - (history_tree, note_commitment_trees) + history_tree_mut.push(self.network(), block.clone(), sapling_root, orchard_root)?; + + ( + checkpoint_verified.height, + checkpoint_verified.hash, + SemanticallyVerifiedBlockWithTrees { + verified: checkpoint_verified.0, + treestate: Treestate { + note_commitment_trees, + history_tree, + }, + }, + ) } + FinalizableBlock::Contextual { + contextually_verified, + treestate, + } => ( + contextually_verified.height, + contextually_verified.hash, + SemanticallyVerifiedBlockWithTrees { + verified: contextually_verified.into(), + treestate, + }, + ), }; - let finalized_height = finalized.height; - let finalized_hash = finalized.hash; + let committed_tip_hash = self.db.finalized_tip_hash(); + let committed_tip_height = self.db.finalized_tip_height(); + + // Assert that callers (including unit tests) get the chain order correct + if self.db.is_empty() { + assert_eq!( + committed_tip_hash, finalized.verified.block.header.previous_block_hash, + "the first block added to an empty state must be a genesis block, source: {source}", + ); + assert_eq!( + block::Height(0), + height, + "cannot commit genesis: invalid height, source: {source}", + ); + } else { + assert_eq!( + committed_tip_height.expect("state must have a genesis block committed") + 1, + Some(height), + "committed block height must be 1 more than the finalized tip 
height, source: {source}", + ); + + assert_eq!( + committed_tip_hash, finalized.verified.block.header.previous_block_hash, + "committed block must be a child of the finalized tip, source: {source}", + ); + } #[cfg(feature = "elasticsearch")] - let finalized_block = finalized.block.clone(); - - let result = self.db.write_block( - finalized, - history_tree, - note_commitment_trees, - self.network, - source, - ); + let finalized_block = finalized.verified.block.clone(); + + let result = self.db.write_block(finalized, self.network, source); if result.is_ok() { // Save blocks to elasticsearch if the feature is enabled. @@ -334,10 +340,10 @@ impl FinalizedState { self.elasticsearch(&finalized_block); // TODO: move the stop height check to the syncer (#3442) - if self.is_at_stop_height(finalized_height) { + if self.is_at_stop_height(height) { tracing::info!( - height = ?finalized_height, - hash = ?finalized_hash, + ?height, + ?hash, block_source = ?source, "stopping at configured height, flushing database to disk" ); diff --git a/zebra-state/src/service/finalized_state/zebra_db/block.rs b/zebra-state/src/service/finalized_state/zebra_db/block.rs index aad9f2272bd..e540a0dbbd3 100644 --- a/zebra-state/src/service/finalized_state/zebra_db/block.rs +++ b/zebra-state/src/service/finalized_state/zebra_db/block.rs @@ -19,9 +19,7 @@ use itertools::Itertools; use zebra_chain::{ amount::NonNegative, block::{self, Block, Height}, - history_tree::HistoryTree, orchard, - parallel::tree::NoteCommitmentTrees, parameters::{Network, GENESIS_PREVIOUS_BLOCK_HASH}, sapling, serialization::TrustedPreallocate, @@ -31,6 +29,7 @@ use zebra_chain::{ }; use crate::{ + request::SemanticallyVerifiedBlockWithTrees, service::finalized_state::{ disk_db::{DiskDb, DiskWriteBatch, ReadDisk, WriteDisk}, disk_format::{ @@ -281,15 +280,12 @@ impl ZebraDb { /// - Propagates any errors from updating history and note commitment trees pub(in super::super) fn write_block( &mut self, - finalized: 
SemanticallyVerifiedBlock, - history_tree: Arc, - note_commitment_trees: NoteCommitmentTrees, + finalized: SemanticallyVerifiedBlockWithTrees, network: Network, source: &str, ) -> Result { - let finalized_hash = finalized.hash; - let tx_hash_indexes: HashMap = finalized + .verified .transaction_hashes .iter() .enumerate() @@ -302,11 +298,12 @@ impl ZebraDb { // simplify the spent_utxos location lookup code, // and remove the extra new_outputs_by_out_loc argument let new_outputs_by_out_loc: BTreeMap = finalized + .verified .new_outputs .iter() .map(|(outpoint, ordered_utxo)| { ( - lookup_out_loc(finalized.height, outpoint, &tx_hash_indexes), + lookup_out_loc(finalized.verified.height, outpoint, &tx_hash_indexes), ordered_utxo.utxo.clone(), ) }) @@ -315,6 +312,7 @@ impl ZebraDb { // Get a list of the spent UTXOs, before we delete any from the database let spent_utxos: Vec<(transparent::OutPoint, OutputLocation, transparent::Utxo)> = finalized + .verified .block .transactions .iter() @@ -326,12 +324,13 @@ impl ZebraDb { // Some utxos are spent in the same block, so they will be in // `tx_hash_indexes` and `new_outputs` self.output_location(&outpoint).unwrap_or_else(|| { - lookup_out_loc(finalized.height, &outpoint, &tx_hash_indexes) + lookup_out_loc(finalized.verified.height, &outpoint, &tx_hash_indexes) }), self.utxo(&outpoint) .map(|ordered_utxo| ordered_utxo.utxo) .or_else(|| { finalized + .verified .new_outputs .get(&outpoint) .map(|ordered_utxo| ordered_utxo.utxo.clone()) @@ -356,6 +355,7 @@ impl ZebraDb { .values() .chain( finalized + .verified .new_outputs .values() .map(|ordered_utxo| &ordered_utxo.utxo), @@ -376,13 +376,11 @@ impl ZebraDb { // In case of errors, propagate and do not write the batch. 
batch.prepare_block_batch( &self.db, - finalized, + &finalized, new_outputs_by_out_loc, spent_utxos_by_outpoint, spent_utxos_by_out_loc, address_balances, - history_tree, - note_commitment_trees, self.finalized_value_pool(), )?; @@ -390,7 +388,7 @@ impl ZebraDb { tracing::trace!(?source, "committed block from"); - Ok(finalized_hash) + Ok(finalized.verified.hash) } } @@ -429,25 +427,16 @@ impl DiskWriteBatch { pub fn prepare_block_batch( &mut self, db: &DiskDb, - finalized: SemanticallyVerifiedBlock, + finalized: &SemanticallyVerifiedBlockWithTrees, new_outputs_by_out_loc: BTreeMap, spent_utxos_by_outpoint: HashMap, spent_utxos_by_out_loc: BTreeMap, address_balances: HashMap, - history_tree: Arc, - note_commitment_trees: NoteCommitmentTrees, value_pool: ValueBalance, ) -> Result<(), BoxError> { - let SemanticallyVerifiedBlock { - block, - hash, - height, - .. - } = &finalized; - // Commit block and transaction data. // (Transaction indexes, note commitments, and UTXOs are committed later.) - self.prepare_block_header_and_transaction_data_batch(db, &finalized)?; + self.prepare_block_header_and_transaction_data_batch(db, &finalized.verified)?; // # Consensus // @@ -458,28 +447,37 @@ impl DiskWriteBatch { // // By returning early, Zebra commits the genesis block and transaction data, // but it ignores the genesis UTXO and value pool updates. 
- if self.prepare_genesis_batch(db, &finalized) { + if self.prepare_genesis_batch(db, &finalized.verified) { return Ok(()); } // Commit transaction indexes self.prepare_transparent_transaction_batch( db, - &finalized, + &finalized.verified, &new_outputs_by_out_loc, &spent_utxos_by_outpoint, &spent_utxos_by_out_loc, address_balances, )?; - self.prepare_shielded_transaction_batch(db, &finalized)?; + self.prepare_shielded_transaction_batch(db, &finalized.verified)?; - self.prepare_note_commitment_batch(db, &finalized, note_commitment_trees, history_tree)?; + self.prepare_note_commitment_batch(db, finalized)?; // Commit UTXOs and value pools - self.prepare_chain_value_pools_batch(db, &finalized, spent_utxos_by_outpoint, value_pool)?; + self.prepare_chain_value_pools_batch( + db, + &finalized.verified, + spent_utxos_by_outpoint, + value_pool, + )?; // The block has passed contextual validation, so update the metrics - block_precommit_metrics(block, *hash, *height); + block_precommit_metrics( + &finalized.verified.block, + finalized.verified.hash, + finalized.verified.height, + ); Ok(()) } diff --git a/zebra-state/src/service/finalized_state/zebra_db/chain.rs b/zebra-state/src/service/finalized_state/zebra_db/chain.rs index 590f609d824..7107717a466 100644 --- a/zebra-state/src/service/finalized_state/zebra_db/chain.rs +++ b/zebra-state/src/service/finalized_state/zebra_db/chain.rs @@ -21,6 +21,7 @@ use zebra_chain::{ }; use crate::{ + request::SemanticallyVerifiedBlockWithTrees, service::finalized_state::{ disk_db::{DiskDb, DiskWriteBatch, ReadDisk, WriteDisk}, zebra_db::ZebraDb, @@ -69,15 +70,14 @@ impl DiskWriteBatch { pub fn prepare_history_batch( &mut self, db: &DiskDb, - finalized: &SemanticallyVerifiedBlock, - history_tree: Arc, + finalized: &SemanticallyVerifiedBlockWithTrees, ) -> Result<(), BoxError> { let history_tree_cf = db.cf_handle("history_tree").unwrap(); - let SemanticallyVerifiedBlock { height, .. 
} = finalized; + let height = finalized.verified.height; // Update the tree in state - let current_tip_height = *height - 1; + let current_tip_height = height - 1; if let Some(h) = current_tip_height { self.zs_delete(&history_tree_cf, h); } @@ -87,7 +87,7 @@ impl DiskWriteBatch { // Otherwise, the ReadStateService could access a height // that was just deleted by a concurrent StateService write. // This requires a database version update. - if let Some(history_tree) = history_tree.as_ref().as_ref() { + if let Some(history_tree) = finalized.treestate.history_tree.as_ref().as_ref() { self.zs_insert(&history_tree_cf, height, history_tree); } diff --git a/zebra-state/src/service/finalized_state/zebra_db/shielded.rs b/zebra-state/src/service/finalized_state/zebra_db/shielded.rs index ac306bdfe1b..b5bfe26059e 100644 --- a/zebra-state/src/service/finalized_state/zebra_db/shielded.rs +++ b/zebra-state/src/service/finalized_state/zebra_db/shielded.rs @@ -15,11 +15,12 @@ use std::sync::Arc; use zebra_chain::{ - block::Height, history_tree::HistoryTree, orchard, parallel::tree::NoteCommitmentTrees, - sapling, sprout, transaction::Transaction, + block::Height, orchard, parallel::tree::NoteCommitmentTrees, sapling, sprout, + transaction::Transaction, }; use crate::{ + request::SemanticallyVerifiedBlockWithTrees, service::finalized_state::{ disk_db::{DiskDb, DiskWriteBatch, ReadDisk, WriteDisk}, zebra_db::ZebraDb, @@ -264,9 +265,7 @@ impl DiskWriteBatch { pub fn prepare_note_commitment_batch( &mut self, db: &DiskDb, - finalized: &SemanticallyVerifiedBlock, - note_commitment_trees: NoteCommitmentTrees, - history_tree: Arc, + finalized: &SemanticallyVerifiedBlockWithTrees, ) -> Result<(), BoxError> { let sprout_anchors = db.cf_handle("sprout_anchors").unwrap(); let sapling_anchors = db.cf_handle("sapling_anchors").unwrap(); @@ -276,7 +275,8 @@ impl DiskWriteBatch { let sapling_note_commitment_tree_cf = db.cf_handle("sapling_note_commitment_tree").unwrap(); let 
orchard_note_commitment_tree_cf = db.cf_handle("orchard_note_commitment_tree").unwrap(); - let SemanticallyVerifiedBlock { height, .. } = finalized; + let height = finalized.verified.height; + let note_commitment_trees = finalized.treestate.note_commitment_trees.clone(); // Use the cached values that were previously calculated in parallel. let sprout_root = note_commitment_trees.sprout.root(); @@ -290,7 +290,7 @@ impl DiskWriteBatch { self.zs_insert(&orchard_anchors, orchard_root, ()); // Delete the previously stored Sprout note commitment tree. - let current_tip_height = *height - 1; + let current_tip_height = height - 1; if let Some(h) = current_tip_height { self.zs_delete(&sprout_note_commitment_tree_cf, h); } @@ -317,7 +317,7 @@ impl DiskWriteBatch { note_commitment_trees.orchard, ); - self.prepare_history_batch(db, finalized, history_tree) + self.prepare_history_batch(db, finalized) } /// Prepare a database batch containing the initial note commitment trees, diff --git a/zebra-state/src/service/non_finalized_state.rs b/zebra-state/src/service/non_finalized_state.rs index 6cb9a2d447e..1fa2b29e347 100644 --- a/zebra-state/src/service/non_finalized_state.rs +++ b/zebra-state/src/service/non_finalized_state.rs @@ -16,7 +16,7 @@ use zebra_chain::{ use crate::{ constants::MAX_NON_FINALIZED_CHAIN_FORKS, - request::{ContextuallyVerifiedBlock, ContextuallyVerifiedBlockWithTrees}, + request::{ContextuallyVerifiedBlock, FinalizableBlock}, service::{check, finalized_state::ZebraDb}, SemanticallyVerifiedBlock, ValidateContextError, }; @@ -174,7 +174,7 @@ impl NonFinalizedState { /// Finalize the lowest height block in the non-finalized portion of the best /// chain and update all side-chains to match. - pub fn finalize(&mut self) -> ContextuallyVerifiedBlockWithTrees { + pub fn finalize(&mut self) -> FinalizableBlock { // Chain::cmp uses the partial cumulative work, and the hash of the tip block. // Neither of these fields has interior mutability. 
// (And when the tip block is dropped for a chain, the chain is also dropped.) @@ -226,7 +226,7 @@ impl NonFinalizedState { self.update_metrics_for_chains(); // Add the treestate to the finalized block. - ContextuallyVerifiedBlockWithTrees::new(best_chain_root, root_treestate) + FinalizableBlock::new(best_chain_root, root_treestate) } /// Commit block to the non-finalized state, on top of: diff --git a/zebra-state/src/service/non_finalized_state/tests/vectors.rs b/zebra-state/src/service/non_finalized_state/tests/vectors.rs index a7e008bcf57..9179dee7f89 100644 --- a/zebra-state/src/service/non_finalized_state/tests/vectors.rs +++ b/zebra-state/src/service/non_finalized_state/tests/vectors.rs @@ -213,13 +213,12 @@ fn finalize_pops_from_best_chain_for_network(network: Network) -> Result<()> { state.commit_block(block2.clone().prepare(), &finalized_state)?; state.commit_block(child.prepare(), &finalized_state)?; - let finalized_with_trees = state.finalize(); - let finalized = finalized_with_trees.block; - assert_eq!(block1, finalized.block); + let finalized = state.finalize().inner_block(); - let finalized_with_trees = state.finalize(); - let finalized = finalized_with_trees.block; - assert_eq!(block2, finalized.block); + assert_eq!(block1, finalized); + + let finalized = state.finalize().inner_block(); + assert_eq!(block2, finalized); assert!(state.best_chain().is_none()); From 5324e5afd23ca081a357366fbaf9ddd65599e8dd Mon Sep 17 00:00:00 2001 From: teor Date: Wed, 28 Jun 2023 01:32:30 +1000 Subject: [PATCH 152/265] add(tests): Add snapshot tests for sprout database formats (#7057) * Add methods for loading entire column families from the database * Add a method that loads all the sprout trees from the database * Add snapshot tests for sprout note commitment trees * Add round-trip proptests for tree root database serialization * Add a manual sprout note commitment tree database serialization snapshot test * Add tests for 1,2,4,8 note commitments in a tree * Remove 
redundant "rand" package rename in dependencies * Randomly cache roots rather than only caching even roots --------- Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com> --- Cargo.lock | 1 + tower-batch-control/Cargo.toml | 2 +- zebra-chain/Cargo.toml | 4 +- zebra-consensus/Cargo.toml | 2 +- zebra-network/Cargo.toml | 2 +- zebra-rpc/Cargo.toml | 2 +- zebra-state/Cargo.toml | 3 +- .../src/service/finalized_state/disk_db.rs | 102 +++++- .../finalized_state/disk_format/shielded.rs | 21 ++ .../finalized_state/disk_format/tests/prop.rs | 21 ++ .../service/finalized_state/tests/vectors.rs | 316 +++++++++++++++++- .../zebra_db/block/tests/snapshot.rs | 30 +- .../sprout_tree_at_tip@mainnet_0.snap | 10 + .../sprout_tree_at_tip@mainnet_1.snap | 10 + .../sprout_tree_at_tip@mainnet_2.snap | 10 + .../sprout_tree_at_tip@testnet_0.snap | 10 + .../sprout_tree_at_tip@testnet_1.snap | 10 + .../sprout_tree_at_tip@testnet_2.snap | 10 + .../snapshots/sprout_trees@mainnet_0.snap | 5 + .../snapshots/sprout_trees@mainnet_1.snap | 12 + .../snapshots/sprout_trees@mainnet_2.snap | 12 + .../snapshots/sprout_trees@testnet_0.snap | 5 + .../snapshots/sprout_trees@testnet_1.snap | 12 + .../snapshots/sprout_trees@testnet_2.snap | 12 + .../finalized_state/zebra_db/shielded.rs | 15 +- zebra-test/Cargo.toml | 2 +- zebrad/Cargo.toml | 2 +- 27 files changed, 627 insertions(+), 16 deletions(-) create mode 100644 zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/sprout_tree_at_tip@mainnet_0.snap create mode 100644 zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/sprout_tree_at_tip@mainnet_1.snap create mode 100644 zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/sprout_tree_at_tip@mainnet_2.snap create mode 100644 zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/sprout_tree_at_tip@testnet_0.snap create mode 100644 
zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/sprout_tree_at_tip@testnet_1.snap create mode 100644 zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/sprout_tree_at_tip@testnet_2.snap create mode 100644 zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/sprout_trees@mainnet_0.snap create mode 100644 zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/sprout_trees@mainnet_1.snap create mode 100644 zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/sprout_trees@mainnet_2.snap create mode 100644 zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/sprout_trees@testnet_0.snap create mode 100644 zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/sprout_trees@testnet_1.snap create mode 100644 zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/sprout_trees@testnet_2.snap diff --git a/Cargo.lock b/Cargo.lock index 63195c9fd51..2ac842f7d52 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5881,6 +5881,7 @@ dependencies = [ "once_cell", "proptest", "proptest-derive", + "rand 0.8.5", "rayon", "regex", "rlimit", diff --git a/tower-batch-control/Cargo.toml b/tower-batch-control/Cargo.toml index 12b4c265d99..87c7f1fa7a6 100644 --- a/tower-batch-control/Cargo.toml +++ b/tower-batch-control/Cargo.toml @@ -39,7 +39,7 @@ color-eyre = "0.6.2" tinyvec = { version = "1.6.0", features = ["rustc_1_55"] } ed25519-zebra = "4.0.0" -rand = { version = "0.8.5", package = "rand" } +rand = "0.8.5" tokio = { version = "1.28.2", features = ["full", "tracing", "test-util"] } tokio-test = "0.4.2" diff --git a/zebra-chain/Cargo.toml b/zebra-chain/Cargo.toml index 7615d602c34..c23f864127f 100644 --- a/zebra-chain/Cargo.toml +++ b/zebra-chain/Cargo.toml @@ -114,7 +114,7 @@ zcash_address = { version = "0.2.1", optional = true } proptest = { version = "1.2.0", optional = true } proptest-derive = { version = "0.3.0", optional = true } -rand = { 
version = "0.8.5", optional = true, package = "rand" } +rand = { version = "0.8.5", optional = true } rand_chacha = { version = "0.3.1", optional = true } tokio = { version = "1.28.2", features = ["tracing"], optional = true } @@ -137,7 +137,7 @@ tracing = "0.1.37" proptest = "1.2.0" proptest-derive = "0.3.0" -rand = { version = "0.8.5", package = "rand" } +rand = "0.8.5" rand_chacha = "0.3.1" tokio = { version = "1.28.2", features = ["full", "tracing", "test-util"] } diff --git a/zebra-consensus/Cargo.toml b/zebra-consensus/Cargo.toml index 524bf70b701..ddea30687c5 100644 --- a/zebra-consensus/Cargo.toml +++ b/zebra-consensus/Cargo.toml @@ -40,7 +40,7 @@ bellman = "0.14.0" bls12_381 = "0.8.0" halo2 = { package = "halo2_proofs", version = "0.3.0" } jubjub = "0.10.0" -rand = { version = "0.8.5", package = "rand" } +rand = "0.8.5" rayon = "1.7.0" chrono = { version = "0.4.26", default-features = false, features = ["clock", "std"] } diff --git a/zebra-network/Cargo.toml b/zebra-network/Cargo.toml index 7bb8e14f0ce..73fb7c239d7 100644 --- a/zebra-network/Cargo.toml +++ b/zebra-network/Cargo.toml @@ -53,7 +53,7 @@ lazy_static = "1.4.0" num-integer = "0.1.45" ordered-map = "0.4.2" pin-project = "1.1.0" -rand = { version = "0.8.5", package = "rand" } +rand = "0.8.5" rayon = "1.7.0" regex = "1.8.4" serde = { version = "1.0.164", features = ["serde_derive"] } diff --git a/zebra-rpc/Cargo.toml b/zebra-rpc/Cargo.toml index e9e4aa0e992..3b05d7ac071 100644 --- a/zebra-rpc/Cargo.toml +++ b/zebra-rpc/Cargo.toml @@ -63,7 +63,7 @@ hex = { version = "0.4.3", features = ["serde"] } serde = { version = "1.0.164", features = ["serde_derive"] } # Experimental feature getblocktemplate-rpcs -rand = { version = "0.8.5", package = "rand", optional = true } +rand = { version = "0.8.5", optional = true } # ECC deps used by getblocktemplate-rpcs feature zcash_address = { version = "0.2.1", optional = true } diff --git a/zebra-state/Cargo.toml b/zebra-state/Cargo.toml index 
e4dcda077a9..143d772f2c2 100644 --- a/zebra-state/Cargo.toml +++ b/zebra-state/Cargo.toml @@ -91,10 +91,11 @@ once_cell = "1.18.0" spandoc = "0.2.2" hex = { version = "0.4.3", features = ["serde"] } -insta = { version = "1.30.0", features = ["ron"] } +insta = { version = "1.30.0", features = ["ron", "redactions"] } proptest = "1.2.0" proptest-derive = "0.3.0" +rand = "0.8.5" halo2 = { package = "halo2_proofs", version = "0.3.0" } jubjub = "0.10.0" diff --git a/zebra-state/src/service/finalized_state/disk_db.rs b/zebra-state/src/service/finalized_state/disk_db.rs index f896b3aac25..7e8ebe44662 100644 --- a/zebra-state/src/service/finalized_state/disk_db.rs +++ b/zebra-state/src/service/finalized_state/disk_db.rs @@ -10,7 +10,14 @@ //! The [`crate::constants::DATABASE_FORMAT_VERSION`] constant must //! be incremented each time the database format (column, serialization, etc) changes. -use std::{cmp::Ordering, fmt::Debug, path::Path, sync::Arc}; +use std::{ + cmp::Ordering, + collections::{BTreeMap, HashMap}, + fmt::Debug, + ops::RangeBounds, + path::Path, + sync::Arc, +}; use itertools::Itertools; use rlimit::increase_nofile_limit; @@ -146,6 +153,7 @@ impl WriteDisk for DiskWriteBatch { /// defined format // // TODO: just implement these methods directly on DiskDb +// move this trait, its methods, and support methods to another module pub trait ReadDisk { /// Returns true if a rocksdb column family `cf` does not contain any entries. fn zs_is_empty(&self, cf: &C) -> bool @@ -202,6 +210,26 @@ pub trait ReadDisk { C: rocksdb::AsColumnFamilyRef, K: IntoDisk + FromDisk, V: FromDisk; + + /// Returns the keys and values in `cf` in `range`, in an ordered `BTreeMap`. + /// + /// Holding this iterator open might delay block commit transactions. 
+ fn zs_items_in_range_ordered(&self, cf: &C, range: R) -> BTreeMap + where + C: rocksdb::AsColumnFamilyRef, + K: IntoDisk + FromDisk + Ord, + V: FromDisk, + R: RangeBounds; + + /// Returns the keys and values in `cf` in `range`, in an unordered `HashMap`. + /// + /// Holding this iterator open might delay block commit transactions. + fn zs_items_in_range_unordered(&self, cf: &C, range: R) -> HashMap + where + C: rocksdb::AsColumnFamilyRef, + K: IntoDisk + FromDisk + Eq + std::hash::Hash, + V: FromDisk, + R: RangeBounds; } impl PartialEq for DiskDb { @@ -342,6 +370,26 @@ impl ReadDisk for DiskDb { }) .expect("unexpected database failure") } + + fn zs_items_in_range_ordered(&self, cf: &C, range: R) -> BTreeMap + where + C: rocksdb::AsColumnFamilyRef, + K: IntoDisk + FromDisk + Ord, + V: FromDisk, + R: RangeBounds, + { + self.zs_range_iter(cf, range).collect() + } + + fn zs_items_in_range_unordered(&self, cf: &C, range: R) -> HashMap + where + C: rocksdb::AsColumnFamilyRef, + K: IntoDisk + FromDisk + Eq + std::hash::Hash, + V: FromDisk, + R: RangeBounds, + { + self.zs_range_iter(cf, range).collect() + } } impl DiskWriteBatch { @@ -366,6 +414,58 @@ impl DiskWriteBatch { } impl DiskDb { + /// Returns an iterator over the items in `cf` in `range`. + /// + /// Holding this iterator open might delay block commit transactions. 
+ fn zs_range_iter(&self, cf: &C, range: R) -> impl Iterator + '_ + where + C: rocksdb::AsColumnFamilyRef, + K: IntoDisk + FromDisk, + V: FromDisk, + R: RangeBounds, + { + use std::ops::Bound::{self, *}; + + // Replace with map() when it stabilises: + // https://github.com/rust-lang/rust/issues/86026 + let map_to_vec = |bound: Bound<&K>| -> Bound> { + match bound { + Unbounded => Unbounded, + Included(x) => Included(x.as_bytes().as_ref().to_vec()), + Excluded(x) => Excluded(x.as_bytes().as_ref().to_vec()), + } + }; + + let start_bound = map_to_vec(range.start_bound()); + let end_bound = map_to_vec(range.end_bound()); + let range = (start_bound.clone(), end_bound); + + let start_bound_vec = + if let Included(ref start_bound) | Excluded(ref start_bound) = start_bound { + start_bound.clone() + } else { + // Actually unused + Vec::new() + }; + + let start_mode = if matches!(start_bound, Unbounded) { + // Unbounded iterators start at the first item + rocksdb::IteratorMode::Start + } else { + rocksdb::IteratorMode::From(start_bound_vec.as_slice(), rocksdb::Direction::Forward) + }; + + // Reading multiple items from iterators has caused database hangs, + // in previous RocksDB versions + self.db + .iterator_cf(cf, start_mode) + .map(|result| result.expect("unexpected database failure")) + .map(|(key, value)| (key.to_vec(), value)) + // Handle Excluded start and the end bound + .filter(move |(key, _value)| range.contains(key)) + .map(|(key, value)| (K::from_bytes(key), V::from_bytes(value))) + } + /// The ideal open file limit for Zebra const IDEAL_OPEN_FILE_LIMIT: u64 = 1024; diff --git a/zebra-state/src/service/finalized_state/disk_format/shielded.rs b/zebra-state/src/service/finalized_state/disk_format/shielded.rs index 8836549c332..3b136236542 100644 --- a/zebra-state/src/service/finalized_state/disk_format/shielded.rs +++ b/zebra-state/src/service/finalized_state/disk_format/shielded.rs @@ -44,6 +44,13 @@ impl IntoDisk for sprout::tree::Root { } } +impl FromDisk for 
sprout::tree::Root { + fn from_bytes(bytes: impl AsRef<[u8]>) -> Self { + let array: [u8; 32] = bytes.as_ref().try_into().unwrap(); + array.into() + } +} + impl IntoDisk for sapling::tree::Root { type Bytes = [u8; 32]; @@ -52,6 +59,13 @@ impl IntoDisk for sapling::tree::Root { } } +impl FromDisk for sapling::tree::Root { + fn from_bytes(bytes: impl AsRef<[u8]>) -> Self { + let array: [u8; 32] = bytes.as_ref().try_into().unwrap(); + array.try_into().expect("finalized data must be valid") + } +} + impl IntoDisk for orchard::tree::Root { type Bytes = [u8; 32]; @@ -60,6 +74,13 @@ impl IntoDisk for orchard::tree::Root { } } +impl FromDisk for orchard::tree::Root { + fn from_bytes(bytes: impl AsRef<[u8]>) -> Self { + let array: [u8; 32] = bytes.as_ref().try_into().unwrap(); + array.try_into().expect("finalized data must be valid") + } +} + // The following implementations for the note commitment trees use `serde` and // `bincode` because currently the inner Merkle tree frontier (from // `incrementalmerkletree`) only supports `serde` for serialization. 
`bincode` diff --git a/zebra-state/src/service/finalized_state/disk_format/tests/prop.rs b/zebra-state/src/service/finalized_state/disk_format/tests/prop.rs index 8c5edfa03a8..6b7261082ab 100644 --- a/zebra-state/src/service/finalized_state/disk_format/tests/prop.rs +++ b/zebra-state/src/service/finalized_state/disk_format/tests/prop.rs @@ -279,6 +279,13 @@ fn serialized_sprout_tree_root_equal() { ); } +#[test] +fn roundtrip_sprout_tree_root() { + let _init_guard = zebra_test::init(); + + proptest!(|(val in any::())| assert_value_properties(val)); +} + // TODO: test note commitment tree round-trip, after implementing proptest::Arbitrary // Sapling @@ -347,6 +354,13 @@ fn serialized_sapling_tree_root_equal() { ); } +#[test] +fn roundtrip_sapling_tree_root() { + let _init_guard = zebra_test::init(); + + proptest!(|(val in any::())| assert_value_properties(val)); +} + // TODO: test note commitment tree round-trip, after implementing proptest::Arbitrary // Orchard @@ -415,6 +429,13 @@ fn serialized_orchard_tree_root_equal() { ); } +#[test] +fn roundtrip_orchard_tree_root() { + let _init_guard = zebra_test::init(); + + proptest!(|(val in any::())| assert_value_properties(val)); +} + // TODO: test note commitment tree round-trip, after implementing proptest::Arbitrary // Chain diff --git a/zebra-state/src/service/finalized_state/tests/vectors.rs b/zebra-state/src/service/finalized_state/tests/vectors.rs index 73f787ac67b..98975646263 100644 --- a/zebra-state/src/service/finalized_state/tests/vectors.rs +++ b/zebra-state/src/service/finalized_state/tests/vectors.rs @@ -1,11 +1,142 @@ //! Fixed test vectors for the finalized state. +//! These tests contain snapshots of the note commitment tree serialization format. +//! +//! We don't need to check empty trees, because the database format snapshot tests +//! use empty trees. 
use halo2::pasta::{group::ff::PrimeField, pallas}; use hex::FromHex; +use rand::random; + +use zebra_chain::{orchard, sapling, sprout}; use crate::service::finalized_state::disk_format::{FromDisk, IntoDisk}; -use zebra_chain::{orchard, sapling}; +/// Check that the sprout tree database serialization format has not changed. +#[test] +fn sprout_note_commitment_tree_serialization() { + let _init_guard = zebra_test::init(); + + let mut incremental_tree = sprout::tree::NoteCommitmentTree::default(); + + // Some commitments from zebra-chain/src/sprout/tests/test_vectors.rs + let hex_commitments = [ + "62fdad9bfbf17c38ea626a9c9b8af8a748e6b4367c8494caf0ca592999e8b6ba", + "68eb35bc5e1ddb80a761718e63a1ecf4d4977ae22cc19fa732b85515b2a4c943", + "836045484077cf6390184ea7cd48b460e2d0f22b2293b69633bb152314a692fb", + ]; + + for (idx, cm_hex) in hex_commitments.iter().enumerate() { + let bytes = <[u8; 32]>::from_hex(cm_hex).unwrap(); + + let cm = sprout::NoteCommitment::from(bytes); + incremental_tree.append(cm).unwrap(); + if random() { + info!(?idx, "randomly caching root for note commitment tree index"); + // Cache the root half of the time to make sure it works in both cases + let _ = incremental_tree.root(); + } + } + + // Make sure the last root is cached + let _ = incremental_tree.root(); + + // This test vector was generated by the code itself. + // The purpose of this test is to make sure the serialization format does + // not change by accident. 
+ let expected_serialized_tree_hex = "010200836045484077cf6390184ea7cd48b460e2d0f22b2293b69633bb152314a692fb019f5b2b1e4bf7e7318d0a1f417ca6bca36077025b3d11e074b94cd55ce9f3861801c45297124f50dcd3f78eed017afd1e30764cd74cdf0a57751978270fd0721359"; + let serialized_tree = incremental_tree.as_bytes(); + assert_eq!(hex::encode(&serialized_tree), expected_serialized_tree_hex); + + let deserialized_tree = sprout::tree::NoteCommitmentTree::from_bytes(serialized_tree); + + assert_eq!(incremental_tree.root(), deserialized_tree.root()); +} + +/// Check that the sprout tree database serialization format has not changed for one commitment. +#[test] +fn sprout_note_commitment_tree_serialization_one() { + let _init_guard = zebra_test::init(); + + let mut incremental_tree = sprout::tree::NoteCommitmentTree::default(); + + // Some commitments from zebra-chain/src/sprout/tests/test_vectors.rs + let hex_commitments = ["836045484077cf6390184ea7cd48b460e2d0f22b2293b69633bb152314a692fb"]; + + for (idx, cm_hex) in hex_commitments.iter().enumerate() { + let bytes = <[u8; 32]>::from_hex(cm_hex).unwrap(); + + let cm = sprout::NoteCommitment::from(bytes); + incremental_tree.append(cm).unwrap(); + if random() { + info!(?idx, "randomly caching root for note commitment tree index"); + // Cache the root half of the time to make sure it works in both cases + let _ = incremental_tree.root(); + } + } + + // Make sure the last root is cached + let _ = incremental_tree.root(); + + // This test vector was generated by the code itself. + // The purpose of this test is to make sure the serialization format does + // not change by accident. 
+ let expected_serialized_tree_hex = "010000836045484077cf6390184ea7cd48b460e2d0f22b2293b69633bb152314a692fb000193e5f97ce1d5d94d0c6e1b66a4a262c9ae89e56e28f3f6e4a557b6fb70e173a8"; + let serialized_tree = incremental_tree.as_bytes(); + assert_eq!(hex::encode(&serialized_tree), expected_serialized_tree_hex); + + let deserialized_tree = sprout::tree::NoteCommitmentTree::from_bytes(serialized_tree); + + assert_eq!(incremental_tree.root(), deserialized_tree.root()); +} + +/// Check that the sprout tree database serialization format has not changed when the number of +/// commitments is a power of two. +/// +/// Some trees have special handling for even numbers of roots, or powers of two, +/// so we also check that case. +#[test] +fn sprout_note_commitment_tree_serialization_pow2() { + let _init_guard = zebra_test::init(); + + let mut incremental_tree = sprout::tree::NoteCommitmentTree::default(); + + // Some commitments from zebra-chain/src/sprout/tests/test_vectors.rs + let hex_commitments = [ + "62fdad9bfbf17c38ea626a9c9b8af8a748e6b4367c8494caf0ca592999e8b6ba", + "68eb35bc5e1ddb80a761718e63a1ecf4d4977ae22cc19fa732b85515b2a4c943", + "836045484077cf6390184ea7cd48b460e2d0f22b2293b69633bb152314a692fb", + "92498a8295ea36d593eaee7cb8b55be3a3e37b8185d3807693184054cd574ae4", + ]; + + for (idx, cm_hex) in hex_commitments.iter().enumerate() { + let bytes = <[u8; 32]>::from_hex(cm_hex).unwrap(); + + let cm = sprout::NoteCommitment::from(bytes); + incremental_tree.append(cm).unwrap(); + if random() { + info!(?idx, "randomly caching root for note commitment tree index"); + // Cache the root half of the time to make sure it works in both cases + let _ = incremental_tree.root(); + } + } + + // Make sure the last root is cached + let _ = incremental_tree.root(); + + // This test vector was generated by the code itself. + // The purpose of this test is to make sure the serialization format does + // not change by accident. 
+ let expected_serialized_tree_hex = "010301836045484077cf6390184ea7cd48b460e2d0f22b2293b69633bb152314a692fb92498a8295ea36d593eaee7cb8b55be3a3e37b8185d3807693184054cd574ae4019f5b2b1e4bf7e7318d0a1f417ca6bca36077025b3d11e074b94cd55ce9f3861801b61f588fcba9cea79e94376adae1c49583f716d2f20367141f1369a235b95c98"; + let serialized_tree = incremental_tree.as_bytes(); + assert_eq!(hex::encode(&serialized_tree), expected_serialized_tree_hex); + + let deserialized_tree = sprout::tree::NoteCommitmentTree::from_bytes(serialized_tree); + + assert_eq!(incremental_tree.root(), deserialized_tree.root()); +} + +/// Check that the sapling tree database serialization format has not changed. #[test] fn sapling_note_commitment_tree_serialization() { let _init_guard = zebra_test::init(); @@ -24,7 +155,8 @@ fn sapling_note_commitment_tree_serialization() { let cm_u = jubjub::Fq::from_bytes(&bytes).unwrap(); incremental_tree.append(cm_u).unwrap(); - if idx % 2 == 0 { + if random() { + info!(?idx, "randomly caching root for note commitment tree index"); // Cache the root half of the time to make sure it works in both cases let _ = incremental_tree.root(); } @@ -45,6 +177,94 @@ fn sapling_note_commitment_tree_serialization() { assert_eq!(incremental_tree.root(), deserialized_tree.root()); } +/// Check that the sapling tree database serialization format has not changed for one commitment. 
+#[test] +fn sapling_note_commitment_tree_serialization_one() { + let _init_guard = zebra_test::init(); + + let mut incremental_tree = sapling::tree::NoteCommitmentTree::default(); + + // Some commitments from zebra-chain/src/sapling/tests/test_vectors.rs + let hex_commitments = ["225747f3b5d5dab4e5a424f81f85c904ff43286e0f3fd07ef0b8c6a627b11458"]; + + for (idx, cm_u_hex) in hex_commitments.iter().enumerate() { + let bytes = <[u8; 32]>::from_hex(cm_u_hex).unwrap(); + + let cm_u = jubjub::Fq::from_bytes(&bytes).unwrap(); + incremental_tree.append(cm_u).unwrap(); + if random() { + info!(?idx, "randomly caching root for note commitment tree index"); + // Cache the root half of the time to make sure it works in both cases + let _ = incremental_tree.root(); + } + } + + // Make sure the last root is cached + let _ = incremental_tree.root(); + + // This test vector was generated by the code itself. + // The purpose of this test is to make sure the serialization format does + // not change by accident. + let expected_serialized_tree_hex = "010000225747f3b5d5dab4e5a424f81f85c904ff43286e0f3fd07ef0b8c6a627b1145800012c60c7de033d7539d123fb275011edfe08d57431676981d162c816372063bc71"; + let serialized_tree = incremental_tree.as_bytes(); + assert_eq!(hex::encode(&serialized_tree), expected_serialized_tree_hex); + + let deserialized_tree = sapling::tree::NoteCommitmentTree::from_bytes(serialized_tree); + + assert_eq!(incremental_tree.root(), deserialized_tree.root()); +} + +/// Check that the sapling tree database serialization format has not changed when the number of +/// commitments is a power of two. +/// +/// Some trees have special handling for even numbers of roots, or powers of two, +/// so we also check that case. 
+#[test] +fn sapling_note_commitment_tree_serialization_pow2() { + let _init_guard = zebra_test::init(); + + let mut incremental_tree = sapling::tree::NoteCommitmentTree::default(); + + // Some commitments from zebra-chain/src/sapling/tests/test_vectors.rs + let hex_commitments = [ + "3a27fed5dbbc475d3880360e38638c882fd9b273b618fc433106896083f77446", + "c7ca8f7df8fd997931d33985d935ee2d696856cc09cc516d419ea6365f163008", + "f0fa37e8063b139d342246142fc48e7c0c50d0a62c97768589e06466742c3702", + "e6d4d7685894d01b32f7e081ab188930be6c2b9f76d6847b7f382e3dddd7c608", + "8cebb73be883466d18d3b0c06990520e80b936440a2c9fd184d92a1f06c4e826", + "22fab8bcdb88154dbf5877ad1e2d7f1b541bc8a5ec1b52266095381339c27c03", + "f43e3aac61e5a753062d4d0508c26ceaf5e4c0c58ba3c956e104b5d2cf67c41c", + "3a3661bc12b72646c94bc6c92796e81953985ee62d80a9ec3645a9a95740ac15", + ]; + + for (idx, cm_u_hex) in hex_commitments.iter().enumerate() { + let bytes = <[u8; 32]>::from_hex(cm_u_hex).unwrap(); + + let cm_u = jubjub::Fq::from_bytes(&bytes).unwrap(); + incremental_tree.append(cm_u).unwrap(); + if random() { + info!(?idx, "randomly caching root for note commitment tree index"); + // Cache the root half of the time to make sure it works in both cases + let _ = incremental_tree.root(); + } + } + + // Make sure the last root is cached + let _ = incremental_tree.root(); + + // This test vector was generated by the code itself. + // The purpose of this test is to make sure the serialization format does + // not change by accident. 
+ let expected_serialized_tree_hex = "010701f43e3aac61e5a753062d4d0508c26ceaf5e4c0c58ba3c956e104b5d2cf67c41c3a3661bc12b72646c94bc6c92796e81953985ee62d80a9ec3645a9a95740ac15025991131c5c25911b35fcea2a8343e2dfd7a4d5b45493390e0cb184394d91c349002df68503da9247dfde6585cb8c9fa94897cf21735f8fc1b32116ef474de05c01d23765f3d90dfd97817ed6d995bd253d85967f77b9f1eaef6ecbcb0ef6796812"; + let serialized_tree = incremental_tree.as_bytes(); + assert_eq!(hex::encode(&serialized_tree), expected_serialized_tree_hex); + + let deserialized_tree = sapling::tree::NoteCommitmentTree::from_bytes(serialized_tree); + + assert_eq!(incremental_tree.root(), deserialized_tree.root()); +} + +/// Check that the orchard tree database serialization format has not changed. #[test] fn orchard_note_commitment_tree_serialization() { let _init_guard = zebra_test::init(); @@ -73,7 +293,8 @@ fn orchard_note_commitment_tree_serialization() { for (idx, cm_x_bytes) in commitments.iter().enumerate() { let cm_x = pallas::Base::from_repr(*cm_x_bytes).unwrap(); incremental_tree.append(cm_x).unwrap(); - if idx % 2 == 0 { + if random() { + info!(?idx, "randomly caching root for note commitment tree index"); // Cache the root half of the time to make sure it works in both cases let _ = incremental_tree.root(); } @@ -93,3 +314,92 @@ fn orchard_note_commitment_tree_serialization() { assert_eq!(incremental_tree.root(), deserialized_tree.root()); } + +/// Check that the orchard tree database serialization format has not changed for one commitment. 
+#[test] +fn orchard_note_commitment_tree_serialization_one() { + let _init_guard = zebra_test::init(); + + let mut incremental_tree = orchard::tree::NoteCommitmentTree::default(); + + // Some commitments from zebra-chain/src/orchard/tests/tree.rs + let commitments = [[ + 0x68, 0x13, 0x5c, 0xf4, 0x99, 0x33, 0x22, 0x90, 0x99, 0xa4, 0x4e, 0xc9, 0x9a, 0x75, 0xe1, + 0xe1, 0xcb, 0x46, 0x40, 0xf9, 0xb5, 0xbd, 0xec, 0x6b, 0x32, 0x23, 0x85, 0x6f, 0xea, 0x16, + 0x39, 0x0a, + ]]; + + for (idx, cm_x_bytes) in commitments.iter().enumerate() { + let cm_x = pallas::Base::from_repr(*cm_x_bytes).unwrap(); + incremental_tree.append(cm_x).unwrap(); + if random() { + info!(?idx, "randomly caching root for note commitment tree index"); + // Cache the root half of the time to make sure it works in both cases + let _ = incremental_tree.root(); + } + } + + // Make sure the last root is cached + let _ = incremental_tree.root(); + + // This test vector was generated by the code itself. + // The purpose of this test is to make sure the serialization format does + // not change by accident. + let expected_serialized_tree_hex = "01000068135cf49933229099a44ec99a75e1e1cb4640f9b5bdec6b3223856fea16390a000178afd4da59c541e9c2f317f9aff654f1fb38d14dc99431cbbfa93601c7068117"; + let serialized_tree = incremental_tree.as_bytes(); + assert_eq!(hex::encode(&serialized_tree), expected_serialized_tree_hex); + + let deserialized_tree = orchard::tree::NoteCommitmentTree::from_bytes(serialized_tree); + + assert_eq!(incremental_tree.root(), deserialized_tree.root()); +} + +/// Check that the orchard tree database serialization format has not changed when the number of +/// commitments is a power of two. +/// +/// Some trees have special handling for even numbers of roots, or powers of two, +/// so we also check that case. 
+#[test] +fn orchard_note_commitment_tree_serialization_pow2() { + let _init_guard = zebra_test::init(); + + let mut incremental_tree = orchard::tree::NoteCommitmentTree::default(); + + // Some commitments from zebra-chain/src/orchard/tests/tree.rs + let commitments = [ + [ + 0x78, 0x31, 0x50, 0x08, 0xfb, 0x29, 0x98, 0xb4, 0x30, 0xa5, 0x73, 0x1d, 0x67, 0x26, + 0x20, 0x7d, 0xc0, 0xf0, 0xec, 0x81, 0xea, 0x64, 0xaf, 0x5c, 0xf6, 0x12, 0x95, 0x69, + 0x01, 0xe7, 0x2f, 0x0e, + ], + [ + 0xee, 0x94, 0x88, 0x05, 0x3a, 0x30, 0xc5, 0x96, 0xb4, 0x30, 0x14, 0x10, 0x5d, 0x34, + 0x77, 0xe6, 0xf5, 0x78, 0xc8, 0x92, 0x40, 0xd1, 0xd1, 0xee, 0x17, 0x43, 0xb7, 0x7b, + 0xb6, 0xad, 0xc4, 0x0a, + ], + ]; + + for (idx, cm_x_bytes) in commitments.iter().enumerate() { + let cm_x = pallas::Base::from_repr(*cm_x_bytes).unwrap(); + incremental_tree.append(cm_x).unwrap(); + if random() { + info!(?idx, "randomly caching root for note commitment tree index"); + // Cache the root half of the time to make sure it works in both cases + let _ = incremental_tree.root(); + } + } + + // Make sure the last root is cached + let _ = incremental_tree.root(); + + // This test vector was generated by the code itself. + // The purpose of this test is to make sure the serialization format does + // not change by accident. 
+ let expected_serialized_tree_hex = "01010178315008fb2998b430a5731d6726207dc0f0ec81ea64af5cf612956901e72f0eee9488053a30c596b43014105d3477e6f578c89240d1d1ee1743b77bb6adc40a0001d3d525931005e45f5a29bc82524e871e5ee1b6d77839deb741a6e50cd99fdf1a"; + let serialized_tree = incremental_tree.as_bytes(); + assert_eq!(hex::encode(&serialized_tree), expected_serialized_tree_hex); + + let deserialized_tree = orchard::tree::NoteCommitmentTree::from_bytes(serialized_tree); + + assert_eq!(incremental_tree.root(), deserialized_tree.root()); +} diff --git a/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshot.rs b/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshot.rs index 8ce1e67ece5..9c19f29ee61 100644 --- a/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshot.rs +++ b/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshot.rs @@ -217,6 +217,8 @@ fn snapshot_block_and_transaction_data(state: &FinalizedState) { if let Some((max_height, tip_block_hash)) = tip { // Check that the database returns empty note commitment trees for the // genesis block. + // + // We only store the sprout tree for the tip by height, so we can't check sprout here. 
let sapling_tree = state .sapling_note_commitment_tree_by_height(&block::Height::MIN) .expect("the genesis block in the database has a Sapling tree"); @@ -241,9 +243,11 @@ fn snapshot_block_and_transaction_data(state: &FinalizedState) { // Shielded + let stored_sprout_trees = state.sprout_note_commitments_full_map(); let mut stored_sapling_trees = Vec::new(); let mut stored_orchard_trees = Vec::new(); + let sprout_tree_at_tip = state.sprout_note_commitment_tree(); let sapling_tree_at_tip = state.sapling_note_commitment_tree(); let orchard_tree_at_tip = state.orchard_note_commitment_tree(); @@ -268,9 +272,11 @@ fn snapshot_block_and_transaction_data(state: &FinalizedState) { .block(query_height.into()) .expect("heights up to tip have blocks"); - // Check the sapling and orchard note commitment trees. + // Check the shielded note commitment trees. + // + // We only store the sprout tree for the tip by height, so we can't check sprout here. // - // TODO: test the rest of the shielded data (anchors, nullifiers, sprout) + // TODO: test the rest of the shielded data (anchors, nullifiers) let sapling_tree_by_height = state .sapling_note_commitment_tree_by_height(&query_height) .expect("heights up to tip have Sapling trees"); @@ -297,6 +303,18 @@ fn snapshot_block_and_transaction_data(state: &FinalizedState) { if query_height == max_height { assert_eq!(stored_block_hash, tip_block_hash); + // We only store the sprout tree for the tip by height, + // so the sprout check is less strict. + // We enforce the tip tree order by snapshotting it as well. 
+ if let Some(stored_tree) = stored_sprout_trees.get(&sprout_tree_at_tip.root()) { + assert_eq!( + &sprout_tree_at_tip, stored_tree, + "unexpected missing sprout tip tree:\n\ + all trees: {stored_sprout_trees:?}" + ); + } else { + assert_eq!(sprout_tree_at_tip, Default::default()); + } assert_eq!(sapling_tree_at_tip, sapling_tree_by_height); assert_eq!(orchard_tree_at_tip, orchard_tree_by_height); @@ -427,6 +445,14 @@ fn snapshot_block_and_transaction_data(state: &FinalizedState) { // These snapshots will change if the trees do not have cached roots. // But we expect them to always have cached roots, // because those roots are used to populate the anchor column families. + insta::assert_ron_snapshot!("sprout_tree_at_tip", sprout_tree_at_tip); + insta::assert_ron_snapshot!( + "sprout_trees", + stored_sprout_trees, + { + "." => insta::sorted_redaction() + } + ); insta::assert_ron_snapshot!("sapling_trees", stored_sapling_trees); insta::assert_ron_snapshot!("orchard_trees", stored_orchard_trees); diff --git a/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/sprout_tree_at_tip@mainnet_0.snap b/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/sprout_tree_at_tip@mainnet_0.snap new file mode 100644 index 00000000000..b18353999df --- /dev/null +++ b/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/sprout_tree_at_tip@mainnet_0.snap @@ -0,0 +1,10 @@ +--- +source: zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshot.rs +expression: sprout_tree_at_tip +--- +NoteCommitmentTree( + inner: Frontier( + frontier: None, + ), + cached_root: Some(Root((215, 198, 18, 200, 23, 121, 49, 145, 161, 230, 134, 82, 18, 24, 118, 214, 179, 189, 228, 15, 79, 165, 43, 195, 20, 20, 92, 230, 229, 205, 210, 89))), +) diff --git a/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/sprout_tree_at_tip@mainnet_1.snap 
b/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/sprout_tree_at_tip@mainnet_1.snap new file mode 100644 index 00000000000..b18353999df --- /dev/null +++ b/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/sprout_tree_at_tip@mainnet_1.snap @@ -0,0 +1,10 @@ +--- +source: zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshot.rs +expression: sprout_tree_at_tip +--- +NoteCommitmentTree( + inner: Frontier( + frontier: None, + ), + cached_root: Some(Root((215, 198, 18, 200, 23, 121, 49, 145, 161, 230, 134, 82, 18, 24, 118, 214, 179, 189, 228, 15, 79, 165, 43, 195, 20, 20, 92, 230, 229, 205, 210, 89))), +) diff --git a/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/sprout_tree_at_tip@mainnet_2.snap b/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/sprout_tree_at_tip@mainnet_2.snap new file mode 100644 index 00000000000..b18353999df --- /dev/null +++ b/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/sprout_tree_at_tip@mainnet_2.snap @@ -0,0 +1,10 @@ +--- +source: zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshot.rs +expression: sprout_tree_at_tip +--- +NoteCommitmentTree( + inner: Frontier( + frontier: None, + ), + cached_root: Some(Root((215, 198, 18, 200, 23, 121, 49, 145, 161, 230, 134, 82, 18, 24, 118, 214, 179, 189, 228, 15, 79, 165, 43, 195, 20, 20, 92, 230, 229, 205, 210, 89))), +) diff --git a/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/sprout_tree_at_tip@testnet_0.snap b/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/sprout_tree_at_tip@testnet_0.snap new file mode 100644 index 00000000000..b18353999df --- /dev/null +++ b/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/sprout_tree_at_tip@testnet_0.snap @@ -0,0 +1,10 @@ +--- +source: zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshot.rs +expression: sprout_tree_at_tip +--- 
+NoteCommitmentTree( + inner: Frontier( + frontier: None, + ), + cached_root: Some(Root((215, 198, 18, 200, 23, 121, 49, 145, 161, 230, 134, 82, 18, 24, 118, 214, 179, 189, 228, 15, 79, 165, 43, 195, 20, 20, 92, 230, 229, 205, 210, 89))), +) diff --git a/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/sprout_tree_at_tip@testnet_1.snap b/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/sprout_tree_at_tip@testnet_1.snap new file mode 100644 index 00000000000..b18353999df --- /dev/null +++ b/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/sprout_tree_at_tip@testnet_1.snap @@ -0,0 +1,10 @@ +--- +source: zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshot.rs +expression: sprout_tree_at_tip +--- +NoteCommitmentTree( + inner: Frontier( + frontier: None, + ), + cached_root: Some(Root((215, 198, 18, 200, 23, 121, 49, 145, 161, 230, 134, 82, 18, 24, 118, 214, 179, 189, 228, 15, 79, 165, 43, 195, 20, 20, 92, 230, 229, 205, 210, 89))), +) diff --git a/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/sprout_tree_at_tip@testnet_2.snap b/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/sprout_tree_at_tip@testnet_2.snap new file mode 100644 index 00000000000..b18353999df --- /dev/null +++ b/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/sprout_tree_at_tip@testnet_2.snap @@ -0,0 +1,10 @@ +--- +source: zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshot.rs +expression: sprout_tree_at_tip +--- +NoteCommitmentTree( + inner: Frontier( + frontier: None, + ), + cached_root: Some(Root((215, 198, 18, 200, 23, 121, 49, 145, 161, 230, 134, 82, 18, 24, 118, 214, 179, 189, 228, 15, 79, 165, 43, 195, 20, 20, 92, 230, 229, 205, 210, 89))), +) diff --git a/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/sprout_trees@mainnet_0.snap 
b/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/sprout_trees@mainnet_0.snap new file mode 100644 index 00000000000..fc004eddd5a --- /dev/null +++ b/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/sprout_trees@mainnet_0.snap @@ -0,0 +1,5 @@ +--- +source: zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshot.rs +expression: stored_sprout_trees +--- +{} diff --git a/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/sprout_trees@mainnet_1.snap b/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/sprout_trees@mainnet_1.snap new file mode 100644 index 00000000000..438e0809a21 --- /dev/null +++ b/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/sprout_trees@mainnet_1.snap @@ -0,0 +1,12 @@ +--- +source: zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshot.rs +expression: stored_sprout_trees +--- +{ + Root((215, 198, 18, 200, 23, 121, 49, 145, 161, 230, 134, 82, 18, 24, 118, 214, 179, 189, 228, 15, 79, 165, 43, 195, 20, 20, 92, 230, 229, 205, 210, 89)): NoteCommitmentTree( + inner: Frontier( + frontier: None, + ), + cached_root: Some(Root((215, 198, 18, 200, 23, 121, 49, 145, 161, 230, 134, 82, 18, 24, 118, 214, 179, 189, 228, 15, 79, 165, 43, 195, 20, 20, 92, 230, 229, 205, 210, 89))), + ), +} diff --git a/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/sprout_trees@mainnet_2.snap b/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/sprout_trees@mainnet_2.snap new file mode 100644 index 00000000000..438e0809a21 --- /dev/null +++ b/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/sprout_trees@mainnet_2.snap @@ -0,0 +1,12 @@ +--- +source: zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshot.rs +expression: stored_sprout_trees +--- +{ + Root((215, 198, 18, 200, 23, 121, 49, 145, 161, 230, 134, 82, 18, 24, 118, 214, 179, 189, 228, 15, 79, 165, 43, 195, 
20, 20, 92, 230, 229, 205, 210, 89)): NoteCommitmentTree( + inner: Frontier( + frontier: None, + ), + cached_root: Some(Root((215, 198, 18, 200, 23, 121, 49, 145, 161, 230, 134, 82, 18, 24, 118, 214, 179, 189, 228, 15, 79, 165, 43, 195, 20, 20, 92, 230, 229, 205, 210, 89))), + ), +} diff --git a/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/sprout_trees@testnet_0.snap b/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/sprout_trees@testnet_0.snap new file mode 100644 index 00000000000..fc004eddd5a --- /dev/null +++ b/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/sprout_trees@testnet_0.snap @@ -0,0 +1,5 @@ +--- +source: zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshot.rs +expression: stored_sprout_trees +--- +{} diff --git a/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/sprout_trees@testnet_1.snap b/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/sprout_trees@testnet_1.snap new file mode 100644 index 00000000000..438e0809a21 --- /dev/null +++ b/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/sprout_trees@testnet_1.snap @@ -0,0 +1,12 @@ +--- +source: zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshot.rs +expression: stored_sprout_trees +--- +{ + Root((215, 198, 18, 200, 23, 121, 49, 145, 161, 230, 134, 82, 18, 24, 118, 214, 179, 189, 228, 15, 79, 165, 43, 195, 20, 20, 92, 230, 229, 205, 210, 89)): NoteCommitmentTree( + inner: Frontier( + frontier: None, + ), + cached_root: Some(Root((215, 198, 18, 200, 23, 121, 49, 145, 161, 230, 134, 82, 18, 24, 118, 214, 179, 189, 228, 15, 79, 165, 43, 195, 20, 20, 92, 230, 229, 205, 210, 89))), + ), +} diff --git a/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/sprout_trees@testnet_2.snap b/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/sprout_trees@testnet_2.snap new file mode 100644 index 
00000000000..438e0809a21 --- /dev/null +++ b/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/sprout_trees@testnet_2.snap @@ -0,0 +1,12 @@ +--- +source: zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshot.rs +expression: stored_sprout_trees +--- +{ + Root((215, 198, 18, 200, 23, 121, 49, 145, 161, 230, 134, 82, 18, 24, 118, 214, 179, 189, 228, 15, 79, 165, 43, 195, 20, 20, 92, 230, 229, 205, 210, 89)): NoteCommitmentTree( + inner: Frontier( + frontier: None, + ), + cached_root: Some(Root((215, 198, 18, 200, 23, 121, 49, 145, 161, 230, 134, 82, 18, 24, 118, 214, 179, 189, 228, 15, 79, 165, 43, 195, 20, 20, 92, 230, 229, 205, 210, 89))), + ), +} diff --git a/zebra-state/src/service/finalized_state/zebra_db/shielded.rs b/zebra-state/src/service/finalized_state/zebra_db/shielded.rs index b5bfe26059e..68a75ae1162 100644 --- a/zebra-state/src/service/finalized_state/zebra_db/shielded.rs +++ b/zebra-state/src/service/finalized_state/zebra_db/shielded.rs @@ -12,7 +12,7 @@ //! The [`crate::constants::DATABASE_FORMAT_VERSION`] constant must //! be incremented each time the database format (column, serialization, etc) changes. -use std::sync::Arc; +use std::{collections::HashMap, sync::Arc}; use zebra_chain::{ block::Height, orchard, parallel::tree::NoteCommitmentTrees, sapling, sprout, @@ -99,6 +99,19 @@ impl ZebraDb { .map(Arc::new) } + /// Returns all the Sprout note commitment trees in the database. + /// + /// Calling this method can load a lot of data into RAM, and delay block commit transactions. + #[allow(dead_code, clippy::unwrap_in_result)] + pub fn sprout_note_commitments_full_map( + &self, + ) -> HashMap> { + let sprout_anchors_handle = self.db.cf_handle("sprout_anchors").unwrap(); + + self.db + .zs_items_in_range_unordered(&sprout_anchors_handle, ..) + } + /// Returns the Sapling note commitment tree of the finalized tip /// or the empty tree if the state is empty. 
pub fn sapling_note_commitment_tree(&self) -> Arc { diff --git a/zebra-test/Cargo.toml b/zebra-test/Cargo.toml index 4c712e3beb3..d64b6e59628 100644 --- a/zebra-test/Cargo.toml +++ b/zebra-test/Cargo.toml @@ -21,7 +21,7 @@ lazy_static = "1.4.0" insta = "1.30.0" proptest = "1.2.0" once_cell = "1.18.0" -rand = { version = "0.8.5", package = "rand" } +rand = "0.8.5" regex = "1.8.4" tokio = { version = "1.28.2", features = ["full", "tracing", "test-util"] } diff --git a/zebrad/Cargo.toml b/zebrad/Cargo.toml index 56c034e2658..14e33dc16a1 100644 --- a/zebrad/Cargo.toml +++ b/zebrad/Cargo.toml @@ -172,7 +172,7 @@ dirs = "5.0.1" atty = "0.2.14" num-integer = "0.1.45" -rand = { version = "0.8.5", package = "rand" } +rand = "0.8.5" # prod feature sentry sentry = { version = "0.31.5", default-features = false, features = ["backtrace", "contexts", "reqwest", "rustls", "tracing"], optional = true } From 6311cfbfb3c10e43a6da5339f0bf5843f5a76439 Mon Sep 17 00:00:00 2001 From: teor Date: Wed, 28 Jun 2023 03:38:57 +1000 Subject: [PATCH 153/265] fix(log): Only show the Zebra logo & intro for the `start` command (#7075) * Only show the intro for the `start` command * Also disable the log file intro text --- zebrad/src/application.rs | 1 + zebrad/src/commands.rs | 26 ++++++++--- zebrad/src/components/tracing/component.rs | 52 +++++++++++++--------- 3 files changed, 53 insertions(+), 26 deletions(-) diff --git a/zebrad/src/application.rs b/zebrad/src/application.rs index 47f324a2e96..d701bd80870 100644 --- a/zebrad/src/application.rs +++ b/zebrad/src/application.rs @@ -422,6 +422,7 @@ impl Application for ZebradApp { components.push(Box::new(Tracing::new( config.network.network, tracing_config, + command.cmd().uses_intro(), )?)); // Log git metadata and platform info when zebrad starts up diff --git a/zebrad/src/commands.rs b/zebrad/src/commands.rs index 806ac360808..d7f4fa337be 100644 --- a/zebrad/src/commands.rs +++ b/zebrad/src/commands.rs @@ -64,6 +64,26 @@ impl ZebradCmd { } } + 
/// Returns true if this command shows the Zebra intro logo and text. + /// + /// For example, `Start` acts as a Zcash node. + pub(crate) fn uses_intro(&self) -> bool { + // List all the commands, so new commands have to make a choice here + match self { + // Commands that need an intro + Start(_) => true, + + // Utility commands + CopyState(_) | Download(_) | Generate(_) | TipHeight(_) => false, + } + } + + /// Returns true if this command should ignore errors when + /// attempting to load a config file. + pub(crate) fn should_ignore_load_config_error(&self) -> bool { + matches!(self, ZebradCmd::Generate(_) | ZebradCmd::Download(_)) + } + /// Returns the default log level for this command, based on the `verbose` command line flag. /// /// Some commands need to be quiet by default. @@ -87,12 +107,6 @@ impl ZebradCmd { "debug" } } - - /// Returns true if this command should ignore errors when - /// attempting to load a config file. - pub(crate) fn should_ignore_load_config_error(&self) -> bool { - matches!(self, ZebradCmd::Generate(_) | ZebradCmd::Download(_)) - } } impl Runnable for ZebradCmd { diff --git a/zebrad/src/components/tracing/component.rs b/zebrad/src/components/tracing/component.rs index 4498b1a1054..ff1db585c59 100644 --- a/zebrad/src/components/tracing/component.rs +++ b/zebrad/src/components/tracing/component.rs @@ -65,9 +65,12 @@ pub struct Tracing { } impl Tracing { - /// Try to create a new [`Tracing`] component with the given `filter`. + /// Try to create a new [`Tracing`] component with the given `config`. + /// + /// If `uses_intro` is true, show a welcome message, the `network`, + /// and the Zebra logo on startup. (If the terminal supports it.) 
#[allow(clippy::print_stdout, clippy::print_stderr, clippy::unwrap_in_result)] - pub fn new(network: Network, config: Config) -> Result { + pub fn new(network: Network, config: Config, uses_intro: bool) -> Result { // Only use color if tracing output is being sent to a terminal or if it was explicitly // forced to. let use_color = config.use_color_stdout(); @@ -76,28 +79,35 @@ impl Tracing { let filter = config.filter.unwrap_or_default(); let flame_root = &config.flamegraph; - // If it's a terminal and color escaping is enabled: clear screen and - // print Zebra logo (here `use_color` is being interpreted as - // "use escape codes") - if use_color_stderr { - // Clear screen - eprint!("\x1B[2J"); + // Only show the intro for user-focused node server commands like `start` + if uses_intro { + // If it's a terminal and color escaping is enabled: clear screen and + // print Zebra logo (here `use_color` is being interpreted as + // "use escape codes") + if use_color_stderr { + // Clear screen + eprint!("\x1B[2J"); + eprintln!( + "{}", + std::str::from_utf8(&ZEBRA_ART) + .expect("should always work on a UTF-8 encoded constant") + ); + } + + eprintln!( + "Thank you for running a {} zebrad {} node!", + network.lowercase_name(), + build_version() + ); eprintln!( - "{}", - std::str::from_utf8(&ZEBRA_ART) - .expect("should always work on a UTF-8 encoded constant") + "You're helping to strengthen the network and contributing to a social good :)" ); } - eprintln!( - "Thank you for running a {} zebrad {} node!", - network.lowercase_name(), - build_version() - ); - eprintln!("You're helping to strengthen the network and contributing to a social good :)"); - let writer = if let Some(log_file) = config.log_file.as_ref() { - println!("running zebra"); + if uses_intro { + println!("running zebra"); + } // Make sure the directory for the log file exists. // If the log is configured in the current directory, it won't have a parent directory. 
@@ -122,7 +132,9 @@ impl Tracing { } } - println!("sending logs to {log_file:?}..."); + if uses_intro { + println!("sending logs to {log_file:?}..."); + } let log_file = File::options().append(true).create(true).open(log_file)?; Box::new(log_file) as BoxWrite } else { From f8e26347a0ff3aa48f9425f285c08c047a043e39 Mon Sep 17 00:00:00 2001 From: teor Date: Wed, 28 Jun 2023 05:42:04 +1000 Subject: [PATCH 154/265] Log a zebra-network task cancel on shutdown, rather than panicking (#7078) --- zebra-network/src/peer_set/initialize.rs | 27 +++++++++++++++++++----- 1 file changed, 22 insertions(+), 5 deletions(-) diff --git a/zebra-network/src/peer_set/initialize.rs b/zebra-network/src/peer_set/initialize.rs index 14f2ba5c005..cdd4807e61b 100644 --- a/zebra-network/src/peer_set/initialize.rs +++ b/zebra-network/src/peer_set/initialize.rs @@ -1138,12 +1138,29 @@ async fn report_failed(address_book: Arc>, addr: M // # Correctness // - // Spawn address book accesses on a blocking thread, - // to avoid deadlocks (see #1976). + // Spawn address book accesses on a blocking thread, to avoid deadlocks (see #1976). let span = Span::current(); - tokio::task::spawn_blocking(move || { + let task_result = tokio::task::spawn_blocking(move || { span.in_scope(|| address_book.lock().unwrap().update(addr)) }) - .await - .expect("panic in peer failure address book update task"); + .await; + + match task_result { + Ok(updated_addr) => assert_eq!( + updated_addr.map(|addr| addr.addr()), + Some(addr.addr()), + "incorrect address updated by address book: \ + original: {addr:?}, updated: {updated_addr:?}" + ), + Err(e @ JoinError { .. }) => { + if e.is_panic() { + panic!("panic in peer failure address book update task: {e:?}"); + } else { + info!( + "task error during peer failure address book update task: {e:?},\ + is Zebra shutting down?" 
+ ) + } + } + } } From caf9a0925f80cd93003a90cad6394ded1bdc1b15 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 28 Jun 2023 00:29:07 +0000 Subject: [PATCH 155/265] build(deps): bump hyper from 0.14.26 to 0.14.27 (#7082) Bumps [hyper](https://github.com/hyperium/hyper) from 0.14.26 to 0.14.27. - [Release notes](https://github.com/hyperium/hyper/releases) - [Changelog](https://github.com/hyperium/hyper/blob/v0.14.27/CHANGELOG.md) - [Commits](https://github.com/hyperium/hyper/compare/v0.14.26...v0.14.27) --- updated-dependencies: - dependency-name: hyper dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- zebra-rpc/Cargo.toml | 2 +- zebrad/Cargo.toml | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 2ac842f7d52..ad9c41ace30 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1915,9 +1915,9 @@ dependencies = [ [[package]] name = "hyper" -version = "0.14.26" +version = "0.14.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab302d72a6f11a3b910431ff93aae7e773078c769f0a3ef15fb9ec692ed147d4" +checksum = "ffb1cfd654a8219eaef89881fdb3bb3b1cdc5fa75ded05d6933b2b382e395468" dependencies = [ "bytes", "futures-channel", diff --git a/zebra-rpc/Cargo.toml b/zebra-rpc/Cargo.toml index 3b05d7ac071..f7891f45325 100644 --- a/zebra-rpc/Cargo.toml +++ b/zebra-rpc/Cargo.toml @@ -43,7 +43,7 @@ chrono = { version = "0.4.26", default-features = false, features = ["clock", "s futures = "0.3.28" # lightwalletd sends JSON-RPC requests over HTTP 1.1 -hyper = { version = "0.14.26", features = ["http1", "server"] } +hyper = { version = "0.14.27", features = ["http1", "server"] } jsonrpc-core = "18.0.0" jsonrpc-derive = "18.0.0" diff --git a/zebrad/Cargo.toml b/zebrad/Cargo.toml index 
14e33dc16a1..98251d1dfff 100644 --- a/zebrad/Cargo.toml +++ b/zebrad/Cargo.toml @@ -185,7 +185,7 @@ inferno = { version = "0.11.15", default-features = false, optional = true } tracing-journald = { version = "0.3.0", optional = true } # prod feature filter-reload -hyper = { version = "0.14.26", features = ["http1", "http2", "server"], optional = true } +hyper = { version = "0.14.27", features = ["http1", "http2", "server"], optional = true } # prod feature prometheus metrics-exporter-prometheus = { version = "0.12.0", default-features = false, features = ["http-listener"], optional = true } @@ -224,7 +224,7 @@ regex = "1.8.4" serde_json = { version = "1.0.97", features = ["preserve_order"] } tempfile = "3.5.0" -hyper = { version = "0.14.26", features = ["http1", "http2", "server"]} +hyper = { version = "0.14.27", features = ["http1", "http2", "server"]} tracing-test = { version = "0.2.4", features = ["no-env-filter"] } tokio = { version = "1.28.2", features = ["full", "tracing", "test-util"] } From 62b4fa21a2f026d4655f9f69509fe32944817c02 Mon Sep 17 00:00:00 2001 From: teor Date: Wed, 28 Jun 2023 12:34:46 +1000 Subject: [PATCH 156/265] change(docker): Replace the zcash-params Dockerfile build with a zebrad binary copy (#7054) * Replace the zcash-params Dockerfile build with a zebrad binary copy * Update zcash-params workflow conditions --- .github/workflows/zcash-params.yml | 4 ++- docker/zcash-params/Dockerfile | 45 ++++++++---------------------- 2 files changed, 14 insertions(+), 35 deletions(-) diff --git a/.github/workflows/zcash-params.yml b/.github/workflows/zcash-params.yml index e0b952e331a..5a05a340a83 100644 --- a/.github/workflows/zcash-params.yml +++ b/.github/workflows/zcash-params.yml @@ -22,10 +22,12 @@ on: paths: # parameter download code - 'zebra-consensus/src/primitives/groth16/params.rs' - - 'zebra-consensus/src/chain.rs' + - 'zebra-consensus/src/router.rs' + - 'zebrad/src/commands/download.rs' - 'zebrad/src/commands/start.rs' # workflow 
definitions - 'docker/zcash-params/Dockerfile' + - '.dockerignore' - '.github/workflows/zcash-params.yml' - '.github/workflows/build-docker-image.yml' diff --git a/docker/zcash-params/Dockerfile b/docker/zcash-params/Dockerfile index a9bea7233a6..bda075f4b94 100644 --- a/docker/zcash-params/Dockerfile +++ b/docker/zcash-params/Dockerfile @@ -1,39 +1,16 @@ -# This steps implement cargo-chef for docker layer caching # This image is for caching Zcash Sprout and Sapling parameters -FROM rust:bullseye as chef -RUN cargo install cargo-chef --locked -WORKDIR /opt/zebrad -FROM chef AS planner -COPY . . -RUN cargo chef prepare --recipe-path recipe.json +FROM debian:bullseye-slim AS release -FROM chef AS release -COPY --from=planner /opt/zebrad/recipe.json recipe.json +# Just use the precompiled zebrad binary from a recent test image. +# +# It doesn't matter what build or commit of Zebra we use, because it just calls into the +# zcash_proofs download code. (Which doesn't change much.) +# Release image zebrad binaries would also work. +# +# Compiling the download-params example using `cargo ` is another alternative: +# `cargo run --locked --release --features default-docker --example download-params` +COPY --from=us-docker.pkg.dev/zealous-zebra/zebra/zebrad-test /usr/local/bin/zebrad /usr/local/bin -# Install zebra build deps -RUN apt-get -qq update && \ - apt-get -qq install -y --no-install-recommends \ - llvm \ - libclang-dev \ - clang \ - ca-certificates \ - ; \ - rm -rf /var/lib/apt/lists/* /tmp/* - -ENV CARGO_HOME /opt/zebrad/.cargo/ -# Build dependencies - this is the caching Docker layer! -RUN cargo chef cook --release --features default-release-binaries --package zebrad --recipe-path recipe.json - -ARG RUST_BACKTRACE=0 -ENV RUST_BACKTRACE ${RUST_BACKTRACE} - -ARG RUST_LIB_BACKTRACE=0 -ENV RUST_LIB_BACKTRACE ${RUST_LIB_BACKTRACE} - -ARG COLORBT_SHOW_HIDDEN=0 -ENV COLORBT_SHOW_HIDDEN ${COLORBT_SHOW_HIDDEN} - -COPY . . 
# Pre-download Zcash Sprout and Sapling parameters -RUN cargo run --locked --release --features default-release-binaries --package zebrad --bin zebrad download +RUN zebrad download From fc9baf92dd1f8ed1d88c298f38da73bbffab6a5a Mon Sep 17 00:00:00 2001 From: teor Date: Wed, 28 Jun 2023 12:35:20 +1000 Subject: [PATCH 157/265] Make CD docker tests more flexible (#7077) --- .github/workflows/continous-delivery.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/workflows/continous-delivery.yml b/.github/workflows/continous-delivery.yml index 8cd334560fc..ba4d5272e9b 100644 --- a/.github/workflows/continous-delivery.yml +++ b/.github/workflows/continous-delivery.yml @@ -171,7 +171,8 @@ jobs: docker logs --tail all --follow testnet-conf-tests | \ tee --output-error=exit /dev/stderr | \ grep --max-count=1 --extended-regexp --color=always \ - 'net.*=.*Test.*estimated progress to chain tip.*Genesis' + -e 'net.*=.*Test.*estimated progress to chain tip.*Genesis' \ + -e 'net.*=.*Test.*estimated progress to chain tip.*BeforeOverwinter' docker stop testnet-conf-tests # get the exit status from docker EXIT_STATUS=$( \ From c2bc799dd9cae76e998331a191dc240b19bf720f Mon Sep 17 00:00:00 2001 From: teor Date: Wed, 28 Jun 2023 12:35:50 +1000 Subject: [PATCH 158/265] Use correct name for CD build patch job (#7073) --- .github/workflows/continous-delivery.patch.yml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/.github/workflows/continous-delivery.patch.yml b/.github/workflows/continous-delivery.patch.yml index 53ecb144a9a..f51ef601468 100644 --- a/.github/workflows/continous-delivery.patch.yml +++ b/.github/workflows/continous-delivery.patch.yml @@ -22,9 +22,8 @@ on: jobs: - # Also patched by continous-integration-docker.patch.yml, which has a different paths-ignore build: - name: Build CI Docker / Build images + name: Build CD Docker / Build images runs-on: ubuntu-latest steps: - run: 'echo "No build required"' From 
8c90f653915b1587a2fcc3f227a791a421f832ad Mon Sep 17 00:00:00 2001 From: Gustavo Valverde Date: Tue, 27 Jun 2023 22:36:07 -0400 Subject: [PATCH 159/265] refactor(docker): allow more flexible zebra configuration (#7045) * fix(docker): use `entrypoint.sh` as default for users * ref(entrypoint): allow more flexible configurations This changes allow users to: - Mount their own configuration file - Allow for Zebra to be exposed outside the container or not - Allow the user to turn off sync - Allow to enable `metrics` and `tracing`, exposing them or not Having the `-x` option prints variable expasions, so we don't have to echo each value. * chore(docker): remove unused ARGs from the Dockerfile ARGs are not available at build time, so we don't require this ARGs as their ENV variables counterparts are being set in the `entrypoint`, at runtime. * revert: keep old naming * fix: renaming mistake :) * Apply suggestions from code review Co-authored-by: teor * fix(docker): revert some breaking changes * imp(docker): allow more flexibility with FEATURES config * chore(docker): remove confusing port on `EXPOSE` * chore(docker): remove unused command * fix(docker): handle quotes while building the conf file --------- Co-authored-by: teor Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com> --- docker/Dockerfile | 48 +------------------- docker/runtime-entrypoint.sh | 88 ++++++++++++++++++++++-------------- 2 files changed, 57 insertions(+), 79 deletions(-) diff --git a/docker/Dockerfile b/docker/Dockerfile index 3b636339797..4f15b3bf51c 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -58,6 +58,7 @@ RUN if [ "$(uname -m)" != "aarch64" ]; then \ && \ rm -rf /var/lib/apt/lists/* /tmp/* +# TODO: just keep the backtrace, colorbt, rust_log, and cargo_home variables as those are the only needed at build time. 
# Build arguments and variables set to change how tests are run, tracelog levels, # and Network to be used (Mainnet or Testnet) # @@ -179,53 +180,8 @@ RUN apt-get update && \ ARG NETWORK ENV NETWORK ${NETWORK:-Mainnet} -# Set this to enable the RPC port -ARG RPC_PORT -ENV RPC_PORT ${RPC_PORT} - -# Set this to log to a file, if not set, logs to standard output -ARG LOG_FILE -ENV LOG_FILE ${LOG_FILE} - -# Set this to change the default cached state directory -ARG ZEBRA_CACHED_STATE_DIR -ENV ZEBRA_CACHED_STATE_DIR ${ZEBRA_CACHED_STATE_DIR:-/var/cache/zebrad-cache} - -# Zebra automatically detects if it is attached to a terminal, and uses colored output. -# Set this to 'true' to force using color even if the output is not a terminal. -# Set this to 'false' to disable using color even if the output is a terminal. -ARG LOG_COLOR -ENV LOG_COLOR ${LOG_COLOR} - # Expose configured ports - -EXPOSE 8233 18233 $RPC_PORT - -# Config location - -# Use a configurable dir and file for the zebrad configuration file -ARG ZEBRA_CONF_DIR -ENV ZEBRA_CONF_DIR ${ZEBRA_CONF_DIR:-/etc/zebra} - -ARG ZEBRA_CONF_FILE -ENV ZEBRA_CONF_FILE ${ZEBRA_CONF_FILE:-zebrad.toml} - -ARG ZEBRA_CONF_PATH -ENV ZEBRA_CONF_PATH ${ZEBRA_CONF_PATH:-$ZEBRA_CONF_DIR/$ZEBRA_CONF_FILE} - -# Other settings - -ARG SHORT_SHA -ENV SHORT_SHA ${SHORT_SHA} - -# Set this to send sentry reports when Zebra crashes -ARG SENTRY_DSN -ENV SENTRY_DSN ${SENTRY_DSN} - -# Create a default config file based on the Docker build arguments, -# and report the available zebrad arguments. -# (--help is used as a dummy command.) 
-RUN /runtime-entrypoint.sh --help +EXPOSE 8233 18233 # Update the config file based on the Docker run variables, # and launch zebrad with it diff --git a/docker/runtime-entrypoint.sh b/docker/runtime-entrypoint.sh index 98b1c9447d6..5eaeb7a4cc7 100755 --- a/docker/runtime-entrypoint.sh +++ b/docker/runtime-entrypoint.sh @@ -1,73 +1,93 @@ #!/usr/bin/env bash -# show the commands we are executing +# Show the commands we are executing set -x -# exit if a command fails +# Exit if a command fails set -e -# exit if any command in a pipeline fails +# Exit if any command in a pipeline fails set -o pipefail -echo "Config variables:" -echo "NETWORK=$NETWORK" -echo "RPC_PORT=$RPC_PORT" -echo "LOG_FILE=$LOG_FILE" +# Set this to change the default cached state directory +# Path and name of the config file +: "${ZEBRA_CONF_DIR:=/etc/zebrad}" +: "${ZEBRA_CONF_FILE:=zebrad.toml}" +if [[ -n "$ZEBRA_CONF_DIR" ]] && [[ -n "$ZEBRA_CONF_FILE" ]]; then + ZEBRA_CONF_PATH="$ZEBRA_CONF_DIR/$ZEBRA_CONF_FILE" +fi -echo "Config location:" -echo "ZEBRA_CONF_DIR=$ZEBRA_CONF_DIR" -echo "ZEBRA_CONF_FILE=$ZEBRA_CONF_FILE" -echo "ZEBRA_CONF_PATH=$ZEBRA_CONF_PATH" +# [network] +: "${NETWORK:=Mainnet}" +: "${ZEBRA_LISTEN_ADDR:=0.0.0.0}" +# [consensus] +: "${ZEBRA_CHECKPOINT_SYNC:=true}" +# [state] +: "${ZEBRA_CACHED_STATE_DIR:=/var/cache/zebrad-cache}" +# [metrics] +: "${METRICS_ENDPOINT_ADDR:=0.0.0.0}" +: "${METRICS_ENDPOINT_PORT:=9999}" +# [tracing] +: "${LOG_COLOR:=false}" +: "${TRACING_ENDPOINT_ADDR:=0.0.0.0}" +: "${TRACING_ENDPOINT_PORT:=3000}" +# [rpc] +: "${RPC_LISTEN_ADDR:=0.0.0.0}" -echo "Other variables:" -echo "SHORT_SHA=$SHORT_SHA" -echo "SENTRY_DSN=$SENTRY_DSN" - -# Create the conf path and file if it does not exist. -mkdir -p "$ZEBRA_CONF_DIR" -touch "$ZEBRA_CONF_PATH" # Populate `zebrad.toml` before starting zebrad, using the environmental -# variables set by the Dockerfile. +# variables set by the Dockerfile or the user. If the user has already created a config, don't replace it. 
# # We disable most ports by default, so the default config is secure. # Users have to opt-in to additional functionality by setting environmental variables. -# -# TODO: -# - make `cache_dir`, `metrics.endpoint_addr`, and `tracing.endpoint_addr` into Docker arguments -# - add an $EXTRA_CONFIG or $REPLACEMENT_CONFIG environmental variable +if [[ -n "$ZEBRA_CONF_PATH" ]] && [[ ! -f "$ZEBRA_CONF_PATH" ]]; then + +# Create the conf path and file +mkdir -p "$ZEBRA_CONF_DIR" +touch "$ZEBRA_CONF_PATH" + +# Populate the conf file cat < "$ZEBRA_CONF_PATH" [network] network = "$NETWORK" -listen_addr = "0.0.0.0" - +listen_addr = "$ZEBRA_LISTEN_ADDR" [state] cache_dir = "$ZEBRA_CACHED_STATE_DIR" +EOF +if [[ " $FEATURES " =~ " prometheus " ]]; then # spaces are important here to avoid partial matches +cat <> "$ZEBRA_CONF_PATH" [metrics] -#endpoint_addr = "0.0.0.0:9999" +endpoint_addr = "${METRICS_ENDPOINT_ADDR}:${METRICS_ENDPOINT_PORT}" EOF +fi -if [[ -n "$RPC_PORT" ]]; then +# Set this to enable the RPC port +if [[ " $FEATURES " =~ " getblocktemplate-rpcs " ]]; then # spaces are important here to avoid partial matches cat <> "$ZEBRA_CONF_PATH" [rpc] -listen_addr = "0.0.0.0:${RPC_PORT}" +listen_addr = "${RPC_LISTEN_ADDR}:${RPC_PORT}" EOF fi -if [[ -n "$LOG_FILE" ]] || [[ -n "$LOG_COLOR" ]]; then +if [[ -n "$LOG_FILE" ]] || [[ -n "$LOG_COLOR" ]] || [[ -n "$TRACING_ENDPOINT_ADDR" ]]; then cat <> "$ZEBRA_CONF_PATH" [tracing] -#endpoint_addr = "0.0.0.0:3000" +EOF +if [[ " $FEATURES " =~ " filter-reload " ]]; then # spaces are important here to avoid partial matches +cat <> "$ZEBRA_CONF_PATH" +endpoint_addr = "${TRACING_ENDPOINT_ADDR}:${TRACING_ENDPOINT_PORT}" EOF fi - +# Set this to log to a file, if not set, logs to standard output if [[ -n "$LOG_FILE" ]]; then -mkdir -p $(dirname "$LOG_FILE") - +mkdir -p "$(dirname "$LOG_FILE")" cat <> "$ZEBRA_CONF_PATH" log_file = "${LOG_FILE}" EOF fi +# Zebra automatically detects if it is attached to a terminal, and uses colored output. 
+# Set this to 'true' to force using color even if the output is not a terminal. +# Set this to 'false' to disable using color even if the output is a terminal. if [[ "$LOG_COLOR" = "true" ]]; then cat <> "$ZEBRA_CONF_PATH" force_use_color = true @@ -77,6 +97,8 @@ cat <> "$ZEBRA_CONF_PATH" use_color = false EOF fi +fi +fi echo "Using zebrad.toml:" cat "$ZEBRA_CONF_PATH" From 9112aa43d0f013df02dd7f9ef01709c1a1b698f4 Mon Sep 17 00:00:00 2001 From: teor Date: Wed, 28 Jun 2023 19:46:59 +1000 Subject: [PATCH 160/265] change(release): Split release checklist into a ticket and PR template (#7088) * Split release checklist into a ticket and PR template * Fix quoting --- .github/ISSUE_TEMPLATE/release.md | 47 +++++++ .../release-checklist.md | 116 ++++++++---------- 2 files changed, 101 insertions(+), 62 deletions(-) create mode 100644 .github/ISSUE_TEMPLATE/release.md diff --git a/.github/ISSUE_TEMPLATE/release.md b/.github/ISSUE_TEMPLATE/release.md new file mode 100644 index 00000000000..af1a68331c7 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/release.md @@ -0,0 +1,47 @@ +--- +name: 'Zebra Release' +about: 'Zebra team use only' +title: 'Publish next Zebra release: (version)' +labels: 'A-release, C-trivial, P-Medium :zap:' +assignees: '' + +--- + +# Prepare for the Release + +These release steps can be done a week before the release, in separate PRs. +They can be skipped for urgent releases. + +## Checkpoints + +For performance and security, we want to update the Zebra checkpoints in every release. +- [ ] You can copy the latest checkpoints from CI by following [the zebra-checkpoints README](https://github.com/ZcashFoundation/zebra/blob/main/zebra-utils/README.md#zebra-checkpoints). + +## Missed Dependency Updates + +Sometimes `dependabot` misses some dependency updates, or we accidentally turned them off. + +This step can be skipped if there is a large pending dependency upgrade. (For example, shared ECC crates.) 
+ +Here's how we make sure we got everything: +- [ ] Run `cargo update` on the latest `main` branch, and keep the output +- [ ] If needed, [add duplicate dependency exceptions to deny.toml](https://github.com/ZcashFoundation/zebra/blob/main/book/src/dev/continuous-integration.md#fixing-duplicate-dependencies-in-check-denytoml-bans) +- [ ] If needed, remove resolved duplicate dependencies from `deny.toml` +- [ ] Open a separate PR with the changes +- [ ] Add the output of `cargo update` to that PR as a comment + +# Prepare and Publish the Release + +Follow the steps in the [release checklist](https://github.com/ZcashFoundation/zebra/blob/main/.github/PULL_REQUEST_TEMPLATE/release-checklist.md) to prepare the release: + +Release PR: +- [ ] Update Changelog +- [ ] Update README +- [ ] Update Zebra Versions +- [ ] Update End of Support Height + +Publish Release: +- [ ] Create & Test GitHub Pre-Release +- [ ] Publish GitHub Release +- [ ] Publish Rust Crates +- [ ] Publish Docker Images diff --git a/.github/PULL_REQUEST_TEMPLATE/release-checklist.md b/.github/PULL_REQUEST_TEMPLATE/release-checklist.md index 1f010c13a07..71090a99407 100644 --- a/.github/PULL_REQUEST_TEMPLATE/release-checklist.md +++ b/.github/PULL_REQUEST_TEMPLATE/release-checklist.md @@ -1,48 +1,73 @@ --- -name: Release Checklist Template -about: Checklist of versioning to create a taggable commit for Zebra -title: '' -labels: +name: 'Release Checklist Template' +about: 'Checklist to create and publish a Zebra release' +title: 'Release Zebra (version)' +labels: 'A-release, C-trivial, P-Critical :ambulance:' assignees: '' --- # Prepare for the Release -These release steps can be done a week before the release, in separate PRs. -They can be skipped for urgent releases. +- [ ] Make sure the PRs with the new checkpoint hashes and missed dependencies are already merged. 
+ (See the release ticket checklist for details) -## Checkpoints -For performance and security, we want to update the Zebra checkpoints in every release. -- [ ] You can copy the latest checkpoints from CI by following [the zebra-checkpoints README](https://github.com/ZcashFoundation/zebra/blob/main/zebra-utils/README.md#zebra-checkpoints). +# Summarise Release Changes -## Missed Dependency Updates +These steps can be done a few days before the release, in the same PR: -Sometimes `dependabot` misses some dependency updates, or we accidentally turned them off. +## Change Log -Here's how we make sure we got everything: -- [ ] Run `cargo update` on the latest `main` branch, and keep the output -- [ ] If needed, [add duplicate dependency exceptions to deny.toml](https://github.com/ZcashFoundation/zebra/blob/main/book/src/dev/continuous-integration.md#fixing-duplicate-dependencies-in-check-denytoml-bans) -- [ ] If needed, remove resolved duplicate dependencies from `deny.toml` -- [ ] Open a separate PR with the changes -- [ ] Add the output of `cargo update` to that PR as a comment +**Important**: Any merge into `main` deletes any edits to the draft changelog. +Once you are ready to tag a release, copy the draft changelog into `CHANGELOG.md`. +We use [the Release Drafter workflow](https://github.com/marketplace/actions/release-drafter) to automatically create a [draft changelog](https://github.com/ZcashFoundation/zebra/releases). We follow the [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) format. 
-# Make Release Changes +To create the final change log: +- [ ] Copy the **latest** draft changelog into `CHANGELOG.md` (there can be multiple draft releases) +- [ ] Delete any trivial changes + - [ ] Put the list of deleted changelog entries in a PR comment to make reviewing easier +- [ ] Combine duplicate changes +- [ ] Edit change descriptions so they will make sense to Zebra users +- [ ] Check the category for each change + - Prefer the "Fix" category if you're not sure -These release steps can be done a few days before the release, in the same PR: -- [ ] Make sure the PRs with the new checkpoint hashes and missed dependencies are already merged +## README -## Versioning +README updates can be skipped for urgent releases. -### How to Increment Versions +Update the README to: +- [ ] Remove any "Known Issues" that have been fixed since the last release. +- [ ] Update the "Build and Run Instructions" with any new dependencies. + Check for changes in the `Dockerfile` since the last tag: `git diff docker/Dockerfile`. +- [ ] If Zebra has started using newer Rust language features or standard library APIs, update the known working Rust version in the README, book, and `Cargo.toml`s + +You can use a command like: +```sh +fastmod --fixed-strings '1.58' '1.65' +``` + +## Create the Release PR + +- [ ] Push the updated changelog and README into a new branch + for example: `bump-v1.0.0` - this needs to be different to the tag name +- [ ] Create a release PR by adding `&template=release-checklist.md` to the comparing url ([Example](https://github.com/ZcashFoundation/zebra/compare/bump-v1.0.0?expand=1&template=release-checklist.md)). +- [ ] Freeze the [`batched` queue](https://dashboard.mergify.com/github/ZcashFoundation/repo/zebra/queues) using Mergify. +- [ ] Mark all the release PRs as `Critical` priority, so they go in the `urgent` Mergify queue. 
+ + +# Update Versions and End of Support + +## Update Zebra Version + +### Choose a Release Level Zebra follows [semantic versioning](https://semver.org). Semantic versions look like: MAJOR.MINOR.PATCH[-TAG.PRE-RELEASE] -Choose a release level for `zebrad` based on the changes in the release that users will see: +Choose a release level for `zebrad`. Release levels are based on user-visible changes from the changelog: - Mainnet Network Upgrades are `major` releases -- new features, large changes, deprecations, and removals are `minor` releases +- significant new features, large changes, deprecations, and removals are `minor` releases - otherwise, it is a `patch` release Zebra's Rust API doesn't have any support or stability guarantees, so we keep all the `zebra-*` and `tower-*` crates on a beta `pre-release` version. @@ -65,37 +90,6 @@ Zebra's Rust API doesn't have any support or stability guarantees, so we keep al - [ ] `cargo release publish --verbose --dry-run --workspace` - [ ] Commit the version changes to your release PR branch using `git`: `cargo release commit --verbose --execute --workspace` -## README - -README updates can be skipped for urgent releases. - -Update the README to: -- [ ] Remove any "Known Issues" that have been fixed since the last release. -- [ ] Update the "Build and Run Instructions" with any new dependencies. - Check for changes in the `Dockerfile` since the last tag: `git diff docker/Dockerfile`. -- [ ] If Zebra has started using newer Rust language features or standard library APIs, update the known working Rust version in the README, book, and `Cargo.toml`s - -You can use a command like: -```sh -fastmod --fixed-strings '1.58' '1.65' -``` - -## Change Log - -**Important**: Any merge into `main` deletes any edits to the draft changelog. -Once you are ready to tag a release, copy the draft changelog into `CHANGELOG.md`. 
- -We use [the Release Drafter workflow](https://github.com/marketplace/actions/release-drafter) to automatically create a [draft changelog](https://github.com/ZcashFoundation/zebra/releases). We follow the [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) format. - -To create the final change log: -- [ ] Copy the **latest** draft changelog into `CHANGELOG.md` (there can be multiple draft releases) -- [ ] Delete any trivial changes - - [ ] Put the list of deleted changelog entries in a PR comment to make reviewing easier -- [ ] Combine duplicate changes -- [ ] Edit change descriptions so they will make sense to Zebra users -- [ ] Check the category for each change - - Prefer the "Fix" category if you're not sure - ## Update End of Support The end of support height is calculated from the current blockchain height: @@ -111,16 +105,12 @@ The end of support height is calculated from the current blockchain height:

-### Create the Release PR +## Update the Release PR -- [ ] Push the version increments, the updated changelog, and the release constants into a branch, - for example: `bump-v1.0.0` - this needs to be different to the tag name -- [ ] Create a release PR by adding `&template=release-checklist.md` to the comparing url ([Example](https://github.com/ZcashFoundation/zebra/compare/bump-v1.0.0?expand=1&template=release-checklist.md)). -- [ ] Freeze the [`batched` queue](https://dashboard.mergify.com/github/ZcashFoundation/repo/zebra/queues) using Mergify. -- [ ] Mark all the release PRs as `Critical` priority, so they go in the `urgent` Mergify queue. +- [ ] Push the version increments and the release constants to the release branch. -# Release Zebra +# Publish the Zebra Release ## Create the GitHub Pre-Release @@ -167,6 +157,8 @@ If building or running fails after tagging:
+Tag a new release, following these instructions... + 1. Fix the bug that caused the failure 2. Start a new `patch` release 3. Skip the **Release Preparation**, and start at the **Release Changes** step From 6ab451c6605b9acdcc5108aa8c2f907f17a78e83 Mon Sep 17 00:00:00 2001 From: Conrado Gouvea Date: Wed, 28 Jun 2023 17:07:30 -0300 Subject: [PATCH 161/265] make logo fit 80 columns (#7095) --- zebrad/src/components/tracing/component.rs | 2 +- zebrad/src/components/tracing/zebra.utf8 | 40 +++++++++++----------- 2 files changed, 21 insertions(+), 21 deletions(-) diff --git a/zebrad/src/components/tracing/component.rs b/zebrad/src/components/tracing/component.rs index ff1db585c59..2dff3a15741 100644 --- a/zebrad/src/components/tracing/component.rs +++ b/zebrad/src/components/tracing/component.rs @@ -32,7 +32,7 @@ use super::flame; // Convert heart image to PNG (2000px) and run: // img2txt -W 40 -H 20 -f utf8 -d none Heart_corazón.svg.png > heart.utf8 // img2txt -W 40 -H 20 -f utf8 -d none favicon.png > logo.utf8 -// paste favicon.utf8 heart.utf8 > zebra.utf8 +// paste -d "\0" favicon.utf8 heart.utf8 > zebra.utf8 static ZEBRA_ART: [u8; include_bytes!("zebra.utf8").len()] = *include_bytes!("zebra.utf8"); /// A type-erased boxed writer that can be sent between threads safely. diff --git a/zebrad/src/components/tracing/zebra.utf8 b/zebrad/src/components/tracing/zebra.utf8 index bf620ddea8d..cb30359a98f 100644 --- a/zebrad/src/components/tracing/zebra.utf8 +++ b/zebrad/src/components/tracing/zebra.utf8 @@ -1,20 +1,20 @@ - X@8:::::::@X - @::X:;SX;%8S8@@:X::8 - @:;@8@.S8 ;;t;. XX.;@@;:@ 8; %X X% ;8 - 8:@88 .%@.S@.XS: 888:8 X :: .: X - :;SX XS8;::@X@8::;8t8: 8;;:@ % SS % - @::@ 88:::8 8::88::::::X@ 8::8 S X - @:XS8 ;::::::8.88%8@S88::::8: 8S8:8 X S - ::;8 t8S8::::@St 88SX @SS@8:8S;8S @@:: 8 8 -8:XX %%@@::X% 8: X@ t:8t X ..%t8 ;8:8   -::8t: tX:X8tS 8: :@ ;.8@.X tt888: @8:: @ X -::8S: @:8Xtt% 8: S 8 8S.   .8;::. 88:: 8 8 -::X8 S@8 8:8.@8@8Xt S  .%8;S %8:: : : - ::@8 S .  
@:::::::X;@ 8.88S8 @8;:X :: :; - 8:X8X .@:X88:::::::::888 @;:X %88:: t t - @::S 888@8:::::::::8X; 8 8::8 8 8 - X:;S@ tXX8;::::::::S::@8 8;S:8 X. .X - ::8S@ :S88XX@8X: @XX:: :: :: - 8:;%X8.:S t; X8%S:8X tt - X8::8SS8SX8@@X@8;8S:8X - X@8:::::::@XS + X@8:::::::@X + @::X:;SX;%8S8@@:X::8 + @:;@8@.S8 ;;t;. XX.;@@;:@ 8; %X X% ;8 + 8:@88 .%@.S@.XS: 888:8 X :: .: X + :;SX XS8;::@X@8::;8t8: 8;;:@ % SS % + @::@ 88:::8 8::88::::::X@ 8::8 S X + @:XS8 ;::::::8.88%8@S88::::8: 8S8:8 X S + ::;8 t8S8::::@St 88SX @SS@8:8S;8S @@:: 8 8 +8:XX %%@@::X% 8: X@ t:8t X ..%t8 ;8:8   +::8t: tX:X8tS 8: :@ ;.8@.X tt888: @8:: @ X +::8S: @:8Xtt% 8: S 8 8S.   .8;::. 88:: 8 8 +::X8 S@8 8:8.@8@8Xt S  .%8;S %8:: : : + ::@8 S .  @:::::::X;@ 8.88S8 @8;:X :: :; + 8:X8X .@:X88:::::::::888 @;:X %88:: t t + @::S 888@8:::::::::8X; 8 8::8 8 8 + X:;S@ tXX8;::::::::S::@8 8;S:8 X. .X + ::8S@ :S88XX@8X: @XX:: :: :: + 8:;%X8.:S t; X8%S:8X tt + X8::8SS8SX8@@X@8;8S:8X + X@8:::::::@XS From 560d5f7de90f394beff66e0062e640201b9cf9de Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 28 Jun 2023 20:46:27 +0000 Subject: [PATCH 162/265] build(deps): bump tj-actions/changed-files from 37.0.3 to 37.0.4 (#7092) Bumps [tj-actions/changed-files](https://github.com/tj-actions/changed-files) from 37.0.3 to 37.0.4. - [Release notes](https://github.com/tj-actions/changed-files/releases) - [Changelog](https://github.com/tj-actions/changed-files/blob/main/HISTORY.md) - [Commits](https://github.com/tj-actions/changed-files/compare/v37.0.3...v37.0.4) --- updated-dependencies: - dependency-name: tj-actions/changed-files dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/lint.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index d3427fd19a3..dd981e8e5c0 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -37,7 +37,7 @@ jobs: - name: Rust files id: changed-files-rust - uses: tj-actions/changed-files@v37.0.3 + uses: tj-actions/changed-files@v37.0.4 with: files: | **/*.rs @@ -49,7 +49,7 @@ jobs: - name: Workflow files id: changed-files-workflows - uses: tj-actions/changed-files@v37.0.3 + uses: tj-actions/changed-files@v37.0.4 with: files: | .github/workflows/*.yml From a6731d16f6bbb8f765afd481d426ede49b377bdf Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 29 Jun 2023 01:28:00 +0000 Subject: [PATCH 163/265] build(deps): bump bitflags from 2.3.2 to 2.3.3 (#7083) Bumps [bitflags](https://github.com/bitflags/bitflags) from 2.3.2 to 2.3.3. - [Release notes](https://github.com/bitflags/bitflags/releases) - [Changelog](https://github.com/bitflags/bitflags/blob/main/CHANGELOG.md) - [Commits](https://github.com/bitflags/bitflags/compare/2.3.2...2.3.3) --- updated-dependencies: - dependency-name: bitflags dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 10 +++++----- zebra-chain/Cargo.toml | 2 +- zebra-network/Cargo.toml | 2 +- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ad9c41ace30..2663ca7b429 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -470,9 +470,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitflags" -version = "2.3.2" +version = "2.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6dbe3c979c178231552ecba20214a8272df4e09f232a87aef4320cf06539aded" +checksum = "630be753d4e58660abd17930c71b647fe46c27ea6b63cc59e1e3851406972e42" [[package]] name = "bitflags-serde-legacy" @@ -480,7 +480,7 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5b64e60c28b6d25ad92e8b367801ff9aa12b41d05fc8798055d296bace4a60cc" dependencies = [ - "bitflags 2.3.2", + "bitflags 2.3.3", "serde", ] @@ -5659,7 +5659,7 @@ dependencies = [ name = "zebra-chain" version = "1.0.0-beta.26" dependencies = [ - "bitflags 2.3.2", + "bitflags 2.3.3", "bitflags-serde-legacy", "bitvec", "blake2b_simd", @@ -5764,7 +5764,7 @@ dependencies = [ name = "zebra-network" version = "1.0.0-beta.26" dependencies = [ - "bitflags 2.3.2", + "bitflags 2.3.3", "byteorder", "bytes", "chrono", diff --git a/zebra-chain/Cargo.toml b/zebra-chain/Cargo.toml index c23f864127f..d2a17a0467c 100644 --- a/zebra-chain/Cargo.toml +++ b/zebra-chain/Cargo.toml @@ -49,7 +49,7 @@ bench = ["zebra-test"] # Cryptography bitvec = "1.0.1" -bitflags = "2.3.2" +bitflags = "2.3.3" bitflags-serde-legacy = "0.1.1" blake2b_simd = "1.0.1" blake2s_simd = "1.0.1" diff --git a/zebra-network/Cargo.toml b/zebra-network/Cargo.toml index 73fb7c239d7..494f182503f 100644 --- a/zebra-network/Cargo.toml +++ b/zebra-network/Cargo.toml @@ -40,7 +40,7 @@ progress-bar = [ proptest-impl = ["proptest", 
"proptest-derive", "zebra-chain/proptest-impl"] [dependencies] -bitflags = "2.3.2" +bitflags = "2.3.3" byteorder = "1.4.3" bytes = "1.4.0" chrono = { version = "0.4.26", default-features = false, features = ["clock", "std"] } From 256e4cd2ce5edba5a28dfd63fd72d1bb02b4685f Mon Sep 17 00:00:00 2001 From: teor Date: Fri, 30 Jun 2023 02:01:51 +1000 Subject: [PATCH 164/265] Use a heart image with a compatible licence (#7102) --- zebrad/src/components/tracing/component.rs | 18 +++++++--- zebrad/src/components/tracing/zebra.utf8 | 40 +++++++++++----------- 2 files changed, 33 insertions(+), 25 deletions(-) diff --git a/zebrad/src/components/tracing/component.rs b/zebrad/src/components/tracing/component.rs index 2dff3a15741..36a2abaacd0 100644 --- a/zebrad/src/components/tracing/component.rs +++ b/zebrad/src/components/tracing/component.rs @@ -24,15 +24,23 @@ use super::flame; // Art generated with these two images. // Zebra logo: book/theme/favicon.png -// Heart image: https://commons.wikimedia.org/wiki/File:Heart_coraz%C3%B3n.svg -// (License: CC BY-SA 3.0) +// License: MIT or Apache 2.0 +// +// Heart image: https://commons.wikimedia.org/wiki/File:Love_Heart_SVG.svg +// Author: Bubinator +// License: Public Domain or Unconditional Use // // How to render // -// Convert heart image to PNG (2000px) and run: -// img2txt -W 40 -H 20 -f utf8 -d none Heart_corazón.svg.png > heart.utf8 +// Convert heart image to PNG (2000px): +// curl -o heart.svg https://upload.wikimedia.org/wikipedia/commons/4/42/Love_Heart_SVG.svg +// cargo install resvg +// resvg --width 2000 --height 2000 heart.svg heart.png +// +// Then to text (40x20): +// img2txt -W 40 -H 20 -f utf8 -d none heart.png > heart.utf8 // img2txt -W 40 -H 20 -f utf8 -d none favicon.png > logo.utf8 -// paste -d "\0" favicon.utf8 heart.utf8 > zebra.utf8 +// paste -d "\0" logo.utf8 heart.utf8 > zebra.utf8 static ZEBRA_ART: [u8; include_bytes!("zebra.utf8").len()] = *include_bytes!("zebra.utf8"); /// A type-erased boxed writer 
that can be sent between threads safely. diff --git a/zebrad/src/components/tracing/zebra.utf8 b/zebrad/src/components/tracing/zebra.utf8 index cb30359a98f..18b39c533c5 100644 --- a/zebrad/src/components/tracing/zebra.utf8 +++ b/zebrad/src/components/tracing/zebra.utf8 @@ -1,20 +1,20 @@ - X@8:::::::@X - @::X:;SX;%8S8@@:X::8 - @:;@8@.S8 ;;t;. XX.;@@;:@ 8; %X X% ;8 - 8:@88 .%@.S@.XS: 888:8 X :: .: X - :;SX XS8;::@X@8::;8t8: 8;;:@ % SS % - @::@ 88:::8 8::88::::::X@ 8::8 S X - @:XS8 ;::::::8.88%8@S88::::8: 8S8:8 X S - ::;8 t8S8::::@St 88SX @SS@8:8S;8S @@:: 8 8 -8:XX %%@@::X% 8: X@ t:8t X ..%t8 ;8:8   -::8t: tX:X8tS 8: :@ ;.8@.X tt888: @8:: @ X -::8S: @:8Xtt% 8: S 8 8S.   .8;::. 88:: 8 8 -::X8 S@8 8:8.@8@8Xt S  .%8;S %8:: : : - ::@8 S .  @:::::::X;@ 8.88S8 @8;:X :: :; - 8:X8X .@:X88:::::::::888 @;:X %88:: t t - @::S 888@8:::::::::8X; 8 8::8 8 8 - X:;S@ tXX8;::::::::S::@8 8;S:8 X. .X - ::8S@ :S88XX@8X: @XX:: :: :: - 8:;%X8.:S t; X8%S:8X tt - X8::8SS8SX8@@X@8;8S:8X - X@8:::::::@XS + X@8:::::::@X  S888@t S888% + @::X:;SX;%8S8@@:X::8 S: .8 @. .@ + @:;@8@.S8 ;;t;. XX.;@@;:@ ; % ; X + 8:@88 .%@.S@.XS: 888:8 8 t% @ + :;SX XS8;::@X@8::;8t8: 8;;:@    + @::@ 88:::8 8::88::::::X@ 8::8 @ 8 + @:XS8 ;::::::8.88%8@S88::::8: 8S8:8 X X + ::;8 t8S8::::@St 88SX @SS@8:8S;8S @@:: 8 8 +8:XX %%@@::X% 8: X@ t:8t X ..%t8 ;8:8   +::8t: tX:X8tS 8: :@ ;.8@.X tt888: @8:: X % +::8S: @:8Xtt% 8: S 8 8S.   .8;::. 88:: % t +::X8 S@8 8:8.@8@8Xt S  .%8;S %8:: 8 8 + ::@8 S .  @:::::::X;@ 8.88S8 @8;:X % % + 8:X8X .@:X88:::::::::888 @;:X %88:: % % + @::S 888@8:::::::::8X; 8 8::8 8 8 + X:;S@ tXX8;::::::::S::@8 8;S:8 S. 
:t + ::8S@ :S88XX@8X: @XX:: % % + 8:;%X8.:S t; X8%S:8X @ S + X8::8SS8SX8@@X@8;8S:8X S % + X@8:::::::@XS % From 455779c8332ca5f609d8dc5d8d3be07863a47773 Mon Sep 17 00:00:00 2001 From: Alfredo Garcia Date: Thu, 29 Jun 2023 17:18:52 -0300 Subject: [PATCH 165/265] build(deps): Bump indexmap, toml and serde_json (#7100) * bump toml, indexmap and serde_json all together * add duplicated indexmap dep to deny.toml --- Cargo.lock | 65 +++++++++++++++++++++------------- deny.toml | 3 ++ zebra-chain/Cargo.toml | 2 +- zebra-network/Cargo.toml | 4 +-- zebra-node-services/Cargo.toml | 2 +- zebra-rpc/Cargo.toml | 4 +-- zebra-state/Cargo.toml | 4 +-- zebra-test/Cargo.toml | 2 +- zebra-utils/Cargo.toml | 2 +- zebrad/Cargo.toml | 6 ++-- 10 files changed, 57 insertions(+), 37 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 2663ca7b429..4e6fbcdc318 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1333,6 +1333,12 @@ dependencies = [ "byteorder", ] +[[package]] +name = "equivalent" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "88bffebc5d80432c9b140ee17875ff173a8ab62faad5b257da912bd2f6c1c0a1" + [[package]] name = "errno" version = "0.3.1" @@ -1681,7 +1687,7 @@ dependencies = [ "futures-sink", "futures-util", "http", - "indexmap", + "indexmap 1.9.3", "slab", "tokio", "tokio-util 0.7.8", @@ -2060,6 +2066,17 @@ dependencies = [ "serde", ] +[[package]] +name = "indexmap" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d5477fe2230a79769d8dc68e0eabf5437907c0457a5614a9e8dddb67f65eb65d" +dependencies = [ + "equivalent", + "hashbrown 0.14.0", + "serde", +] + [[package]] name = "indicatif" version = "0.17.5" @@ -2494,7 +2511,7 @@ checksum = "8a4964177ddfdab1e3a2b37aec7cf320e14169abb0ed73999f558136409178d5" dependencies = [ "base64 0.21.2", "hyper", - "indexmap", + "indexmap 1.9.3", "ipnet", "metrics 0.21.0", "metrics-util", @@ -3057,7 +3074,7 @@ source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "4dd7d28ee937e54fe3080c91faa1c3a46c06de6252988a7f4592ba2310ef22a4" dependencies = [ "fixedbitset", - "indexmap", + "indexmap 1.9.3", ] [[package]] @@ -4073,11 +4090,11 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.97" +version = "1.0.99" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bdf3bf93142acad5821c99197022e170842cdbc1c30482b98750c688c640842a" +checksum = "46266871c240a00b8f503b877622fe33430b3c7d963bdc0f2adc511e54a1eae3" dependencies = [ - "indexmap", + "indexmap 2.0.0", "itoa", "ryu", "serde", @@ -4085,9 +4102,9 @@ dependencies = [ [[package]] name = "serde_spanned" -version = "0.6.2" +version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93107647184f6027e3b7dcb2e11034cf95ffa1e3a682c67951963ac69c1c007d" +checksum = "96426c9936fd7a0124915f9185ea1d20aa9445cc9821142f0a73bc9207a2e186" dependencies = [ "serde", ] @@ -4123,7 +4140,7 @@ dependencies = [ "base64 0.21.2", "chrono", "hex", - "indexmap", + "indexmap 1.9.3", "serde", "serde_json", "serde_with_macros 3.0.0", @@ -4632,9 +4649,9 @@ dependencies = [ [[package]] name = "toml" -version = "0.7.4" +version = "0.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d6135d499e69981f9ff0ef2167955a5333c35e36f6937d382974566b3d5b94ec" +checksum = "1ebafdf5ad1220cb59e7d17cf4d2c72015297b75b19a10472f99b89225089240" dependencies = [ "serde", "serde_spanned", @@ -4644,20 +4661,20 @@ dependencies = [ [[package]] name = "toml_datetime" -version = "0.6.2" +version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a76a9312f5ba4c2dec6b9161fdf25d87ad8a09256ccea5a556fef03c706a10f" +checksum = "7cda73e2f1397b1262d6dfdcef8aafae14d1de7748d66822d3bfeeb6d03e5e4b" dependencies = [ "serde", ] [[package]] name = "toml_edit" -version = "0.19.10" +version = "0.19.11" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "2380d56e8670370eee6566b0bfd4265f65b3f432e8c6d85623f728d4fa31f739" +checksum = "266f016b7f039eec8a1a80dfe6156b633d208b9fccca5e4db1d6775b0c4e34a7" dependencies = [ - "indexmap", + "indexmap 2.0.0", "serde", "serde_spanned", "toml_datetime", @@ -4714,7 +4731,7 @@ dependencies = [ "futures-core", "futures-util", "hdrhistogram", - "indexmap", + "indexmap 1.9.3", "pin-project", "pin-project-lite", "rand 0.8.5", @@ -5773,7 +5790,7 @@ dependencies = [ "hex", "howudoin", "humantime-serde", - "indexmap", + "indexmap 2.0.0", "itertools 0.11.0", "lazy_static", "metrics 0.21.0", @@ -5792,7 +5809,7 @@ dependencies = [ "tokio", "tokio-stream", "tokio-util 0.7.8", - "toml 0.7.4", + "toml 0.7.5", "tower", "tracing", "tracing-error", @@ -5821,7 +5838,7 @@ dependencies = [ "futures", "hex", "hyper", - "indexmap", + "indexmap 2.0.0", "insta", "jsonrpc-core", "jsonrpc-derive", @@ -5871,7 +5888,7 @@ dependencies = [ "halo2_proofs", "hex", "howudoin", - "indexmap", + "indexmap 2.0.0", "insta", "itertools 0.11.0", "jubjub", @@ -5908,7 +5925,7 @@ dependencies = [ "futures", "hex", "humantime", - "indexmap", + "indexmap 2.0.0", "insta", "lazy_static", "once_cell", @@ -5964,7 +5981,7 @@ dependencies = [ "howudoin", "humantime-serde", "hyper", - "indexmap", + "indexmap 2.0.0", "indicatif", "inferno", "jsonrpc-core", @@ -5990,7 +6007,7 @@ dependencies = [ "tinyvec", "tokio", "tokio-stream", - "toml 0.7.4", + "toml 0.7.5", "tonic", "tonic-build", "tower", diff --git a/deny.toml b/deny.toml index 900d08ef9e0..fa51dc2e439 100644 --- a/deny.toml +++ b/deny.toml @@ -63,6 +63,9 @@ skip-tree = [ # wait for backtrace and multiple dependents to upgrade { name = "miniz_oxide", version = "=0.6.2" }, + # wait for h2 and tower to upgrade + { name = "indexmap", version = "=1.9.3" }, + # ZF crates # wait for zcashd and zcash_script to upgrade diff --git a/zebra-chain/Cargo.toml b/zebra-chain/Cargo.toml index d2a17a0467c..18209d99ce0 100644 
--- a/zebra-chain/Cargo.toml +++ b/zebra-chain/Cargo.toml @@ -105,7 +105,7 @@ redjubjub = "0.7.0" reddsa = "0.5.0" # Production feature json-conversion -serde_json = { version = "1.0.97", optional = true } +serde_json = { version = "1.0.99", optional = true } # Experimental feature getblocktemplate-rpcs zcash_address = { version = "0.2.1", optional = true } diff --git a/zebra-network/Cargo.toml b/zebra-network/Cargo.toml index 494f182503f..1527d880612 100644 --- a/zebra-network/Cargo.toml +++ b/zebra-network/Cargo.toml @@ -47,7 +47,7 @@ chrono = { version = "0.4.26", default-features = false, features = ["clock", "s dirs = "5.0.1" hex = "0.4.3" humantime-serde = "1.1.1" -indexmap = { version = "1.9.3", features = ["serde"] } +indexmap = { version = "2.0.0", features = ["serde"] } itertools = "0.11.0" lazy_static = "1.4.0" num-integer = "0.1.45" @@ -91,7 +91,7 @@ proptest-derive = "0.3.0" static_assertions = "1.1.0" tokio = { version = "1.28.2", features = ["full", "tracing", "test-util"] } -toml = "0.7.4" +toml = "0.7.5" zebra-chain = { path = "../zebra-chain", features = ["proptest-impl"] } zebra-test = { path = "../zebra-test/" } diff --git a/zebra-node-services/Cargo.toml b/zebra-node-services/Cargo.toml index 9d9b3885a50..11db2015541 100644 --- a/zebra-node-services/Cargo.toml +++ b/zebra-node-services/Cargo.toml @@ -45,7 +45,7 @@ jsonrpc-core = { version = "18.0.0", optional = true } # Security: avoid default dependency on openssl reqwest = { version = "0.11.18", default-features = false, features = ["rustls-tls"], optional = true } serde = { version = "1.0.164", optional = true } -serde_json = { version = "1.0.97", optional = true } +serde_json = { version = "1.0.99", optional = true } [dev-dependencies] diff --git a/zebra-rpc/Cargo.toml b/zebra-rpc/Cargo.toml index f7891f45325..e9c0f4d3308 100644 --- a/zebra-rpc/Cargo.toml +++ b/zebra-rpc/Cargo.toml @@ -51,8 +51,8 @@ jsonrpc-http-server = "18.0.0" num_cpus = "1.15.0" # zebra-rpc needs the preserve_order 
feature in serde_json, which is a dependency of jsonrpc-core -serde_json = { version = "1.0.97", features = ["preserve_order"] } -indexmap = { version = "1.9.3", features = ["serde"] } +serde_json = { version = "1.0.99", features = ["preserve_order"] } +indexmap = { version = "2.0.0", features = ["serde"] } tokio = { version = "1.28.2", features = ["time", "rt-multi-thread", "macros", "tracing"] } tower = "0.4.13" diff --git a/zebra-state/Cargo.toml b/zebra-state/Cargo.toml index 143d772f2c2..512d2d09d31 100644 --- a/zebra-state/Cargo.toml +++ b/zebra-state/Cargo.toml @@ -48,7 +48,7 @@ chrono = { version = "0.4.26", default-features = false, features = ["clock", "s dirs = "5.0.1" futures = "0.3.28" hex = "0.4.3" -indexmap = "1.9.3" +indexmap = "2.0.0" itertools = "0.11.0" lazy_static = "1.4.0" metrics = "0.21.0" @@ -69,7 +69,7 @@ tracing = "0.1.37" # elasticsearch specific dependencies. # Security: avoid default dependency on openssl elasticsearch = { version = "8.5.0-alpha.1", default-features = false, features = ["rustls-tls"], optional = true } -serde_json = { version = "1.0.97", package = "serde_json", optional = true } +serde_json = { version = "1.0.99", package = "serde_json", optional = true } zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.26" } diff --git a/zebra-test/Cargo.toml b/zebra-test/Cargo.toml index d64b6e59628..04d91019581 100644 --- a/zebra-test/Cargo.toml +++ b/zebra-test/Cargo.toml @@ -16,7 +16,7 @@ categories = ["command-line-utilities", "cryptography::cryptocurrencies"] [dependencies] hex = "0.4.3" -indexmap = "1.9.3" +indexmap = "2.0.0" lazy_static = "1.4.0" insta = "1.30.0" proptest = "1.2.0" diff --git a/zebra-utils/Cargo.toml b/zebra-utils/Cargo.toml index 98b7fb50d20..4bf58589c88 100644 --- a/zebra-utils/Cargo.toml +++ b/zebra-utils/Cargo.toml @@ -69,7 +69,7 @@ tinyvec = { version = "1.6.0", features = ["rustc_1_55"] } structopt = "0.3.26" hex = "0.4.3" -serde_json = "1.0.97" +serde_json = "1.0.99" tracing-error = "0.2.0" 
tracing-subscriber = "0.3.17" thiserror = "1.0.40" diff --git a/zebrad/Cargo.toml b/zebrad/Cargo.toml index 98251d1dfff..8353fecf6ee 100644 --- a/zebrad/Cargo.toml +++ b/zebrad/Cargo.toml @@ -141,11 +141,11 @@ abscissa_core = "0.7.0" clap = { version = "4.3.8", features = ["cargo"] } chrono = { version = "0.4.26", default-features = false, features = ["clock", "std"] } humantime-serde = "1.1.1" -indexmap = "1.9.3" +indexmap = "2.0.0" lazy_static = "1.4.0" semver = "1.0.17" serde = { version = "1.0.164", features = ["serde_derive"] } -toml = "0.7.4" +toml = "0.7.5" futures = "0.3.28" rayon = "1.7.0" @@ -221,7 +221,7 @@ once_cell = "1.18.0" regex = "1.8.4" # zebra-rpc needs the preserve_order feature, it also makes test results more stable -serde_json = { version = "1.0.97", features = ["preserve_order"] } +serde_json = { version = "1.0.99", features = ["preserve_order"] } tempfile = "3.5.0" hyper = { version = "0.14.27", features = ["http1", "http2", "server"]} From aa8489373e65dbc282e0227d07af852ba3790f09 Mon Sep 17 00:00:00 2001 From: teor Date: Fri, 30 Jun 2023 06:19:15 +1000 Subject: [PATCH 166/265] Use release images for zcash-params downloads (#7097) --- docker/zcash-params/Dockerfile | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/docker/zcash-params/Dockerfile b/docker/zcash-params/Dockerfile index bda075f4b94..dce8153d185 100644 --- a/docker/zcash-params/Dockerfile +++ b/docker/zcash-params/Dockerfile @@ -1,16 +1,19 @@ -# This image is for caching Zcash Sprout and Sapling parameters +# This image is for caching Zcash Sprout and Sapling parameters. +# We don't test it automatically in CI due to download server rate-limiting. +# To manually run it on the PR branch before merging, go to: +# https://github.com/ZcashFoundation/zebra/actions/workflows/zcash-params.yml FROM debian:bullseye-slim AS release -# Just use the precompiled zebrad binary from a recent test image. 
+# Just use the precompiled zebrad binary from a recent release image. # # It doesn't matter what build or commit of Zebra we use, because it just calls into the # zcash_proofs download code. (Which doesn't change much.) -# Release image zebrad binaries would also work. +# Test image zebrad binaries would also work, but it's harder to get a recent tag for them. # # Compiling the download-params example using `cargo ` is another alternative: # `cargo run --locked --release --features default-docker --example download-params` -COPY --from=us-docker.pkg.dev/zealous-zebra/zebra/zebrad-test /usr/local/bin/zebrad /usr/local/bin +COPY --from=zfnd/zebra:latest /usr/local/bin/zebrad /usr/local/bin # Pre-download Zcash Sprout and Sapling parameters RUN zebrad download From 2a3197221ea167c98010050799cbb337853ff8cd Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 29 Jun 2023 23:19:25 +0000 Subject: [PATCH 167/265] build(deps): bump num_cpus from 1.15.0 to 1.16.0 (#7106) Bumps [num_cpus](https://github.com/seanmonstar/num_cpus) from 1.15.0 to 1.16.0. - [Release notes](https://github.com/seanmonstar/num_cpus/releases) - [Changelog](https://github.com/seanmonstar/num_cpus/blob/master/CHANGELOG.md) - [Commits](https://github.com/seanmonstar/num_cpus/compare/v1.15.0...v1.16.0) --- updated-dependencies: - dependency-name: num_cpus dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 15 +++------------ zebra-rpc/Cargo.toml | 2 +- 2 files changed, 4 insertions(+), 13 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 4e6fbcdc318..1263f4f5603 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1814,15 +1814,6 @@ dependencies = [ "libc", ] -[[package]] -name = "hermit-abi" -version = "0.2.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee512640fe35acbfb4bb779db6f0d80704c2cacfa2e39b601ef3e3f47d1ae4c7" -dependencies = [ - "libc", -] - [[package]] name = "hermit-abi" version = "0.3.1" @@ -2730,11 +2721,11 @@ dependencies = [ [[package]] name = "num_cpus" -version = "1.15.0" +version = "1.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fac9e2da13b5eb447a6ce3d392f23a29d8694bff781bf03a16cd9ac8697593b" +checksum = "4161fcb6d602d4d2081af7c3a45852d875a03dd337a6bfdd6e06407b61342a43" dependencies = [ - "hermit-abi 0.2.6", + "hermit-abi 0.3.1", "libc", ] diff --git a/zebra-rpc/Cargo.toml b/zebra-rpc/Cargo.toml index e9c0f4d3308..a41376cdecc 100644 --- a/zebra-rpc/Cargo.toml +++ b/zebra-rpc/Cargo.toml @@ -48,7 +48,7 @@ hyper = { version = "0.14.27", features = ["http1", "server"] } jsonrpc-core = "18.0.0" jsonrpc-derive = "18.0.0" jsonrpc-http-server = "18.0.0" -num_cpus = "1.15.0" +num_cpus = "1.16.0" # zebra-rpc needs the preserve_order feature in serde_json, which is a dependency of jsonrpc-core serde_json = { version = "1.0.99", features = ["preserve_order"] } From 5eea1119225a3e272d2f528882079995a5f7a173 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 30 Jun 2023 02:58:00 +0000 Subject: [PATCH 168/265] build(deps): bump tokio from 1.28.2 to 1.29.0 (#7094) Bumps [tokio](https://github.com/tokio-rs/tokio) from 1.28.2 to 1.29.0. 
- [Release notes](https://github.com/tokio-rs/tokio/releases) - [Commits](https://github.com/tokio-rs/tokio/compare/tokio-1.28.2...tokio-1.29.0) --- updated-dependencies: - dependency-name: tokio dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 5 +++-- tower-batch-control/Cargo.toml | 4 ++-- tower-fallback/Cargo.toml | 2 +- zebra-chain/Cargo.toml | 4 ++-- zebra-consensus/Cargo.toml | 4 ++-- zebra-network/Cargo.toml | 4 ++-- zebra-rpc/Cargo.toml | 4 ++-- zebra-state/Cargo.toml | 4 ++-- zebra-test/Cargo.toml | 2 +- zebra-utils/Cargo.toml | 2 +- zebrad/Cargo.toml | 4 ++-- 11 files changed, 20 insertions(+), 19 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 1263f4f5603..b5a217dbc65 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4517,11 +4517,12 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.28.2" +version = "1.29.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94d7b1cfd2aa4011f2de74c2c4c63665e27a71006b0a192dcd2710272e73dfa2" +checksum = "374442f06ee49c3a28a8fc9f01a2596fed7559c6b99b31279c3261778e77d84f" dependencies = [ "autocfg", + "backtrace", "bytes", "libc", "mio", diff --git a/tower-batch-control/Cargo.toml b/tower-batch-control/Cargo.toml index 87c7f1fa7a6..709d0d77437 100644 --- a/tower-batch-control/Cargo.toml +++ b/tower-batch-control/Cargo.toml @@ -26,7 +26,7 @@ futures = "0.3.28" futures-core = "0.3.28" pin-project = "1.1.0" rayon = "1.7.0" -tokio = { version = "1.28.2", features = ["time", "sync", "tracing", "macros"] } +tokio = { version = "1.29.0", features = ["time", "sync", "tracing", "macros"] } tokio-util = "0.7.8" tower = { version = "0.4.13", features = ["util", "buffer"] } tracing = "0.1.37" @@ -41,7 +41,7 @@ tinyvec = { version = "1.6.0", features = ["rustc_1_55"] } 
ed25519-zebra = "4.0.0" rand = "0.8.5" -tokio = { version = "1.28.2", features = ["full", "tracing", "test-util"] } +tokio = { version = "1.29.0", features = ["full", "tracing", "test-util"] } tokio-test = "0.4.2" tower-fallback = { path = "../tower-fallback/" } tower-test = "0.4.0" diff --git a/tower-fallback/Cargo.toml b/tower-fallback/Cargo.toml index fd18e859eee..1c7551e3cd1 100644 --- a/tower-fallback/Cargo.toml +++ b/tower-fallback/Cargo.toml @@ -22,6 +22,6 @@ futures-core = "0.3.28" tracing = "0.1.37" [dev-dependencies] -tokio = { version = "1.28.2", features = ["full", "tracing", "test-util"] } +tokio = { version = "1.29.0", features = ["full", "tracing", "test-util"] } zebra-test = { path = "../zebra-test/" } diff --git a/zebra-chain/Cargo.toml b/zebra-chain/Cargo.toml index 18209d99ce0..dad4cbc55df 100644 --- a/zebra-chain/Cargo.toml +++ b/zebra-chain/Cargo.toml @@ -117,7 +117,7 @@ proptest-derive = { version = "0.3.0", optional = true } rand = { version = "0.8.5", optional = true } rand_chacha = { version = "0.3.1", optional = true } -tokio = { version = "1.28.2", features = ["tracing"], optional = true } +tokio = { version = "1.29.0", features = ["tracing"], optional = true } zebra-test = { path = "../zebra-test/", version = "1.0.0-beta.26", optional = true } @@ -140,7 +140,7 @@ proptest-derive = "0.3.0" rand = "0.8.5" rand_chacha = "0.3.1" -tokio = { version = "1.28.2", features = ["full", "tracing", "test-util"] } +tokio = { version = "1.29.0", features = ["full", "tracing", "test-util"] } zebra-test = { path = "../zebra-test/" } diff --git a/zebra-consensus/Cargo.toml b/zebra-consensus/Cargo.toml index ddea30687c5..f7c12e56aee 100644 --- a/zebra-consensus/Cargo.toml +++ b/zebra-consensus/Cargo.toml @@ -53,7 +53,7 @@ futures = "0.3.28" futures-util = "0.3.28" metrics = "0.21.0" thiserror = "1.0.40" -tokio = { version = "1.28.2", features = ["time", "sync", "tracing", "rt-multi-thread"] } +tokio = { version = "1.29.0", features = ["time", "sync", 
"tracing", "rt-multi-thread"] } tower = { version = "0.4.13", features = ["timeout", "util", "buffer"] } tracing = "0.1.37" tracing-futures = "0.2.5" @@ -89,7 +89,7 @@ proptest = "1.2.0" proptest-derive = "0.3.0" spandoc = "0.2.2" -tokio = { version = "1.28.2", features = ["full", "tracing", "test-util"] } +tokio = { version = "1.29.0", features = ["full", "tracing", "test-util"] } tracing-error = "0.2.0" tracing-subscriber = "0.3.17" diff --git a/zebra-network/Cargo.toml b/zebra-network/Cargo.toml index 1527d880612..1df18e413ae 100644 --- a/zebra-network/Cargo.toml +++ b/zebra-network/Cargo.toml @@ -61,7 +61,7 @@ tempfile = "3.5.0" thiserror = "1.0.40" futures = "0.3.28" -tokio = { version = "1.28.2", features = ["fs", "io-util", "net", "time", "tracing", "macros", "rt-multi-thread"] } +tokio = { version = "1.29.0", features = ["fs", "io-util", "net", "time", "tracing", "macros", "rt-multi-thread"] } tokio-stream = { version = "0.1.14", features = ["sync", "time"] } tokio-util = { version = "0.7.8", features = ["codec"] } tower = { version = "0.4.13", features = ["retry", "discover", "load", "load-shed", "timeout", "util", "buffer"] } @@ -90,7 +90,7 @@ proptest = "1.2.0" proptest-derive = "0.3.0" static_assertions = "1.1.0" -tokio = { version = "1.28.2", features = ["full", "tracing", "test-util"] } +tokio = { version = "1.29.0", features = ["full", "tracing", "test-util"] } toml = "0.7.5" zebra-chain = { path = "../zebra-chain", features = ["proptest-impl"] } diff --git a/zebra-rpc/Cargo.toml b/zebra-rpc/Cargo.toml index a41376cdecc..f21d7b6638d 100644 --- a/zebra-rpc/Cargo.toml +++ b/zebra-rpc/Cargo.toml @@ -54,7 +54,7 @@ num_cpus = "1.16.0" serde_json = { version = "1.0.99", features = ["preserve_order"] } indexmap = { version = "2.0.0", features = ["serde"] } -tokio = { version = "1.28.2", features = ["time", "rt-multi-thread", "macros", "tracing"] } +tokio = { version = "1.29.0", features = ["time", "rt-multi-thread", "macros", "tracing"] } tower = "0.4.13" 
tracing = "0.1.37" @@ -83,7 +83,7 @@ insta = { version = "1.30.0", features = ["redactions", "json", "ron"] } proptest = "1.2.0" thiserror = "1.0.40" -tokio = { version = "1.28.2", features = ["full", "tracing", "test-util"] } +tokio = { version = "1.29.0", features = ["full", "tracing", "test-util"] } zebra-chain = { path = "../zebra-chain", features = ["proptest-impl"] } zebra-consensus = { path = "../zebra-consensus", features = ["proptest-impl"] } diff --git a/zebra-state/Cargo.toml b/zebra-state/Cargo.toml index 512d2d09d31..d1d48717964 100644 --- a/zebra-state/Cargo.toml +++ b/zebra-state/Cargo.toml @@ -62,7 +62,7 @@ tempfile = "3.5.0" thiserror = "1.0.40" rayon = "1.7.0" -tokio = { version = "1.28.2", features = ["rt-multi-thread", "sync", "tracing"] } +tokio = { version = "1.29.0", features = ["rt-multi-thread", "sync", "tracing"] } tower = { version = "0.4.13", features = ["buffer", "util"] } tracing = "0.1.37" @@ -100,7 +100,7 @@ rand = "0.8.5" halo2 = { package = "halo2_proofs", version = "0.3.0" } jubjub = "0.10.0" -tokio = { version = "1.28.2", features = ["full", "tracing", "test-util"] } +tokio = { version = "1.29.0", features = ["full", "tracing", "test-util"] } zebra-chain = { path = "../zebra-chain", features = ["proptest-impl"] } zebra-test = { path = "../zebra-test/" } diff --git a/zebra-test/Cargo.toml b/zebra-test/Cargo.toml index 04d91019581..57ea54f3527 100644 --- a/zebra-test/Cargo.toml +++ b/zebra-test/Cargo.toml @@ -24,7 +24,7 @@ once_cell = "1.18.0" rand = "0.8.5" regex = "1.8.4" -tokio = { version = "1.28.2", features = ["full", "tracing", "test-util"] } +tokio = { version = "1.29.0", features = ["full", "tracing", "test-util"] } tower = { version = "0.4.13", features = ["util"] } futures = "0.3.28" diff --git a/zebra-utils/Cargo.toml b/zebra-utils/Cargo.toml index 4bf58589c88..a9b32576751 100644 --- a/zebra-utils/Cargo.toml +++ b/zebra-utils/Cargo.toml @@ -88,4 +88,4 @@ regex = { version = "1.8.4", optional = true } reqwest = { version 
= "0.11.18", optional = true } # These crates are needed for the zebra-checkpoints and search-issue-refs binaries -tokio = { version = "1.28.2", features = ["full"], optional = true } +tokio = { version = "1.29.0", features = ["full"], optional = true } diff --git a/zebrad/Cargo.toml b/zebrad/Cargo.toml index 8353fecf6ee..4988bafdf06 100644 --- a/zebrad/Cargo.toml +++ b/zebrad/Cargo.toml @@ -149,7 +149,7 @@ toml = "0.7.5" futures = "0.3.28" rayon = "1.7.0" -tokio = { version = "1.28.2", features = ["time", "rt-multi-thread", "macros", "tracing", "signal"] } +tokio = { version = "1.29.0", features = ["time", "rt-multi-thread", "macros", "tracing", "signal"] } tower = { version = "0.4.13", features = ["hedge", "limit"] } pin-project = "1.1.0" @@ -227,7 +227,7 @@ tempfile = "3.5.0" hyper = { version = "0.14.27", features = ["http1", "http2", "server"]} tracing-test = { version = "0.2.4", features = ["no-env-filter"] } -tokio = { version = "1.28.2", features = ["full", "tracing", "test-util"] } +tokio = { version = "1.29.0", features = ["full", "tracing", "test-util"] } tokio-stream = "0.1.14" # test feature lightwalletd-grpc-tests From 322cbec81700bf488cc977d7b900c80c4bdad6d1 Mon Sep 17 00:00:00 2001 From: teor Date: Sat, 1 Jul 2023 02:58:05 +1000 Subject: [PATCH 169/265] fix(hang): Stop blocking some Zebra futures for up to a minute using a CPU busy-loop, Credit: Ziggurat Team (#6763), james_katz (#7000) (#7103) * Stop busy-waiting in a Future for 45 seconds every minute * Use the correct elapsed time calculation * Add some TODOs for making the structure of the loop and wait times clearer --- zebrad/src/components/sync/progress.rs | 29 +++++++++++++++----------- 1 file changed, 17 insertions(+), 12 deletions(-) diff --git a/zebrad/src/components/sync/progress.rs b/zebrad/src/components/sync/progress.rs index 9ab272a2ca8..8c49df4363c 100644 --- a/zebrad/src/components/sync/progress.rs +++ b/zebrad/src/components/sync/progress.rs @@ -1,8 +1,12 @@ //! 
Progress tracking for blockchain syncing. -use std::{cmp::min, ops::Add, time::Duration}; +use std::{ + cmp::min, + ops::Add, + time::{Duration, Instant}, +}; -use chrono::{TimeZone, Utc}; +use chrono::Utc; use num_integer::div_ceil; use zebra_chain::{ @@ -118,17 +122,15 @@ pub async fn show_block_chain_progress( let mut last_state_change_height = Height(0); // The last time we logged an update. - // Initialised with the unix epoch, to simplify the code while still staying in the std range. - let mut last_log_time = Utc - .timestamp_opt(0, 0) - .single() - .expect("in-range number of seconds and valid nanosecond"); + let mut last_log_time = Instant::now(); #[cfg(feature = "progress-bar")] let block_bar = howudoin::new().label("Blocks"); loop { let now = Utc::now(); + let instant_now = Instant::now(); + let is_syncer_stopped = sync_status.is_close_to_tip(); if let Some(estimated_height) = @@ -142,6 +144,8 @@ pub async fn show_block_chain_progress( let network_upgrade = NetworkUpgrade::current(network, current_height); // Send progress reports for block height + // + // TODO: split the progress bar height update into its own function. #[cfg(feature = "progress-bar")] if matches!(howudoin::cancelled(), Some(true)) { block_bar.close(); @@ -152,16 +156,17 @@ pub async fn show_block_chain_progress( .desc(network_upgrade.to_string()); } - // Skip logging if it isn't time for it yet - let elapsed_since_log = (now - last_log_time) - .to_std() - .expect("elapsed times are in range"); + // Skip logging and status updates if it isn't time for them yet. + let elapsed_since_log = instant_now.saturating_duration_since(last_log_time); if elapsed_since_log < LOG_INTERVAL { + tokio::time::sleep(PROGRESS_BAR_INTERVAL).await; continue; } else { - last_log_time = now; + last_log_time = instant_now; } + // TODO: split logging / status updates into their own function. + // Work out the sync progress towards the estimated tip. 
let sync_progress = f64::from(current_height.0) / f64::from(estimated_height.0); let sync_percent = format!( From e6c3b878727115bf64c9494e60598141e2d47297 Mon Sep 17 00:00:00 2001 From: teor Date: Mon, 3 Jul 2023 06:08:11 +1000 Subject: [PATCH 170/265] Stop panicking on shutdown in the syncer and network init (#7104) --- zebra-network/src/meta_addr/peer_addr.rs | 7 ++++++- zebra-network/src/peer_set/initialize.rs | 26 +++++++++++++++++++++--- zebrad/src/components/sync.rs | 14 +++++++++++-- 3 files changed, 41 insertions(+), 6 deletions(-) diff --git a/zebra-network/src/meta_addr/peer_addr.rs b/zebra-network/src/meta_addr/peer_addr.rs index e7aa6859318..92a27defcca 100644 --- a/zebra-network/src/meta_addr/peer_addr.rs +++ b/zebra-network/src/meta_addr/peer_addr.rs @@ -3,7 +3,7 @@ use std::{ fmt, - net::SocketAddr, + net::{Ipv4Addr, SocketAddr}, ops::{Deref, DerefMut}, str::FromStr, }; @@ -66,6 +66,11 @@ impl DerefMut for PeerSocketAddr { } impl PeerSocketAddr { + /// Returns an unspecified `PeerSocketAddr`, which can't be used for outbound connections. + pub fn unspecified() -> Self { + Self(SocketAddr::new(Ipv4Addr::UNSPECIFIED.into(), 0)) + } + /// Return the underlying [`SocketAddr`], which allows sensitive peer address information to /// be printed and logged. pub fn remove_socket_addr_privacy(&self) -> SocketAddr { diff --git a/zebra-network/src/peer_set/initialize.rs b/zebra-network/src/peer_set/initialize.rs index cdd4807e61b..8b91320fef8 100644 --- a/zebra-network/src/peer_set/initialize.rs +++ b/zebra-network/src/peer_set/initialize.rs @@ -213,7 +213,17 @@ where // Wait for the initial seed peer count let mut active_outbound_connections = initial_peers_join .await - .expect("unexpected panic in spawned initial peers task") + .unwrap_or_else(|e @ JoinError { .. }| { + if e.is_panic() { + panic!("panic in initial peer connections task: {e:?}"); + } else { + info!( + "task error during initial peer connections: {e:?},\ + is Zebra shutting down?" 
+ ); + Err(e.into()) + } + }) .expect("unexpected error connecting to initial peers"); let active_initial_peer_count = active_outbound_connections.update_count(); @@ -353,8 +363,18 @@ where .collect(); while let Some(handshake_result) = handshakes.next().await { - let handshake_result = - handshake_result.expect("unexpected panic in initial peer handshake"); + let handshake_result = handshake_result.unwrap_or_else(|e @ JoinError { .. }| { + if e.is_panic() { + panic!("panic in initial peer connection: {e:?}"); + } else { + info!( + "task error during initial peer connection: {e:?},\ + is Zebra shutting down?" + ); + // Fake the address, it doesn't matter because we're shutting down anyway + Err((PeerSocketAddr::unspecified(), e.into())) + } + }); match handshake_result { Ok(change) => { handshake_success_total += 1; diff --git a/zebrad/src/components/sync.rs b/zebrad/src/components/sync.rs index 2e984b3af25..89ee01fb34b 100644 --- a/zebrad/src/components/sync.rs +++ b/zebrad/src/components/sync.rs @@ -8,7 +8,7 @@ use color_eyre::eyre::{eyre, Report}; use futures::stream::{FuturesUnordered, StreamExt}; use indexmap::IndexSet; use serde::{Deserialize, Serialize}; -use tokio::{sync::watch, time::sleep}; +use tokio::{sync::watch, task::JoinError, time::sleep}; use tower::{ builder::ServiceBuilder, hedge::Hedge, limit::ConcurrencyLimit, retry::Retry, timeout::Timeout, Service, ServiceExt, @@ -668,7 +668,17 @@ where let mut download_set = IndexSet::new(); while let Some(res) = requests.next().await { match res - .expect("panic in spawned obtain tips request") + .unwrap_or_else(|e @ JoinError { .. }| { + if e.is_panic() { + panic!("panic in obtain tips task: {e:?}"); + } else { + info!( + "task error during obtain tips task: {e:?},\ + is Zebra shutting down?" 
+ ); + Err(e.into()) + } + }) .map_err::(|e| eyre!(e)) { Ok(zn::Response::BlockHashes(hashes)) => { From 2add0e50a9db2923c0e4ecba3f3c6f8c55691239 Mon Sep 17 00:00:00 2001 From: teor Date: Mon, 3 Jul 2023 09:08:40 +1000 Subject: [PATCH 171/265] Release Zebra v1.0.1 (#7090) * Update license description in README for MIT-only crates * Draft changelog with trivial issues * Remove trivial issues * Update changelog entries as of commit 2a31972 and PR #7103 * Update mainnet and testnet checkpoints as of 2023-06-30 * chore: Release * Estimate release height for Zebra v1.0.1 Block height 2139118 at 2023-06-30 01:55:38 UTC Release is likely to be 2023-07-01 2139118 + 1152 * 3 = 2142574 Then round up to the nearest 1000. --- CHANGELOG.md | 79 +++++++++++++++++++ Cargo.lock | 24 +++--- README.md | 4 + tower-batch-control/Cargo.toml | 2 +- tower-fallback/Cargo.toml | 2 +- zebra-chain/Cargo.toml | 4 +- zebra-consensus/Cargo.toml | 14 ++-- .../src/checkpoint/main-checkpoints.txt | 62 +++++++++++++++ .../src/checkpoint/test-checkpoints.txt | 62 +++++++++++++++ zebra-network/Cargo.toml | 4 +- zebra-node-services/Cargo.toml | 4 +- zebra-rpc/Cargo.toml | 14 ++-- zebra-script/Cargo.toml | 4 +- zebra-state/Cargo.toml | 6 +- zebra-test/Cargo.toml | 2 +- zebra-utils/Cargo.toml | 8 +- zebrad/Cargo.toml | 16 ++-- zebrad/src/components/sync/end_of_support.rs | 4 +- 18 files changed, 261 insertions(+), 54 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 1230bb0de46..572babc2d72 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,6 +6,85 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org). +## [Zebra 1.0.1](https://github.com/ZcashFoundation/zebra/releases/tag/v1.0.1) - 2023-06-29 + +Zebra's first patch release fixes multiple peer connection security issues and panics. It also significantly reduces Zebra's CPU usage. We recommend that all users upgrade to Zebra 1.0.1 or later. 
+ +As of this release, Zebra requires Rust 1.70 to build. macOS builds are no longer officially supported by the Zebra team. + +If you're running `zebrad` in a terminal, you'll see a new Zebra welcome message. + +Please report bugs to [the Zebra GitHub repository](https://github.com/ZcashFoundation/zebra/issues/new?assignees=&labels=C-bug%2C+S-needs-triage&projects=&template=bug_report.yml&title=) + +### Breaking Changes + +This release has the following breaking changes: +- Zebra limits each IP address to 1 peer connection, to prevent denial of service attacks. This can be changed using the `network.max_connections_per_ip` config. ([#6980](https://github.com/ZcashFoundation/zebra/pull/6980), [#6993](https://github.com/ZcashFoundation/zebra/pull/6993), [#7013](https://github.com/ZcashFoundation/zebra/pull/7013)). + Thank you to @dimxy from komodo for reporting this bug, and the Ziggurat team for demonstrating + its impact on testnet. +- Zebra uses new APIs in Rust 1.70 to prevent concurrency bugs that could cause hangs or panics + ([#7032](https://github.com/ZcashFoundation/zebra/pull/7032)). + +### Support Changes + +These platforms are no longer supported by the Zebra team: +- macOS has been moved from tier 2 to [tier 3 support](https://github.com/ZcashFoundation/zebra/blob/main/book/src/user/supported-platforms.md#tier-3) ([#6965](https://github.com/ZcashFoundation/zebra/pull/6965)). We disabled our regular macOS builds because Rust 1.70 [causes crashes during shutdown on macOS x86_64 (#6812)](https://github.com/ZcashFoundation/zebra/issues/6812). Zebra's state uses database transactions, so it should not be corrupted by the crash. 
+ +### Security + +- Use Arc::into\_inner() to avoid potential hangs or panics ([#7032](https://github.com/ZcashFoundation/zebra/pull/7032)) +- Replace openssl with rustls in tests and experimental features ([#7047](https://github.com/ZcashFoundation/zebra/pull/7047)) + +#### Network Security + +- Fix long delays in accepting inbound handshakes, and delays in async operations throughout Zebra. ([#7103](https://github.com/ZcashFoundation/zebra/pull/7103)). Thank you to the Ziggurat Team for reporting this bug. +- Limit each IP address to 1 peer connection, to prevent denial of service attacks. ([#6980](https://github.com/ZcashFoundation/zebra/pull/6980), [#6993](https://github.com/ZcashFoundation/zebra/pull/6993)) +- Close new peer connections from the same IP and port, rather than replacing the older connection ([#6980](https://github.com/ZcashFoundation/zebra/pull/6980)) +- Reduce inbound service overloads and add a timeout ([#6950](https://github.com/ZcashFoundation/zebra/pull/6950)) +- Stop panicking when handling inbound connection handshakes ([#6984](https://github.com/ZcashFoundation/zebra/pull/6984)) + +### Added + +- Make the maximum number of connections per IP configurable ([#7013](https://github.com/ZcashFoundation/zebra/pull/7013)) +- Make it easier to modify Zebra's config inside the Docker image ([#7045](https://github.com/ZcashFoundation/zebra/pull/7045)) +- Print a Zebra logo and welcome text if stderr is terminal ([#6945](https://github.com/ZcashFoundation/zebra/pull/6945), [#7075](https://github.com/ZcashFoundation/zebra/pull/7075), [#7095](https://github.com/ZcashFoundation/zebra/pull/7095), [#7102](https://github.com/ZcashFoundation/zebra/pull/7102)) + +### Changed + +- Move macOS to tier 3 support ([#6965](https://github.com/ZcashFoundation/zebra/pull/6965)) +- Install from crates.io in the README, rather than a git release tag ([#6977](https://github.com/ZcashFoundation/zebra/pull/6977)) +- Add extra timeout logging to peer TCP connections 
([#6969](https://github.com/ZcashFoundation/zebra/pull/6969)) + +### Fixed + +- Stop overwriting custom user configs inside Zebra's Docker image ([#7045](https://github.com/ZcashFoundation/zebra/pull/7045)) +- Stop Zebra using 100% CPU even when idle ([#7103](https://github.com/ZcashFoundation/zebra/pull/7103)), thank you to james_katz for reporting this bug +- Avoid potential hangs in the `tokio` async runtime ([#7094](https://github.com/ZcashFoundation/zebra/pull/7094)) +- Replace or add RPC content type header to support `zcashd` RPC examples ([#6885](https://github.com/ZcashFoundation/zebra/pull/6885)) +- Make `zebra-network` licensing clearer ([#6995](https://github.com/ZcashFoundation/zebra/pull/6995)) + +#### Configuration + +- Ignore error from loading config if running the 'generate' or 'download' commands ([#7014](https://github.com/ZcashFoundation/zebra/pull/7014)) +- Apply force\_color to panic logs ([#6997](https://github.com/ZcashFoundation/zebra/pull/6997)) + +#### Logging & Error Handling + +- Log a zebra-network task cancel on shutdown, rather than panicking ([#7078](https://github.com/ZcashFoundation/zebra/pull/7078)) +- Fix incorrect function spans in some logs ([#6923](https://github.com/ZcashFoundation/zebra/pull/6923), [#6995](https://github.com/ZcashFoundation/zebra/pull/6995)) +- Replace a state validation chain length assertion with a NotReadyToBeCommitted error ([#7072](https://github.com/ZcashFoundation/zebra/pull/7072)) + +#### Experimental Feature Fixes + +- Add an elasticsearch feature to block serialize to fix experimental build failures ([#6709](https://github.com/ZcashFoundation/zebra/pull/6709)) +- Prevent progress bar from panicking by disabling limits that are never reached ([#6940](https://github.com/ZcashFoundation/zebra/pull/6940)) + +### Contributors + +Thank you to everyone who contributed to this release, we couldn't make Zebra without you: +@arya2, @conradoplg, @dconnolly, @dimxy from komodo, james_katz, @oxarbitrage, 
@teor2345, @upbqdn, and the Ziggurat team. + + ## [Zebra 1.0.0](https://github.com/ZcashFoundation/zebra/releases/tag/v1.0.0) - 2023-06-14 This is our 1.0.0 stable release. diff --git a/Cargo.lock b/Cargo.lock index b5a217dbc65..adf26f67a42 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4737,7 +4737,7 @@ dependencies = [ [[package]] name = "tower-batch-control" -version = "0.2.41-beta.2" +version = "0.2.41-beta.3" dependencies = [ "color-eyre", "ed25519-zebra", @@ -4761,7 +4761,7 @@ dependencies = [ [[package]] name = "tower-fallback" -version = "0.2.41-beta.2" +version = "0.2.41-beta.3" dependencies = [ "futures-core", "pin-project", @@ -5666,7 +5666,7 @@ dependencies = [ [[package]] name = "zebra-chain" -version = "1.0.0-beta.26" +version = "1.0.0-beta.27" dependencies = [ "bitflags 2.3.3", "bitflags-serde-legacy", @@ -5726,7 +5726,7 @@ dependencies = [ [[package]] name = "zebra-consensus" -version = "1.0.0-beta.26" +version = "1.0.0-beta.27" dependencies = [ "bellman", "blake2b_simd", @@ -5771,7 +5771,7 @@ dependencies = [ [[package]] name = "zebra-network" -version = "1.0.0-beta.26" +version = "1.0.0-beta.27" dependencies = [ "bitflags 2.3.3", "byteorder", @@ -5812,7 +5812,7 @@ dependencies = [ [[package]] name = "zebra-node-services" -version = "1.0.0-beta.26" +version = "1.0.0-beta.27" dependencies = [ "color-eyre", "jsonrpc-core", @@ -5824,7 +5824,7 @@ dependencies = [ [[package]] name = "zebra-rpc" -version = "1.0.0-beta.26" +version = "1.0.0-beta.27" dependencies = [ "chrono", "futures", @@ -5856,7 +5856,7 @@ dependencies = [ [[package]] name = "zebra-script" -version = "1.0.0-beta.26" +version = "1.0.0-beta.27" dependencies = [ "displaydoc", "hex", @@ -5869,7 +5869,7 @@ dependencies = [ [[package]] name = "zebra-state" -version = "1.0.0-beta.26" +version = "1.0.0-beta.27" dependencies = [ "bincode", "chrono", @@ -5911,7 +5911,7 @@ dependencies = [ [[package]] name = "zebra-test" -version = "1.0.0-beta.26" +version = "1.0.0-beta.27" dependencies = [ 
"color-eyre", "futures", @@ -5938,7 +5938,7 @@ dependencies = [ [[package]] name = "zebra-utils" -version = "1.0.0-beta.26" +version = "1.0.0-beta.27" dependencies = [ "color-eyre", "hex", @@ -5959,7 +5959,7 @@ dependencies = [ [[package]] name = "zebrad" -version = "1.0.0" +version = "1.0.1" dependencies = [ "abscissa_core", "atty", diff --git a/README.md b/README.md index af14b07cd4e..e83171a7443 100644 --- a/README.md +++ b/README.md @@ -195,3 +195,7 @@ Zebra is distributed under the terms of both the MIT license and the Apache License (Version 2.0). See [LICENSE-APACHE](LICENSE-APACHE) and [LICENSE-MIT](LICENSE-MIT). + +Some Zebra crates are distributed under the [MIT license only](LICENSE-MIT), +because some of their code was originally from MIT-licensed projects. +See each crate's directory for details. diff --git a/tower-batch-control/Cargo.toml b/tower-batch-control/Cargo.toml index 709d0d77437..05255c0ff56 100644 --- a/tower-batch-control/Cargo.toml +++ b/tower-batch-control/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "tower-batch-control" -version = "0.2.41-beta.2" +version = "0.2.41-beta.3" authors = ["Zcash Foundation ", "Tower Maintainers "] description = "Tower middleware for batch request processing" # # Legal diff --git a/tower-fallback/Cargo.toml b/tower-fallback/Cargo.toml index 1c7551e3cd1..fd241c167b8 100644 --- a/tower-fallback/Cargo.toml +++ b/tower-fallback/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "tower-fallback" -version = "0.2.41-beta.2" +version = "0.2.41-beta.3" authors = ["Zcash Foundation "] description = "A Tower service combinator that sends requests to a first service, then retries processing on a second fallback service if the first service errors." 
license = "MIT OR Apache-2.0" diff --git a/zebra-chain/Cargo.toml b/zebra-chain/Cargo.toml index dad4cbc55df..67c6a39e24a 100644 --- a/zebra-chain/Cargo.toml +++ b/zebra-chain/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "zebra-chain" -version = "1.0.0-beta.26" +version = "1.0.0-beta.27" authors = ["Zcash Foundation "] description = "Core Zcash data structures" license = "MIT OR Apache-2.0" @@ -119,7 +119,7 @@ rand_chacha = { version = "0.3.1", optional = true } tokio = { version = "1.29.0", features = ["tracing"], optional = true } -zebra-test = { path = "../zebra-test/", version = "1.0.0-beta.26", optional = true } +zebra-test = { path = "../zebra-test/", version = "1.0.0-beta.27", optional = true } [dev-dependencies] # Benchmarks diff --git a/zebra-consensus/Cargo.toml b/zebra-consensus/Cargo.toml index f7c12e56aee..2fee6cf466d 100644 --- a/zebra-consensus/Cargo.toml +++ b/zebra-consensus/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "zebra-consensus" -version = "1.0.0-beta.26" +version = "1.0.0-beta.27" authors = ["Zcash Foundation "] description = "Implementation of Zcash consensus checks" license = "MIT OR Apache-2.0" @@ -62,13 +62,13 @@ orchard = "0.4.0" zcash_proofs = { version = "0.11.0", features = ["local-prover", "multicore", "download-params"] } -tower-fallback = { path = "../tower-fallback/", version = "0.2.41-beta.2" } -tower-batch-control = { path = "../tower-batch-control/", version = "0.2.41-beta.2" } +tower-fallback = { path = "../tower-fallback/", version = "0.2.41-beta.3" } +tower-batch-control = { path = "../tower-batch-control/", version = "0.2.41-beta.3" } -zebra-script = { path = "../zebra-script", version = "1.0.0-beta.26" } -zebra-state = { path = "../zebra-state", version = "1.0.0-beta.26" } -zebra-node-services = { path = "../zebra-node-services", version = "1.0.0-beta.26" } -zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.26" } +zebra-script = { path = "../zebra-script", version = "1.0.0-beta.27" } +zebra-state = { path = 
"../zebra-state", version = "1.0.0-beta.27" } +zebra-node-services = { path = "../zebra-node-services", version = "1.0.0-beta.27" } +zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.27" } # prod feature progress-bar howudoin = { version = "0.1.2", optional = true } diff --git a/zebra-consensus/src/checkpoint/main-checkpoints.txt b/zebra-consensus/src/checkpoint/main-checkpoints.txt index 79b60a72f81..89a9f940623 100644 --- a/zebra-consensus/src/checkpoint/main-checkpoints.txt +++ b/zebra-consensus/src/checkpoint/main-checkpoints.txt @@ -10889,3 +10889,65 @@ 2117616 0000000000b91c891557df28d4173766562cc455b3b5ab27e83c9a03958bbc14 2118016 00000000013eb4b6e1cd5b9f19ad032670cad97fd4837b1dd7e876358ff8752a 2118416 0000000000a5e8d0c81b4fb1036d94ac7d16d192bd068258d3aa07fe903b8736 +2118816 00000000003fb9615f739bad0dac026ed1c0f7861330737c4b55d292da4d981f +2119216 0000000000b5e61882e7e4d69c75f335c96a6c1744e8d4602e77caa46f9ee187 +2119616 0000000000d8509fe19c295db85f6001031816dc05aff2674b95925b88c8242f +2120016 0000000000dc0337c69742d294267dd2bd434017827151d6ae7965ec19fd3cef +2120416 00000000010b5fb468a019e2b9115aa0e0ec8cb92017195a4ebd4e9b5a47c6be +2120816 0000000001206742e94d9b3cb8187b4c26be13290724ef97848c62f7d01e90bb +2121216 0000000000b2531dd904338ddf602c87ac70ec14e7aca566d297dff7278648ab +2121616 0000000000b5fca10a6ff18c158d38b83369405362f97f5de4c9bf2cfd12b23c +2122016 000000000121e68a6ab027b23fb9a5e73eb38fa6af0fef88931af48dafc71821 +2122416 0000000000ee1cb21aa6d9b957578ef6f3e45b6730ce7c6e22edfa729d3301f9 +2122816 0000000000fcc1ef9a8d0e4b71f55947fd094ac9254ee0f21e2531eec099a538 +2123196 0000000000c38a681500f237539b08b8d3f75d9ab0233e2b5252b76ddc4727d9 +2123582 0000000000c4014be9b89ef009959a45e4fb48c074881c7afe1780b760853127 +2123932 0000000000e4c93f99189deadd479ecabd3660de1041ebb4a0c5ef5d7cbe5e51 +2124280 000000000127f2b6c0c0ab4048b16116e559dc9b9a934fdbd7810e1ae49b5349 +2124602 0000000001bc3445533dfc7baf59b6294ea1d4585ee928ec18c79b6b49f3dabf +2124906 
00000000001e2edad0443cb8d4df1da641c3c58f2f83893e361fa37fd121c29d +2125219 0000000001280e8b6a0642a896b7b5337aac71c543cc36b26d2d296ead4af381 +2125509 00000000001d565ed9c555b1b276dccaaa87a3c0bbed390a340c799776e37be0 +2125805 00000000017827e42bf3b99f40c6ee11c8d4e56dabb802ad56e74c766a31ae2c +2126109 00000000014e149e7bbed108446e842a5c25e400423074ca891cd88c16e18bb1 +2126422 00000000005bf996c990b6124d3a93f50dd6a8403104be774a659f0c4a1ee54c +2126703 00000000010b6fb36760137accc7926c3d8c314307816f15f84c63eefdded7a8 +2127017 00000000012e0ba6b5a5f0a4ff5310b931c063098f3e96fc997c7b6fb44a24ff +2127320 000000000116fa60015d9f21754b07269703129fb4af00a7b33f7b0fb5c128bc +2127612 0000000000df367879d160aab3f1c3834462110de823b7c2e1407014b49f5544 +2127899 00000000004731b6685e37ccead8caf23b7c1b229aab4407376a07766ea1871b +2128194 00000000013eeadbf973df717320aa63ec2b3e81f0b19e521c37db25ce2ad630 +2128515 00000000002b17686a2aa58e52433e4a11fddd1172020e1464e91ba54d6bef29 +2128803 00000000017166558e5f022e46f2a476c67c69f9963f48951f421ab37426e3a4 +2129111 000000000136b194b3e7bcacf1a0222a8c7f6d3f739e42fb7db2b9ebcf1b6332 +2129418 0000000000ade562bdb165aa21fbefcc0d2e655e213e5ddf0b2bc5459c0b53c7 +2129741 0000000000408733f90084aad38ffa5a356d9f220e40ad077178d492e96ee696 +2130039 00000000015295051bce1c94530d5c8341f51b7aeabed721c26024088acc033e +2130324 000000000047140460766777a3cc6ce71bccf3d2d1aeff7b74936f21cc9f666f +2130628 000000000010fafc22180689562f6447240af96dc3645a666d88655a15509758 +2130915 0000000000ff00e5f8d3608e0549e680f32cb9eca3fe15eab2f1b43f8f5f1f38 +2131206 00000000007e0b7952afbd83aa3f8bbf5277eb025a8d7c130f750b9a75cdef40 +2131495 000000000060944b74e2badfc81d0043c4d97577450193a72c9f640bb8925b57 +2131813 0000000000eb90d10f092764603621bdc345875631ce08106e7bc5cdbea23902 +2132122 0000000000fe437b14ce7ad15b01d31f85076d84331ac0fefad44dbe81246e48 +2132410 0000000001768b04e8620bfd1de919e9ae09b04c0a962b158b106a33986b9aa8 +2132711 00000000007aadf626595d2e46ecff42d199b02849e7815fb4ab499e902b7923 +2133000 
00000000012bd3092c628405bd45bd3c3ddfd9d7f282c5099047ec456a83d4dd +2133300 0000000001affcdb85411d7d2edaae4ece70497edd418b6ac5a6b61471401d69 +2133604 0000000000cbe948c19907c592e4e2704ddb155c1da1cd3a2e6db5ebc8451494 +2133905 000000000157943224c2fc9672f4456dd5babf2fd7404077d85f9136d54fe067 +2134228 0000000000663b7abc789a86bbe3cb0a8fbe5be67c74302e9d6abeda775abd98 +2134528 0000000000449198effd2777d6a2157e94916e317b13eedda1b833c633cbdfb0 +2134835 00000000006ba2705c7eaafcc186ccad35b7f934da0c765e757f76e151137b27 +2135171 00000000010460ae8510ece826a72703f77ff0e7a463e33378c22e136f8152ea +2135462 0000000001195d894fd61b555ace3d99a6c1c124d985880279d0435263941135 +2135769 000000000054b8e03388a56b73a9652f3ff23e87ade79993c76cf6d65397e353 +2136070 0000000000d350786b28b662a544fd929f02dd778b46bf73c0944bc6b0b39e2a +2136396 00000000012230ee900503937205d1b6a6899128801a75b4b1d584f3c13e2fd4 +2136700 00000000002ae376a9bf93e1909594728aebda019466440037e75d3083e0b7e7 +2137028 00000000006023df4efc2a79131181cd46109b4bd788256ad10662edabbad5d1 +2137357 000000000057627e27490f20ff6290004762a698d7d69f9d818c2df2777d9282 +2137686 0000000000f52577e5e8392873b1206ccce3d4ea25360d297d3c3476dbd982de +2138018 000000000006e84370babab79c13faa64113afb4386a92375983d3a7987619ca +2138392 00000000010a100e0d11eabd1692eac1cb93989d2cd03b355e5b0240f77cf978 +2138792 00000000001c6417d7df1be185d6b0ec0657703eebb22e68a418a565da99dbad diff --git a/zebra-consensus/src/checkpoint/test-checkpoints.txt b/zebra-consensus/src/checkpoint/test-checkpoints.txt index e26e49c6347..f6fe84e09e1 100644 --- a/zebra-consensus/src/checkpoint/test-checkpoints.txt +++ b/zebra-consensus/src/checkpoint/test-checkpoints.txt @@ -5948,3 +5948,65 @@ 2378800 00192766442c4ecade78c79653645c5fb06cc99e26a900a5b1139dfbf470d6d0 2379200 004391bd427bde706e2754f5a5f84024536b8d27d763c37b5ecb9261bef359b9 2379600 0000a22e46cc27732a84126a2de66dfbe0bd31a8fcfbd314b773cdbb0624ab99 +2380000 0001cc2c2db186634b6820310aa9f7c5a1779e8027b446c966de2d4df1db119c +2380400 
00138d1e655c247f4835b23bd67637a54f325074f48a6d5d8cfd198af1dd389e +2380800 0018c3e56d80300f2c933e7d605d7328706479fbbd426985669b67986aeaf241 +2381200 001eb8a8a29d3a665d7c9dd63e055950ba5f62e8cf9fee85dcaae129f72438c3 +2381600 00169effb224e50d189a3c80e1c8e20ae9ce89dec5d231f3cb4d9ad2ef188cad +2382000 003ef4a716a195588c7946a6a5fcfdac029f4674740d5aa71d7ad5c33530ca24 +2382400 0005cc2b3dead6906263c3f61f16e03b9e9418046a3bd1f44b1057e136a45513 +2382800 00462a258adde935bb9ddb900b595d340938c0a05155c6690bd0a2959b1115d1 +2383200 00187156e8329cc8f0374c7278c53a05dcc6b9fca8320c1a11eca1ea77fca05b +2383600 000b74dac7fe5f75926959a06d00a02f8bb8025766a1f484baa055dfa18d66ac +2384000 000df197d3dc51cae497b2853c6ce5ac8221f0e8fe20d6287a7d8a4cdfa6a9d9 +2384400 000af5b611047bfd10b5fdca61ff6d70a54fc0b94088f05e070877e400d7a551 +2384800 000e34fc2f2f29a8d32b04e6c8527608ca51ed3ab764d5e413ac14d86e0cc0b1 +2385200 001dd51a0f99105c819b165aa744867a05f706daf75b43fed397a61034ca150d +2385600 002373147ea295799c286bbcea88dcac18493dd7bc88c6e8afc1d649d07be0ec +2386000 000760d50b320d2143a522402f640f06774564e39b88abfe2638c4a3c5ee85c0 +2386400 000687e79efad695c376389d7b067a74cbcf7ff01d1503f40d13db6fbcc0f044 +2386800 001dba9917f4f3f03bd2113bdfb7606b374f583b26e1274234dfb014645542e1 +2387200 000f0e7482a8f65f5a7e8cc3bf9d3bc0b352c10f309a8e3048870e4a0d3f32a2 +2387600 001a75b87be98b8fc41bec67db3d5ca41c1cc73e86ad292d5498dafc8409f325 +2388000 000681e3c3dd26646a307943bb5e46abff6422681bfeb4e059ccce1d56a68b69 +2388400 001954d674bb1468c290ce6e41c9e30499c887b609a9feb5a13907744650ce2a +2388800 0026a02c112c93a3239d05f8c02f268bb9965ff149993f33ca7c821893fdd590 +2389200 00125d3b01118461734ea74ae3a0f0fe471cc2a86687cb767717f99ec702fde9 +2389600 0005876465b0416be5f26b2c3d0f89eb2575fbfb4a2739d16983f151f00e2bfb +2390000 00165f0dd4864f342c30158a6d7ecaad6eae3617388783346926fb477f69f3fe +2390400 000a864fe4954ac591be34451389367d06d70bd1cce51f35d21b4b199b55087c +2390800 0012a95faa1c1ecbc9328337827c3dd1b317b240bea483bd59bdd2f9fedf0b03 +2391200 
0015984bead0ee986c0e55621f68c8c0b4b5cc0482ee4469b3179e5dfe85f5ca +2391600 006903d70ac042330094b2571789c62af43230467c8cf7982d0d76ffe6121d3e +2392000 00161e72c469aa7f8fad67138af97f4dee0d1efbcb7cdaa76979e065c908d8c3 +2392400 00016dfe99fbbc22672d52cf62fadcbdb451c1c8280dd829ff6f930833d8a905 +2392800 000690e3c90bfb4eb82b2dcdd0353d274a60a9cad309b0fec3e7458e7b65506b +2393200 000cc40350d6872b86abe8ce159ca0d8a5f275713b4d1c0d6d6db5f3f35d1d2f +2393600 000aae4b2b7d546f42fb3b73963cfd957324245ba470eebb2debf96a1e212450 +2394000 0072ce0c882d219ee14105040dac351f6b73b4735aac0ee699b0cbac8800307d +2394400 001913cae8692b3c6b645d86d0be6f43882bc11c0568017abfeee2febbc1e58c +2394800 001cffe7c7602e64a9cf6da7fa7b21ab475975b6aac94a2b4c9d4f6ac9771582 +2395200 00179a14baa1629fb4e937cdf6280375ae16312679ca27e23b236689423c0cac +2395600 0020b3e0e96d419ea0bbe1b45c1ab21440377a973d6b063368f47bf5dc4095a7 +2396000 0048d7587504d873ff5f25a419c258984acf384a3c3b7e3c29b39905f65fc610 +2396400 000296911fcca5522ecb2a00d5abb7718afc20c984c67446a1ac1255b3adbc49 +2396800 0033e251f1671667f60576fdc4736a3540a6cd6863646833b64b90294fcfa793 +2397200 0025444202d8824a1fce2f56594b5b215b41b43dab116526aa54f6fac328ec32 +2397600 0018efea056bd7e711ff110536e1fd06d72c3bcb59808ec78ecd6a8bc6f809e9 +2398000 001a8d6641c623e2217d8c8cd0ac1a4f431a096e4d4c234a23a75248e2934a98 +2398400 004e7e1176eb92c65814b208401b258149ebdbd4fc8e70356ce133ee20846412 +2398800 00104ca6428e88d1789492a302503471b8f81ec1d74dd4484621fcd836971923 +2399200 001a192b3b9c798ec182f1385e56245d8afb55698fe6e8f9ac3cbbe7c6a18a28 +2399600 0034e7c97a1a7755e9e83a69df53a7722ca3eeb7c827ca0212cff32a6ab7d224 +2400000 0014f90b82658ecce4b29d8c4e4b755324f5b75fb85b96421074c3bae652ce1c +2400400 008495070e1d0de594ecb8c74c0d79bc07c558661fe20f9492c60adff42983e7 +2400800 0009959bf6deb71a7198df94c2e607d23b887934dc0e3bd031c19b56f7df427b +2401200 0020158fed99b7bbe09e0470edc0632d05e7559cb31767f883ae8ee9dd3c3fa8 +2401600 00177c5621ac7a61674f66aa8e9015745c4b50f4c90b6b410ee9939dc7c2f136 +2402000 
00018340c3a99b95f5de82c248ac47735cad3a0fbdc6006caa094db0673b38f0 +2402400 0018c3aa759949a1280b4f865c639609e82a9a1e27332264ca6f87a9c531634a +2402800 0009715feac14163c78a68f477608f0a6424efd02ffe936d7e9731241ee67078 +2403200 00058881db360bdf0245b5be0cd6bd85ef1650388a90eaa4a2f8644221b3459e +2403600 0067d8e71caef5f552904c15d8560500b810c2cce0928b630e8ac6382055ef89 +2404000 000a309d9d8ec47ed3be870aea4a03dd75822e88790229bc9bee09de86e0ebd0 +2404400 001d29fbe998d3039b6967f944f1b50e10ce70421d60d0ed87f39cf7d5a8e350 diff --git a/zebra-network/Cargo.toml b/zebra-network/Cargo.toml index 1df18e413ae..5430350b6b6 100644 --- a/zebra-network/Cargo.toml +++ b/zebra-network/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "zebra-network" -version = "1.0.0-beta.26" +version = "1.0.0-beta.27" authors = ["Zcash Foundation ", "Tower Maintainers "] description = "Networking code for Zebra" # # Legal @@ -83,7 +83,7 @@ howudoin = { version = "0.1.2", optional = true } proptest = { version = "1.2.0", optional = true } proptest-derive = { version = "0.3.0", optional = true } -zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.26" } +zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.27" } [dev-dependencies] proptest = "1.2.0" diff --git a/zebra-node-services/Cargo.toml b/zebra-node-services/Cargo.toml index 11db2015541..76b2a841cb4 100644 --- a/zebra-node-services/Cargo.toml +++ b/zebra-node-services/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "zebra-node-services" -version = "1.0.0-beta.26" +version = "1.0.0-beta.27" authors = ["Zcash Foundation "] description = "The interfaces of some Zebra node services" license = "MIT OR Apache-2.0" @@ -35,7 +35,7 @@ rpc-client = [ ] [dependencies] -zebra-chain = { path = "../zebra-chain" , version = "1.0.0-beta.26"} +zebra-chain = { path = "../zebra-chain" , version = "1.0.0-beta.27" } # Optional dependencies diff --git a/zebra-rpc/Cargo.toml b/zebra-rpc/Cargo.toml index f21d7b6638d..53026d5828b 100644 --- a/zebra-rpc/Cargo.toml +++ 
b/zebra-rpc/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "zebra-rpc" -version = "1.0.0-beta.26" +version = "1.0.0-beta.27" authors = ["Zcash Foundation "] description = "A Zebra JSON Remote Procedure Call (JSON-RPC) interface" license = "MIT OR Apache-2.0" @@ -70,12 +70,12 @@ zcash_address = { version = "0.2.1", optional = true } # Test-only feature proptest-impl proptest = { version = "1.2.0", optional = true } -zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.26", features = ["json-conversion"] } -zebra-consensus = { path = "../zebra-consensus", version = "1.0.0-beta.26" } -zebra-network = { path = "../zebra-network", version = "1.0.0-beta.26" } -zebra-node-services = { path = "../zebra-node-services", version = "1.0.0-beta.26" } -zebra-script = { path = "../zebra-script", version = "1.0.0-beta.26" } -zebra-state = { path = "../zebra-state", version = "1.0.0-beta.26" } +zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.27", features = ["json-conversion"] } +zebra-consensus = { path = "../zebra-consensus", version = "1.0.0-beta.27" } +zebra-network = { path = "../zebra-network", version = "1.0.0-beta.27" } +zebra-node-services = { path = "../zebra-node-services", version = "1.0.0-beta.27" } +zebra-script = { path = "../zebra-script", version = "1.0.0-beta.27" } +zebra-state = { path = "../zebra-state", version = "1.0.0-beta.27" } [dev-dependencies] insta = { version = "1.30.0", features = ["redactions", "json", "ron"] } diff --git a/zebra-script/Cargo.toml b/zebra-script/Cargo.toml index 697dd1a0d11..d1399b0d7bf 100644 --- a/zebra-script/Cargo.toml +++ b/zebra-script/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "zebra-script" -version = "1.0.0-beta.26" +version = "1.0.0-beta.27" authors = ["Zcash Foundation "] description = "Zebra script verification wrapping zcashd's zcash_script library" license = "MIT OR Apache-2.0" @@ -17,7 +17,7 @@ categories = ["api-bindings", "cryptography::cryptocurrencies"] [dependencies] zcash_script = "0.1.12" 
-zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.26" } +zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.27" } thiserror = "1.0.40" displaydoc = "0.2.4" diff --git a/zebra-state/Cargo.toml b/zebra-state/Cargo.toml index d1d48717964..b55f0b61c2f 100644 --- a/zebra-state/Cargo.toml +++ b/zebra-state/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "zebra-state" -version = "1.0.0-beta.26" +version = "1.0.0-beta.27" authors = ["Zcash Foundation "] description = "State contextual verification and storage code for Zebra" license = "MIT OR Apache-2.0" @@ -71,13 +71,13 @@ tracing = "0.1.37" elasticsearch = { version = "8.5.0-alpha.1", default-features = false, features = ["rustls-tls"], optional = true } serde_json = { version = "1.0.99", package = "serde_json", optional = true } -zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.26" } +zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.27" } # prod feature progress-bar howudoin = { version = "0.1.2", optional = true } # test feature proptest-impl -zebra-test = { path = "../zebra-test/", version = "1.0.0-beta.26", optional = true } +zebra-test = { path = "../zebra-test/", version = "1.0.0-beta.27", optional = true } proptest = { version = "1.2.0", optional = true } proptest-derive = { version = "0.3.0", optional = true } diff --git a/zebra-test/Cargo.toml b/zebra-test/Cargo.toml index 57ea54f3527..0f17ca58503 100644 --- a/zebra-test/Cargo.toml +++ b/zebra-test/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "zebra-test" -version = "1.0.0-beta.26" +version = "1.0.0-beta.27" authors = ["Zcash Foundation "] description = "Test harnesses and test vectors for Zebra" license = "MIT OR Apache-2.0" diff --git a/zebra-utils/Cargo.toml b/zebra-utils/Cargo.toml index a9b32576751..eeec69f4e8d 100644 --- a/zebra-utils/Cargo.toml +++ b/zebra-utils/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "zebra-utils" -version = "1.0.0-beta.26" +version = "1.0.0-beta.27" authors = ["Zcash Foundation "] 
description = "Developer tools for Zebra maintenance and testing" license = "MIT OR Apache-2.0" @@ -74,11 +74,11 @@ tracing-error = "0.2.0" tracing-subscriber = "0.3.17" thiserror = "1.0.40" -zebra-node-services = { path = "../zebra-node-services", version = "1.0.0-beta.26" } -zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.26" } +zebra-node-services = { path = "../zebra-node-services", version = "1.0.0-beta.27" } +zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.27" } # These crates are needed for the block-template-to-proposal binary -zebra-rpc = { path = "../zebra-rpc", version = "1.0.0-beta.26", optional = true } +zebra-rpc = { path = "../zebra-rpc", version = "1.0.0-beta.27", optional = true } # These crates are needed for the zebra-checkpoints binary itertools = { version = "0.11.0", optional = true } diff --git a/zebrad/Cargo.toml b/zebrad/Cargo.toml index 4988bafdf06..a7b95ab6167 100644 --- a/zebrad/Cargo.toml +++ b/zebrad/Cargo.toml @@ -1,7 +1,7 @@ [package] # Crate metadata name = "zebrad" -version = "1.0.0" +version = "1.0.1" authors = ["Zcash Foundation "] description = "The Zcash Foundation's independent, consensus-compatible implementation of a Zcash node" license = "MIT OR Apache-2.0" @@ -128,14 +128,14 @@ test_sync_past_mandatory_checkpoint_mainnet = [] test_sync_past_mandatory_checkpoint_testnet = [] [dependencies] -zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.26" } -zebra-consensus = { path = "../zebra-consensus", version = "1.0.0-beta.26" } -zebra-network = { path = "../zebra-network", version = "1.0.0-beta.26" } -zebra-node-services = { path = "../zebra-node-services", version = "1.0.0-beta.26" } -zebra-rpc = { path = "../zebra-rpc", version = "1.0.0-beta.26" } -zebra-state = { path = "../zebra-state", version = "1.0.0-beta.26" } +zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.27" } +zebra-consensus = { path = "../zebra-consensus", version = "1.0.0-beta.27" } +zebra-network = { path 
= "../zebra-network", version = "1.0.0-beta.27" } +zebra-node-services = { path = "../zebra-node-services", version = "1.0.0-beta.27" } +zebra-rpc = { path = "../zebra-rpc", version = "1.0.0-beta.27" } +zebra-state = { path = "../zebra-state", version = "1.0.0-beta.27" } # Required for crates.io publishing, but it's only used in tests -zebra-utils = { path = "../zebra-utils", version = "1.0.0-beta.26", optional = true } +zebra-utils = { path = "../zebra-utils", version = "1.0.0-beta.27", optional = true } abscissa_core = "0.7.0" clap = { version = "4.3.8", features = ["cargo"] } diff --git a/zebrad/src/components/sync/end_of_support.rs b/zebrad/src/components/sync/end_of_support.rs index 9e4cb2aedeb..ead7b34e03b 100644 --- a/zebrad/src/components/sync/end_of_support.rs +++ b/zebrad/src/components/sync/end_of_support.rs @@ -12,8 +12,8 @@ use zebra_chain::{ use crate::application::release_version; -/// The estimated height that this release started to run. -pub const ESTIMATED_RELEASE_HEIGHT: u32 = 2_121_200; +/// The estimated height that this release will be published. +pub const ESTIMATED_RELEASE_HEIGHT: u32 = 2_143_000; /// The maximum number of days after `ESTIMATED_RELEASE_HEIGHT` where a Zebra server will run /// without halting. 
From 55d58edb8f8c4d0f42e7dd6b0122f0f2fb43d3aa Mon Sep 17 00:00:00 2001 From: Alfredo Garcia Date: Mon, 3 Jul 2023 02:38:56 -0300 Subject: [PATCH 172/265] fix(docs): Add a few more details to metrics book page (#7096) * Add some more info to metrics.md * add image --- book/src/user/grafana.png | Bin 0 -> 95059 bytes book/src/user/metrics.md | 25 +++++++++++++++++++------ 2 files changed, 19 insertions(+), 6 deletions(-) create mode 100644 book/src/user/grafana.png diff --git a/book/src/user/grafana.png b/book/src/user/grafana.png new file mode 100644 index 0000000000000000000000000000000000000000..fa4e20a954d272126b30421310868086ffeccd16 GIT binary patch literal 95059 zcmb@t1yoeu-!6{&RTLDYrIipQr5hB7ZiY_j2I&SBks(K#A*Fk0Mi^2WhVJg}?%^K4 z{@(xn-?i>~*Sc%Hd$ATXXZG3qoE@Kdo@WOuD@r}XBE!PKz<4Gj4N}Fx_$LDcgI8_SQK8Ybs%FEIvMR6{)1PeVmvdM7_Y69 z99KD^RO@|qI0X%py)tK)LFZ|^F1|$n!>51SP1!U^JkP1RO5#1oTo~nLJ{{HGrRrMo z0C7vlyJ8=o|Cu%5Jy>Vuj0Jvb_7kN2;Cg;?8I1PITZI*mHp|Un`Pb(b+vQeS zudXfg>pGSmVth7O-4ku??rv{tQdZXb;VT5DW7^$?08Ra5t%f%kjYHkWRE{~&)3wYn z{I;8{_RQ>~I>FUhbD9jvheUjg{johC_B(g4%5t10-ILy2vR&ET#}FQVdiso+iCIvf zX{y`2Ekr#aML$^LXwBS)JZUhqT}OL*rXgi=W@g57acqniob%7+)s;66slBPGo~l!B z&E=E6aWmtgyHU-#FeQjDeqazP*~WIpkdeg)Kd?nCdc1Rxh-eiTO?A3GzTe9?7Y^M& zV7N@j%u7f(DakMPoOyR|;c~Ektj?L}=v0wRRu#VtvY{Ao6lkh#S5ZD2V3QQ!(Jyzws6W!{ba_cap`RPW~k$zYBk5diwh+4$CW4TWH+wEaF4e;d^4@2RTY< zu|J->i!w!J<_mP&$akV@u0OcHiS1uq7gyZdoyxuVwX?@-I`A_70p_`A71?5XQL*jJ zOwfxLE@u*+3JR{)-M!-P-)mw~2spWzj}=gCNF8^@;cF?Nu+qo z^YY_XVMzlih46LrRBtclCk4;lM{DaOZ@tfMc$aN37N`rSx$KDf-e~DqgSt^C zfykJJI74m^5!M6W2cy&N?cZ;&-Sg)Jg^*zs96FjxjTsp%yk}7bTv@9a=#4>>8oi44 z_V&t3(;Up4V41sz$MFVrgsUQ6dg(s_Yxev4bbDRMD5|QeSUg=?NhvrSnWbFm!>g@3 zeWCbxV05_NIhUXFs1oFK-aPS02pRE0Ku1Zbq&#EEX-DGA&!4k8I%%ACQ>c)T@{0>C zf@ri{eBH@%jst8r;8D#}a$e*8IZDvUR-r>%yTJ(s=SHo}OtGiiX>z-+4+J2SWrtZIv1i zi{i(k5mfaN>$tXj%-os^kL!XGG6r|{#1I*`9F_l^B^Zg5WDePmsrpvt2}~9<)wnzz z+isqecKXjILvYVjcTOhS>dhOO<8^kJfU~`qSI9f%fr*K{snvyrm2IInMM<-L18?UR)p6Ubl9nMDSaE{K)U?CG9Dpt*FS0mtR;oJvBisSZBx-o0p&8 
zn=F9H$r0qXMSOqM(a-*|?)!Wowgv3Bi17E@Q_Wv`uhLei`TcHM!MPELC#Wu~fr}@8 z7X6ZtHX3r9X;Ln8O8!zM7B)L@Pv$JQCJ8ipQ7Tr}tP|xKO5AWTZkXGbEU>w{O8NY) z>vdWqSI1LI!KI_sjl5%jSi%WzB5y^0{u`cC^KEfsW44j8v8QA!P6tf}HR5xgsDqed z&;1YdgLCyl8g-RhQ&k=`WE{pL)w{ZJECxArSBpU<)WU5c4ztShKN1t=L7tuh(|0#% zs*`;WRCRP@k@K=Ht)rtYy}c&qd%I4TA0)f(H;GA|4+&e)I<4VFeucOeR z$*zYGmPjCM1yp~^=VduaRy;c1xRWdug@E@^?-MaG4(d4Ha@+2Bj0}8Wm@dj=oUd5# zH)@eoOn%X{_{quM!Z4!V0^ma&VPqCGDjJ;VG$;!!eY4X_^Rqma|pcKIbG zv^4o@s&s@2Nw^<}go#64UtLigKex59DJje(A+DBUiziS4waPdyw}NtE7;&XCdM{kEnWXnHDkyaz4rWUfD=;XCZ6;J=c$JdjTl(a9>;EvBFZ03Eo_c9|f zajV|jU2R;AK*i0?TtR`SqF%q&%L}FSFyNBaL)jTDL_o-DSUof_Agzm}#tSFqf?p^O zetB>W0lv?NZNAFRrsst1M3y|nxJtFp@~oh=s*jK2-Y<^$IW}}TJ^4)LrfAy8R zi(c-*s%~ocbBinC$k>5uXCMv`<^yG@x+7ju?p9TozsmUD-e(TYD=x12v}o&cA|NNn zToO6wyv8zvaF8-Fp@rJ;TB$DlIwXL#j7)BMUJ9_5j81+Cig>%*&-0dhOtyrN&JwW$9Dys5PGmmBzQJCcX7D*7uzqEj+}M=z-RYr%zN~ zB0Zf+JiV`5+YP*SZ0>&+%_y`=b@snF@@6E1>K|)3~Z^$ z5=V|>2qr=AH3KH1aA!w8t&Bi$D-JL|c{$a!%-9=mVrA1yOY@{ljoWO-G^!^NT)`U` z!7YF@FEB|6B>ts0w=;?DflXFzv5&)$`(D(9A&<9RFQTunPp{l#@_g5IitsUZRb{2HNDu=(y&ymTL&Yao zm+^P_DqwIl40bt-tV+r9admYC(M8H`ZExwE)7#tH=9QF)dF>3y^%&w|J$zC41S1UY zmv`eW&gHma5Ryx0{A{g&p0EPKxQK%%B{~rkt8#{b{>ngtCt1psuxpDQxiWjCFO&aRo7$%9_oz4etjRTs){F9G3{|Ght2fa{PtEB^pgO_K!*!YuTXL}Z+8Ps0+eNnuAh;+ zw1^_ZBN{D76Sbk)@-D?xc09gJeS~vl9C$*fm z783Z}l9Q7ITNE2x&i}5fBZ88N313ZJy$0#2gR?L`S4_!mSy}^=Kw25Rf1g^ZDCj9W z5X;O#fz#3N;MA2Oq_1?0Pe5@0evXE^W=I>2uI}~w2be+McQ>y@YP=B|G?5o!r}g;$ zlfQjF7ZytSe$XFRVxW2Opk*-Amy=UfLH-JQMCL4TwiM7IosDl|LeO*XrrgQV#Kc63 z1K5Rq38Ku0SEDyKGZ6;v%)A>AX+2pGvw_a2r<5710_$jVacpd;`|i}3xrw=kq`AhU z-`PtJW3+g4m3-Pv zIV_@PFPNW}s}NZQ{(_<>Q6$icpl%uPd(b_2$Y&P~Q(|QF(LIa>?XiJ@QuqA`88kQt zvscZoD8Cq(a5cb)Sx(8*$ja1A?$wN!7y?V??9=(lA?&413=!6mQkrP<)4ZPUhU|6? 
zauMsIhKBbF3a68ukPMo8cxGm1FUZIgu@hxLlany{^FGWsgdv$yIPwQ6l^>f`v&B8P zHKwu?u=N}BfUtFIYs;iP%z~JK-|a>jkyE6`6pLq}cy+DhdDs>o`1N<|_7gTXdwsDK zi0ZKY*2y-)rAzMFvuBzm`Z98Ieg_G_K<*TwNSiCz4I2`oqDE9uTU%t3?XOWc*Oc|& zshV%teu>$7x|IDonW^E@I;6nx!tSyC?21r)VGFlPifj$F-rT9aMz#TUZ{u zSEwI@-r&wF8+s=kYenA2Xo@zqvr3t*%E1^-W-d7N*xUQ?HK+Hfm}^m{d@!&f6e zZ)Izg8LAy&eOpfzjP=u5G2#JsXz4eOCNXKmcyAA*imtcT@`?AUmyRsWm3_KT6!I!Z zTCqJP&K`C()s0~2GB~p8Iz-nMMXI18}D>f!wnHfOf2 zKwkXxD#+BUx!D;D>)7eo{!eaLdB!4zB_rTm8&S#nD?BTc(wWMFaHgzOC&wRjfGz=A0H2`8iur z_BZcr`XFMRI4)h)+Z`r;)*PCM$!@iE4z8WxCK#M?40mql!B@Fq4WAM$ip2yT1bDtm z5#vMztGJDB>n#qf%C(4@lnmrT{~NkUt*p&D!87V`=yb+D;Hk7@bPkWl10jbgHJhB6~n zdZp{#RhF`Hsl#2Ln~nBq`%y*)1Nsy-Gl`9f>vnr0MlG@#lKSR@NRW`gGpuLR#|a%S zDhFYctE*;Zj`378r(@*%ltPYL0FrQ{L$$KD)qL|6S>u(JLoXK0G^MuA$MMl{1HR0F zbA7E~P_yJJq#IOslgyyc4)`ePl1BXnp>?y<(T+wkxEeRYJ1h>q8HQUzPj&s4ouhIa znaath=|2A?y1N^6_2@f@jyN7aGA<`B|BXX)9eq0HnQ)3cv}cty0RaMoLoYk$YP<`- zG%TVP1hCgw7mtep7nUkcr^m+rsoMpZPZ1G%9+ANOh@RD&nqN?0P&fGBE}^=D+84H# zsQAsUs>Kj$8XCei%BLqMIS(HlUyd#2M$C1iKJqq{x;%v1e zF|l&XL!F<0n!lJc93bw~uIM4?%4%?tp~Zq9Dgh)=7lZ}Q&g9~t>$CkBS$cVBuS#aS zaG=*cj4NUAv}7y`+(>#R8Ui%J)9#KBQCGs~bvR)@4@8#`KA zPNWW$il~tE;Ya4=jD7W#146;*h~{_mAAyl;HP&x*jP2p->FOm8l|VR`J2_R<2v_`K z<^W<5VCakzPg9fg!8znFUVNvTaK1vxq?ZnaIP|U_ef#};*g_GwX!7yn3t-3yp8X83 z+Ak(w7TIgw#W_>#G4yGhXwB+%)7PJ!MsO)fP*|XAp8M_%ZpBK|#m2@SnO|n6*b98V zLnlHne72>8>fK#4dG=d!M`H`2`2__Z$*3Ow{+*Q++<05(d-E%JkR;Pfi0$)rLQWku zpT8VvzX<)agZ#upk4ya+84+QBaH8EjY=Jizwpo$UCv|nH@H%5;BuPYz7Nbe*8&f>N zdIw@M;eA$z3b|>PBSHqI{boDsdBZff%AVN=E{g^m<;`8U+Gt@ZGBWZ65yJ0re*8uA_$pK}yNOzisCm*Eb?k{Qv1suroJzTyC$L zN1$)Ey7_{I{+(UKoIBNu)4Ba_89`74 zg-Da__`rb2ehO}f2Q+$2<~Poci0jp_0#j_~(DQj+2QxFU@{jZDu{5rI9#3*o(h{## z*8W#5t4C`IJ%iKenW2dZX;5NfVkmJP049KZMQ%|=x{Y^-&PGO}c*WP_4A~j@m_kyT zno#u;cIG$r1l)FOJ3lTTJ^IcOpD8D|pJxrEUp}s@x+WcAno&pHu|19UlXTHMXCuYr zTLLpz zZjguRUeG`-H1zc;J@;IYMlOIuf)^LpBMXCo4=E`DDaG}bieB`h8ceY)ItNQa4*oBR zLywM}vQ%Ycb)5YC_GtTime#fo{kO%22M39Q1G`R7QGs ziHWJUe$?Qu+-^j2vTa5jm05WBv)>uNM=A-wlzwWy$yO2^F3c~+BPQb96$6k;U_v-d 
z9u5zI`waP~Cngp)7U&3xdAUVb@;4nlpl32lN}HxJLCRVQsi{jMQ|pn#!aetVsMDSZ z!?nR+u#Q(Z5D?2|M?sP?pPfJzt@6zYuIvxw$-qLH5+vyD&WkDu&9pO|w=F7N-c!OUr<+irBnVFe|gPzi6Bz5tzwduX25uNt}Y%PFi z6A@ttTS7VpWYAy4?yd}hT+-@GGpO?89DYu00_2gFRs^+JWCu^QEPdA-8O@S5cb7$| zn~C?p@oY;k3Uzbb-++uuIJnYf_PgEf5U3zQC5XyZ@d2Rmx}opQl`cDgxZYgcIshR* zn~oqe5NymM#6IG@0BmxVrsn=k)oB%9d|KnDvuJ;RBa7{W0onTc4%URmmFDz%{TN5) z-u9^oh`juj|5?{Kkf!*qR*^JJUZicle^pT8CVF==4LIz04YsQ*WWV{N{=-k_wly?& zO+eb=P+xI$5Lv2TQfFpoSIFdcS+426e|rq{W+)eScQd@7AdmvUHFuK-;o|99$7u39 z^ok+xnA?8iIA@XlWoXG$a>1p-5;6Unt-vMY#cP{ATtt$v)z+l5MO?AgOEv<9BV4_@ zPm}xpH#bvBJ)t5004`H4sJ{G20hl(#yzc2<;y4P0TJP1Ks6a&YBUBj)@20PM7HQM> z_EG~L;h0xnD;^*a?!s<61ADX6K$^7HuUiC%r$#5$JD+RNOaY(_fT;yDvWtyRO+^T_ zO-%v2Z*~Ut6~cnTWQ^@z5|XgCwsu};0WvD@vt9mAmW@{a_5&jy)pvm&-dOSZU*G0V zX#`|>3Lh_!R(Di`g{nr_X%de-Vrl`dnFQ9gf z>gpQuX}lE*Uy$c$SagyvD+jtUslmuenC6IVlQQl3^?IKeXlaOa*h0h!^P_)bm!bb* zOGY3&kkc9AwK25bx)1Zovz^Z?%x7h1mqKoCb6IpQmc|Wl7WVlr2d&F0TrV0=(`2%T z2pFIvGTC5u7lO9!>jhxJ$_lTOS6zNG+D7 z<6_Omn)X4Xg=ol^uD9S}u%K9&z`|fHEg30ke-cu9jzx^rk6ZMWTkEHi#g;lqUSqJ; zKqU-7263rydb|4`AK(JpdV6c@ENG90=F88I{u^-K;`{hrvzR^OqoedRnk0x(hhpFT z6#rgW)jLhil3D5R&nP34+=Z4(J^>&_8@A!jxDwyZNLkd7Py@kDc=)v2KlA`Nt_wK1 z18|4Kx~X^g3DpL7UUUyjXTQl&Q%@O*QksAasJ;F5@j7c9Pq$*KU|d`(ryJ^`%%mOI z@A}Ai*6+CFHCMen1&VEYZ2P0J4o|6h%XI@Q4~vwV#X)k!?E99b$^#<{F(XNsxVy9& zLqk!!)BgTemxT;1P)=>vi+mFR*CbGxiS5BTix_v%qs;)2d5iwUl3^t&9flMx?`B{l zInkpzolEOeT0n+(L5lbB$f~^oLyWz~ZiF^q$+PIs z)B({tE9>s}?_O(e5{A~BU@~FfvAJ8JB?9B%jfKVA55nII*uL3x_xy_0P*s%&Q9E3e z=I2+;y@Y<+238cz>Ssu3Xn6$}E9)gpn-uUXRC}q@?l&d1wIL96h;Y@>>UeRnowKt8 zA0HoqN<|&)vYUaOJzho;@cXAOr>fv+;OG4qLcj$P5w5%;#Par*(x;rA^{(z&b)pf( z^E;{1oaf}_HF`v9G2d6`%3SX%3{0%Ed9N6WYffg`^BO%WPqwd+ZszWxAutQ;r3@c| z(36XszzQ-liZxL^rP{Scj+o5m$g0|Kr<%H&8rUVmdigZVrp8kM5;Ntv4?!zP8nq6HztQwfYAx<8J#lEOk`s;?TYPSQ zeidMPbE*cK=iZ2j1a>_audJk?NQP{jz@mfs;0?b{%-G`3>Eg5&96do<*6`#FpVQykzj^lg}E^L|WdT}res?VCsl z3+BP>>~~PA2y1+gjkV3rE=5gF9cEgjsX{pzj{%bR^KE~Z)$=rml-QI>l^3q3u5E4N 
zG)w>wiLkkNTT&~yIk=3u>f{OVZc+iui+iNb0=2W)`&+YgG#NnPqeT+G(t#k2Jk z`ug=qcijfWz`#HPsc=W%*xZcI=ggKSuE<=7biNvuDE?JfqgbQgvX0YHIAE*Ptqiv3 z!wa@#?d7D7cd$-Wt(l$K2asF!8Nj-s9A4t7tHp%&fU2v{B-+HYRW~^&Mtfa5 zKi`Y1<~;+oE`CbjE~(2{Z79=X5uF!Ajw;>lCIhq=`Pyl4wA9$`wuvUmIS?oro3LXc z-}YM|?Df@8Zw-NDy4G*%7SdVS_$H|s{d zl@-0&sMj>`TTU6Jem)owOcA{-kpcw?kz+Jf6uno4dx`8H-=_(=68Q#^&J51h?+^~C zLc5a#nR^&^63;a$flA=pt|^n(!w%rI?Ytj+@kDxN5iBHh1$|qD7*7cD$QE$bXoboiD+HA0MnESbp|Tp!ln3o`@HF zX2!CZL(HI_!z6OfO);oK#P&_Io3mMZBi(2ecdm4LbnC<&T6lM!WVE(5)2j} z7gs3|WySs~BL^sJ=4&+(R^A^4m8#Ocbij;+uCA&PlIN+J9>QEAA`jF%-Wiu^wE`5N z#|^fFt?dMLwZ-BMJ?it&)KuavbR>0T%K7a>IUqaQ&c08kqZHI_99drB_Zatd(_a+- zF_xMG(vdH=TT0IGk5}k@e|8pO=!d=qhyuQ7hi_?uRNT9M&}NZqH@!L+zl%@o0Ouzc z55H)H&Rdl8n{|*(dwuV^F41P8uk!CsbRQQGe0sxtq~;_$1kg?M{^!n8^c9mkZZ zFYbnVLM0<8cK6mUmvMEgZ);XvT7!f1MeszficJ^^k5Yv0EbFry(o^nifTtBtZ)%_F zo0*|$d~5iU<#8e}SW~kfg%ZBgV&m|)Lf%wl<`$;8a1FN1M)xE3Hsneo>|LeqDJ78s zG*N0)6y0;eF}@L2?aDglNZ~Q)^}I|r{_8+*i&M-f?_qt;9a~B-x(*?~rP8$d73i2h zkOQOSvuZ4xu>pVqJ)n;YW3h)*0e>`(RwI|49Y``D@XC|4nm)q{tP8dg+PVlQy+#mD z<}-Y&FDCZa74QdN|Bg5M5?^uX=y2(gTx>0ed_>OC)iuTn30buEaQ+pS+XoU*!T>-Aa^o2a6sIHm+}vC*C{>Sbd>5Jf-qhB| zeM>VlE|-l1Ab_D1uDd%Vuzigj9v)@SuC_8jxURl0SjOXE$p{HKB3P8taj>4fud zyf%VSI*jsBv$LPClM)nR_$R~t^8}IO()&9_x?=WdxS8f%?0utGcx98efI?xWj>0H% zVWTYI4zb52Hvf4n+;!Ko z^J5EbBU@j-Mh6wbU^;sGGt;c#d^N0p9s)S4d>3eCgb|rBlS8OFY?QmbbtU#LO*4aX#47{;y*enlZ)9qQ)8n$zD~Qz_>nkt zRoL)=)5m-y?h35CCLOr>ujih^1ja|(ZtE^=*BL}}rUkSS{@prn{htgw zAGx%4<$oEz)Zk5|=sh_hG`GS8q)A39M$n?FX|~r$XJ^O#I=-BwP~_Ia9x#UhCiB9p zq}eX&jxdKIB{j9d%73pI6{ZU#B_ktqXh841W3W)K9zIns0p`xPU2y{8&rlI@19+vA zb3X%-(9Me+GW7U-R--k^F?|D*lOt>*#6e$Oy^EV%M|%<=!;g;2``GLLgpqD8jfEej zQun#pZ%z)=`0Vs67XtIJBjX-iFvm4ki@9hrC>YpL0G@NIun5MG%;~pjVA)8Z!@Q>sr;i+%r?F{uKGN$On=jMc2@WN=lAbdK&rv@RU80T zWwY@CF&Wj~hdadS_}bc9ZLMIo>f+W?s^O0wml{-W&tb}fAy^lz4^{v@4D_7K^j#uY zO2{}mcBWVDs%#>SXOk){EX2IPoa^h_vrHyG{y^luS{@j<8us(Yzyt5v3zIRKCiT%j zs>bcCAwNOE2FkF9@zQhvV?8N+T+TU|>FVvlXnrfWhAjc)v9j5mTRx@+er$M?lLEr% 
zrWydDxWu-|BUXbzp|ePT3xJ?#<}%+t8MdkyzRFWE(Lpc*9Au^nH($O%F1Ge@qs6%F zv_r}OR~6sFGx^{f$rx|#sxn3q>Zzv}G4dH3YY1D;W(6({K*(E$HfS9b*H5L#2V#7e^t6Y41wt7l6+#w3_qM zn2CL{KPZ|{sZ`yyu8aKji|f5;O!V@*uKJPyfbI9Zta0BTv$nFqB^S)7t+hV*i153; z;;>2$tZ4&K?PUVvhhOiD&gTb zRbnUx^mF;J#_l!U{`=L{B|zZkDB57_S{>}@KU(PzYSp_??q9U!aWnAiJKfgQe}wGA zg2lTW*EXa!^iMni(vQo_niM?`b}yWqkDDMsF(GwlaD5aqzJ@YTR@bi*v(-$}80pgw zsrdxzRwAguJA+N|0zOu)h1 z{%|k|Sh2>az=R$W*e*acr1@VSmXjD7l=-OJ*(sQ=Y=ZSSp4P0M_X145nTegIa>Dt( z61wf?HFraNm82|3h85(MP`nWt%-T)k`ns6##NM-(c7 zf>XV+=9$xHihWyA3ZTFDk7-0j-Bv@!cWo*E(44|J=9|JbH|V$fQ-Wi$VK~%h3F?EP zL$o>$EV9s2mnSQeP}Z)9hLiQ7bS}~JaOHleyXXrz=Oy zl9@$=&CN3TYB_gZ}A%!%X1n$n!9*tf64M&HWp&w)vv)pFMrr5s@-5GvdN^DH)kVJUka0o8pNU zUl~ZJ1;i-yku&ukn{``*S0^xnXa4y@(p;VnAOV{9-n)5%>bq}hG?OH?6{n_2XuoI@Ru2^^O?UKf zQl&ftO3K-i#*5~$u~g5bw6yCnp%USAAk|->76*fS2AjJE9e-MX?ZsSOF%|Qh&j>O; zLM2Wm0{GVGO65Nlm+Sjg+&r*drAoJ0fUi>P;Vf*`5B6M9A6ygwxb+2U1s-d-F4F58 zz(6D=1*H@RXz5*b${qhv@bv(BX8{?0S}T*Me)GES@m_D?rKK~x>G<*gN6!z91}psK z@fT`7|4&f$|2^}c=Uh`a7xA}446mnh`T@h!9Dj-bcTkmeOUr7%lAYprf3N&x$SW)x z4Ssz2=d=Q#C7AySZ#Tw^jMA#7eDb%(35IK2;#7T{F!#paHigeeg0)&{h~GSU=JmHq zznH6roKpu4ed2!v2C3d#=j4Z4vfS(cxn+Muw_Y;tKN^i0@b;hM6MxE2hWx_BgST)0 zqhG*P&b@XnefwkYKeu&^P)~ft`>$R&!u~Is23)r(+n@+IJUu;~Ea?{XL8WtbbaYfT z7*r);INtdc1zTB z)&F#x@GGZ`;j1Sh&azZe_O4ir4VPP;f|chr+TQG~htJ#cfL+laK=eu2qZ#$}*E|Jy1=@W%K&@2R}n-n?PE8prcKWgK@0NLI2n+?)lVfOIFYI#g$LcXpqI0X}Tmx~uVwMc(=&Ol@ zAah6Y3b^-F7gO~4$SKHf$4Hd++Ja)%p9irtm-98iq9Ruw_Vxl=8J3#&cNZ>ro=jM! 
z>J$bJZnK&0B7dABX2XdVX>kbfd?TG#91E?(mj(TbRL+_wLdMAD*d?&z{<-A8O@6iC z1=#jKC6$(i)@1Gj?vk!PG}j72BU-@nC2b%9`U5>X*2 z(4Z-tr1H1#w^R-}3dgauJ{PMber$We{}F*fy@_8~0iWk35@uZ7|9 z=>J()l`RKo&)L>3Kc%`bQap~Dcz|G7h z{c}hVD8g3%tupPucX$7LLa2m#O}yb(v`n_)xVa1&o34Ui$XlQ{qVsj@vZlBOW%OH} zf{hK1{(r)P5Fz*F$UOG1VZoLILt-s%wvXR#?*5>PUR_d<%4m2zuv>#K&@wMT)!%)kyEz@nlA|9dPD&;`;woj-8% zaKHpErflVN_fL3_Dly+Y+U9<&N`tVSidUu|e|B-PV4VbkYS=5r5#Sw?+@hx`rs~b3 z2OIscI2kh3rz08(&wUT#<4v>7h)Qi;J717cNvX!h6E>I}53)P~HYv(`b1R!b`0|R5 zRVHMn%yGJ^+dh!Ta&<2Jy?HTjC%*sFxlifKWWf-~GQWGv9joKIQ`V)5(cLsfDlIK1 z_NCi?$W^^=LERd(toxS7qvauN5E5^oyRdF#RrHFvp2+)S&)MVjFbL!XGpNYRd+aSU zCG4gBJjWB@t)AoIbvS(3@X91GluTmRemf$+41c*dku$GoDh;b&6J^$@<8JPim(qA6 zFH(OCt?yDt;mp2&Uub}HR>nnV)wMgZoP>~iVhTFzLC+8>|GIvxkg%Ff-e|PB_N|&o z!nS6WpS`Kt<-7n9p;2l0I1(<^xs@F)vMcYBRNxG!yxT<_#Ch;nHy0Wt@wJa7w8}{R zc2YB&X1{hGuKXPy$_MH}i8st5#mjw7%GOoINN5Y{h^owvt6!+(#%FSU+OuIck0|9C zR#96sn5^*ZIh=V!vvhE)QJlEFjvCFLN^U)1O~Rp+ZC2hu&;UT8`YR-m!9e(cWL`=t}{HM z*9FI^eePy=YshfkGixZ2uXPSDOFw+Fh~x{JHKmdc3u4zt(lkYv(07GR;m@XgzK4;@ z2yhB5-r3|Y$YirAZ_3oUsHq4Stam$0lpc%W#pYA2ki6iI!SBn8iz`6>19gtfrjTKw zr73JOWA{{}p2l|*xUGIS-PK`$Uv_w1JctOfArEJ$J0C_Ggk+icf0MB2ret7$?Dhs4 z(Ot}d-YG$IbqMcIu-^$$X_|3(_MN<6UC(qPuXXfvvI^(td&9x;isOw`yZbu0JJ=U* zG786Lh5r$Xa?^7Aqw1%{ru^+T$qxYm?MWobP(0o(L8U~Ik*)|$BG(gXkhpn7r^_K9 zxj`jU_ypoyI;_b9RTS9rW>4OlK2m$2(a)zXdPr$cI8nI^H<_=Xkr&CmHn8e**|H3r zB3%yvQLLc;R0>26-UqesVLr(W;k`-&W)7zxSs@#whmf|5Ieyfi24 zd3ePpkGTm+AGM<6I@l(*HR*5_(IaBRzCY=Q-Ad}~{Lcp=^3VE7U!iNY7lX5% zhR!|D3;i!ok-l5+EN3XApS<#WaKazUhm+b5=cD4}8pwaWC++e@Z`^ZeCp}`UeW~+9 zP$t%h{_B4(w1aDPrPI-qPthF_P%^c)o^p0B4udldp(MK@GN<&Fd8qFF|LBOF2GkLg=KVD zX;;8e;0;omcWF0m?o>lb5G^%f7(DcTcpXz~J~)ick_LSPM0H})iKQ0)AmiI6&-YY0 zQ@sM~O6|chvlrU;F+5CDd|sR?+oW=v_l)aCnSmlXR5R-fZ!EmN*~vE$#|~BcwQe-) za&mUg*nDF(+Y7~y3M-HLrb^?sSezt7LsS0rDQR}nO-CU)EmJV#UJhypT6J$3MFWjH zVnOjg+Z@DG39gDsNTsr)!&9%yPsq$H>9Hc+W{Dn^%ko#cUs#^;y8;67_+rSmDvQtO zPiiov9gfl#kQ(DaW*iGN`cm=Re07|8LPJv!F?F(gCZE$4;d|3_C-?I=g~|gwqNDa} 
z7Rrr`4+5&&`O)7uu%PhCQlTIS4VUGxKgqH88ruUvX#K-|Ayh0v6K^maUmrQYuJmk0 zVmJ1(@7coIw|eBOq8||BrS5P<@iA`cERxm#Q>TlP_Re>u>Q?DsM&1;bbGFm-!C)w$ z3dfGHz2Pj%yY)(h+h4lqhJ`^U0yJHfAsfHrw~t-qD*jQW>3&NkVpMhfj;r>~TQZUz zJ5@4t*OeWg83?beiX2gVWZRfHl~623iHaeZ{c#v%d+^CQ9s7wSrM_ZpEKF2gG9@Cc zKT<{fsP{o_)qR>JEiKYA`oY1DWq8kqm)DVizD#71f(QkrKBZz$m^mj5#B_UoZz-!2 zLPxVS!G?B%l`&f1=E%i@k{6qs&xksJBd2oZVGv`ds=FC(qk`+bFwk025^z=+pH#O1 zX%^t{qyKD#G3UE3rr0FK;7|~w<8@+H5FIQq78KKYhNZR4-x-m?C7i3^_H(3{yg`hO zCd2YeJ3D#6+dC`_VREn6z0u~cXRR+ybYaO%KbyzgzQ^cAKKBKekD0`X2xU68@(X#k z@5yE3ruLLMuT%&mt7BsFFuVVk7NA9QcAjwwMi85$2f@EW=_6PBe~z52oP7JWkx=b_ zRCO~qU*>jEFP8s1v!tP#Hi)XJO#cSW$v&C=EGdIYs3a01%_c2lcUi&KBU?-V>})}o zAoe+4Y&=8_uah1h=Giope>SFJu4f};lkpTMD6eZMq@OLk&XYYX<3g9{PxP`Ns-of5 z6CZb!98=VNX;QQ+Kvz|B)`r3ksAbr{nZZ(`Jr8=RB0xb?US7Gzot0}^NvIr~NEhS&V=Xqm z^5jd5rgT>8;L;~Zkzj3dazZEhu}1%;)pTjr%E?KLjmjWB2vnYg=V)B=8TVY!BAda2 zj@}?d+IBk1g7|21E*s~3&dg5xu=)L9K(nPMo3DGY{^g<4UHnUWnrc{m6&noG-|_7Q zn&SwIt2i!Rfb_ifyKJ=TQRnnbV*l$`cwO`vQP;~LQ&rd+q=~3G!Urxe-}gR9zsrbV zQ|s`Vj0amj{Znl(*&Mpg?AxS&Fb-k>%V03Q4<}0^1ku$HC(-$K^h|GQjY?y7Pg2b; zN~xHgthq{I?>EM)KY>Wc)voUH;n#I|)F6M!Yf z1qtB}tWc_TE}zm+=ca<6KRhGn8)`3gwikC}2k&%~3a5{rOshE~?e3^(X!Q5hZ9bqL z8r>o2YqH&OgCLA1*Hd;+&Mh9r%E@MfuJjw8SF3mCiWQf_<-@{dBEo~~wvNxraiRpc zSMN1(Wm*ggQ%WgWD9QsH6!jgct>PL}8V8Z~;$f;WYB>l#ri*Z~aaZ&9kCf41eBNCj zR>|z_%oGga$C>jR8`{4A$}x;WIHd|Z6KWqUDw}L%gYey17x_j&I(ik5%Xw}t?wwt* z;ni5q+gf+B!H`6-|14(Xmn031v2G162M%)q9Zlayb_+__&(ec^7&%_#u{67Z>xfjE zt$Z9_8D@!=teO&tjv4>)swmY53PzqFNaxV-u4+UOq#;wsw2SZ#iFRw_kO`MSGW>SHr z#qD9G88(IucXlzr`;+uHx_|$f2g+IHoCzmz*OvrIeh|L2tsPy(dPu1s5fK}dR71CX zv3Ie|I-C?vYJp&LboM3dHRVj53r$8AC~JS!UTP>zd@*48 z-n07lrz5>HWj-yzvNS_a4u(Gt3FN=idVkN}Ko}nagyu_o69I(>0CXLfDsybABU&gL zlM=!{=$IrhAMSKcX<|N}_5It%cGT(cnkslG?tc;YRZ(#@U7H;t5J-Z1aCdhL4Fn19 zPH+v@c#uFE2=49#*WfO}-QBfucb~)ie&4@l=4$SS8y1W1I(4dcZF%;ycd4sv!3X3? 
zf!punh9Z)iP$INl_9sGCCWD$a3mxfY2g=fzZJLwl_nnl>)u(yP4sJ(dBEDaB1URO= z;t$!#uJc{IvmfmN-p12pEOZ?NoK>6u)l&kjLLj-hnL>za;%TPcOWiVFmm~tj;o6G67)mW z2*x(;neKXN0%tSPhysyZgCU@z>Tu%_+#C16<8QkJ%Ui<)ECaZxEJUm#172 zOl#r`vB}iL2YHsuu+Gj`%3P(Sl-s;~2{-#%tRzzx^J2{p-zN_t^GB`DW<8UO$7o?~ z211|MCnJBwtglFL_2U$a(^gT^l)W1dDAo)IB9 zdbMWOMu(Ofxq+n%cQOs8?PDVkab?~WHfhv_i%oajlP5!r3R+*%%c71F^q#~aJw2{o z_yHtmF{T%vPKp|0i}V5>5-q1>YmFm=!Hbd@064gyZ*HwSwKboIPvl}25nROG;_>i( zUW-X^A|Y7l7Y&;z4?ObIWSn@)z0(=$L1dXTeGC_YkaMl;Q%=mxt1nv&>YOrunzR+p zk9!Zrod_gny~9)Agu{kfAP@9!Cr!Rj>$jKUKD||UR@1FAb}=h z@>PGtuXO1hqYkDPsJ)4jp$d5?w&&Db#j`uhhgF>L%i;|A{!fF;?9<_osY6faCaayX z8!jgN&sb@|Y#!?@VUC4UK#=XDx%hWboc)U4lUi34gXm_sFzut&X9SY9ut9uXve*T- zJ?}k{YgoNc(~(#8&AR{;aD7T4SGsYKO{3{7JH5~UYmIE)Q!wq1=$_|~lk2U-wPI*LW^v|L!U*V$Y{p!kvI zpYyr~RE_U9fT2nwAV(p+v~i=enqAZQGCj~q?QxRuZLeXzD|}VyO&>~RNaK3O(;&%! zhY(p|=H?iT!$vV|iO2aQDoFHlu0MBK?@D9um~?lcda(X&f9w!q6_?J!!{58KEmq&y zjT43=FZ9z^ABy3@Dz(Ql!{L+bPBqz#`Q}@W{RDWE#R>->o~IR9@ojehXNTuCYQ@x* z$g3XzxrT$2Ccu09N_SO@{B2ME{^&`?n|bzFJY8Evp-th#QGrp=rM zPtCY&w4Mts30&)N%BuKz(dj+ciuS0MnAI%#n>R5!T!8;Q|EwTs`?=hSUESmI8V~VIuq| zM*t$u`xO5#fDHl(!?pbfVFMx1qWq^t66lk>_kS`2z~G3`|CB(2P?U)W{w;0<{YZcL z|8r=u9s`ZlRa82)qERz;WP$RTrp+NftYvnajQ&ktxBZn^Cc$iT)A@Dm<%ck_nnmAm zBoa}Sj^!hb8K%wHhrF-YOY93fRje zZB%7poQE`YkNO+!U6N&dmta#aXe#0pqX8t1$uq9jLYHE5iU#7GrHuznj^)o-x(TF(=k7 z+FkWs!o{V7fou8pXi)E)_LGUWHosw6(2*;s`y`C3WovT+oMAh(e`9P+p;~x>rwI%N zx(5Hxm=J4SzJy%a1-rXNY$hGs(esBZ!3k>vLa^CU^OLHYn#W^jE?ds<-0>JbvXEP8 z`>NUZ*6S1W_I6Uw#j~LC*~c<#Bf}SiOXpIG!X7 zajPJ<{D4@GRW_OI)0Sz;v@z%Cc4Jw{q}}QB8|r@v$2D`s$rv3{?oI)Ak@ zGchtgP75?NKVAwUWC;KC+jZFdb`>a^S!PI?JkZD*H*8Yz7V6_f@lnt|9jtbF$yZwH z>iJNP8HV%jO&9toyT_`*VcnTm&U@S`mVv!<*L?~#kLphlH#Rd_8!V$iVd3e_baPo? 
z#lKxd6sp@+zJ&WC@_$SQWC7>D2Lp=4Og6_0F%h#lNlzo-iIy8|xWE$V1$FC}-%-FH z{MY|dgvrXK`RcEoo3mAE9%O&P#_gw}9(X#qYNAWk+}si6CuE|ev_4*`Ecn3ktX@V6 z9Ue}f70Nk|zy6Y!^HHOFolAd9i|hCCZt*Z8-jtXCl(EmBhf`gD%TqhlXID$Lb$^2&^-J#CE-Kb@J+zhczs(>Wc{Icx#8PmTX9J~z|z zey3ro#eD8&zkq7di%8JEwdu!Fn2^Wx&EY zT9l|j=6q^?T>&FEwEyVf*0ASq5g?O5wd0jV9bNacH4B-i(7hg(H|?D_NuXA*C%wQQ z@2Z}y0t>=OoCPs zx5VG(M@LRS`Qe`@{jmPYjC}4``_i(6otCT~?|D9kKf7^|NsQR$Ns#`veaB=7XP@+_ zB7zt`j}=g9&-DTu({01E{`5ilcYZkLIqyQC zQgnNLClEO#fJ*6?B6*G1U`)bMuB+?zE=yBtax7+UKSzm$bb~Y zTVAom&*n?lNw0zeLP;jAQW(!2c02kh)3bT%tOI=OH3=}SG{64Ntl3Z$RxT70o&L7W zG1|r}Sj6k1@DHHv4l-n+lgTLtte~3jpMfuY3j0eslrGnfiDO<(0(}<}JChNit&u(rB=&LxFD!GvxED_;kYogex}V1ab_fn8&`!)*POiu$BqBhj>gITrs{XV-|%oB z8ai(*<^G2ywv0L>{^-vrL5*SUk8)Oi5_ds4=jvYh&4bL|lvX!A*MHR+O0}kfyn2Te z&r%sK%-3JMNW1Lw?@|8qp0(=vt%2014nsOZT{jy?8I}11RMff6{#1N+TU%q5O}?XM zySmD(gC*}*Zx}pWW=~_nNTJv6f4^9AadL2Z9Iv*R3?}ngY|L;ixx23X5KdaU56?+q z#2(uN$kf+^sZ9;)Rv)&JY_n%fs>z_i?_)~_2HRr{;`)ursF}KF04WExvb{rRsykYJ zUs`mqc9QalcRV-@R{|$bcgyts7kg6T*^u)BCJT4*qoQ^9!EW@6+en|#c~@{o|KE>E zH0Y5bn{f@g(ektqm+gzBu` zRYdAC)-P*GRwK0VM=_x=+It$cr)xoLyu@p2sYNVNWMiN2Jil0m0ve6LmG%*x2>! zt=HF3H7;x3v?L{s*~fWA5EHq5Sxh<#SoIYR#@i0X2tXg%H%ifQxg7L~AONSE6gWhn zR;%;8vWN(Uq{kaUutbJDDU?0&c#?_#;KQCC;&IYK1`1zlxj6avgwfG)z)_O5aoOu$2^Qq@(PTibMTgf2{s^lIb9 zjc@+p>0}-Fo=XHux8c1rof0B!oPW65kW?y3V!lbUY1&_=^)6XQg5%dPlCe&0p=)~y ziIGhR@3TOdOBIuwie=9Y4PzLXuye)&fyky5|4dA%a&d4}&U2?fwZ3?Oe=Oh24eid& zIiKF?OZyJ%{I>jfdYKcp@6=~vi1UWoZK75Si(d2T6t6HwU%d`LtQ()*%na0& zX$7}!NiM~5)ADs-Mk0?FIQoI%xJkTehF>}6zb;UMexQoYCMM!7JOT7xJp8>2H@{i! 
zbZ03Xp_dx?DV#Rzf-imZ%cT?I+b%Tv$}R6g3{!XDK`R*k37j|XkH5qCt8T`xQl!VD zW27dF5+HJ>#!HapOc8%Cy#|xtpNP$zrPbB1`!I*`L*D$1xIv4L9JATF^x&&G`o0%( zb-6!p%*-5&1mB9JO;Vlkgbm|R=Y4-R5n&%SJ&Ca(Q9)4t2L$4cu{)i~snNS~X86P< z`kqLdL+!2sYoB?qFXp6IJ1v{EMiu?{?)fPq=qmv*t*xO1y$rer+X#I|R&dXb15B6O zYh(KbbmUOatJ!X7uvhXSFz{@i@V9aSXY;MWEl>pc@bFlyXS1U&At@R0-C8$`)8eAQ zad1$^KkOs$0(w0PQ-FJ=lYvSL-fUIeKp@d3Mv{rezhfKra}N52G>NTvxZZ?9sxS2< zO2Jfl@Y%)jzkV2;sy!P@#{|toKq4`&mjo(hZcO;+E_I7SUZGpO=+wePE>75hwch~1 z$2zpdfej{jC}nZ(Hk48kLopCHTe!TctlM`hyi%Qxiu3acS=^2G{=R&%>^qmc-Q-@! zy*H5m#T8r78#H@zu*{cGe85 zWm&IjAC9g*dt)%`w7#x})4@gy^7Bb1TDKj61?{D3-Chf&y6mn}rMkucK`GR8xplV= zrBsG=A6yYXYmAimc>c{>bFlZOnSI^ZcwMyAB=jVz zz|zO)Biu2rHY4Q|m)ZEpr##Ze;{5c$@T5fglr9$p5`1CF4Q4ZRz;M3T!&PxE8(FUU z`d%ShiPBckx}vU60!?`#9HB(+HOMTeomsBb#yrJ%X<}kzV#2IOX|88`mdyqO(U)AZ z`SP}23w!CN{pu;jW$^-EPv^_{J0$D+l;J@hz@RC~l~~mXa8x}3&YttZ!qaT7R^nL zxbin5k&2*~Fa4WbLFe|h+GD&14%;tQlmOR82;JZSdkok)Z;jFqXTk5GyklSDskof6TE-r&q2IxhVi+9+xGFMW$mZi&%?al-S+$PHxlL2=Tcv&`Wn$P^u8?rIL(UPo`yD_#Rp3wB;zyy3oL&Lb!(%J|U#4f-I z{4|0<#6B`7zXM=S!`j7Be&%r8hd&{>`1H4KkkdRfqW_NP`=$m*!u8k0RcPxUyZ@1>*V6bvZt$X29C+==j;*sS+JL%VTBM8i>(AOGWx4`ffahyLtgI>4{c|#6 zac{69f###C1U!gY<6=LkUTRS%$i`!RFpfy;Hu^I<>~wUX#zi1~K!w zZ(My7a{pEl;VX8-t0QLhjy!w&AWNSX+Pin^!#2RCqLPQE=mBAjYgdU?8b{_m2dxl7 zU0c&OG87>}lLo|?B%V8i=<~lWe{#x%V97zwq;<6L7emOY-@XGND`p<|4UpI@-~skT z{BCcr>ZA~REJ58Cy^lv_gru}s{>6kC*A2h^~^Xr+rY3kB?w1ddDo65fMq5^wZv{$$iXfyrXsZ69{Bt;n?FPxZJgM zBDi!;NwLoo98V8q+VFy{F&6(!QBY5Qv@E+v=KqKp%9)yFQ4J)VhknUqAyAASfT zwehdWoyzBTe$sr$xr{v5ff-*V@`wpVIYfncl4_bA{tXURQ zfQTh_E*QKTzR7~YV+E#DyBCAGkK8*uJ@d}{D<`jW_J30>Kh>-+UDtlaIi9`KLY?wL zQPGQv4_|rsBk>Jtv)j%q4tW@nRO-Pa08{hw2JVd~I7}>bQ_e8F@sIJ(qb9}q{*v18%QXNA$OAk^nK%uf(#$~P0fA< zIi2t~mAj_~nbclG2pZik5RX}f3mXU6NQLBkheC;_;5+kbd`4g8d9GbxePki~wP zaQn8y&{*$}A)G3@E_*+n@R1NbSbn{FUd=%H#_TCk9ErMN_-||WegUr5exJbv`DXb4 z1bWU`gMk0~+UQwEPy+sBi1-Q%&FLscabiUK>kOcv0mHaVzZ?#)c=Y(-t9aMMWQFDjb+NF?0iO zVZrF&)(MJ2iK!5KiS2OFiu%eLeZ%WUw|hP2gry4+)~xY@(q8dh%lkLJ99^Je>*d{QBg7U6jKd&!ar 
z)lCj{&7yJ?J*ve>YVzP^0t6BWSq9J>7qEU@9FC=2VeIqd|3rqBTbU_xrRo<4&Uys= zm3oDfvPrxT-3QCxd72qZYZ*(g{aDx{ONk&i*SKku=#^_oa1uGiy@O}YV%$b0V>IZB zsJDxzrdHpXE;OsZqj3r*Qqvg@B#!@7<}F|MSNU05$p5g#G1b>E!vBqnPNF)Mj;ZQG zSb+i2$1bjBD}0?x(m7~Eg^c^ZT!686gtGG~+3?70f*s>`efowUiakJze?VS}&@~f* z9fuL8F~*An6mji<2*Ngg!=EYyu9M8jF{&oJ);6iQtbhW>;Z7d9U@A>W6||hMX=yw? z&G{{kOei>ZV`HggciEb>b(ijz{u}z1EWGNkxgjA4?W1&zai1trasCQ^AXK@aIk9-m zqJq_cQpU899VOj>a+q1C@5Gs*{)OAWitkGhCW+|ww{nz)HGTRQ*tT^uCMPH`4zv?< zwZT$(OU%WTq^(Lm^EqEhD8|mVE5i%W-@L4>?O7E$r#+H5Blto5K1(JYZNmzGy@?95Or_OF-wrk5A~WtnI}NDiKb27SfhKyQ5SNXE2t zr?jk1ZTJDP^z&2yOeF!4WXt|ZOmA;KW_|f!ny0*c{Bk`V4bA3H=cmQX6(BJDMma#h z1=GFWYwrJl0Ll4nThnE1JSE0ugPH*qSdP}65XQ0l zF1SJBw+!)Gka2tn5VT4a7d70dl|&onkRuL^{O$$}FW-Bf1Un@g?z2rfV76&wA=32y zORK>04ykLDNJ~^tylJmT?So=Ol>{%cT9&ikGS%zhp_=ELvbnwXCIvF81;HZCLr7lV zX5OA%yFUgoHJ;8t>H!@_c~0v0^wiHz+rJ!9NRBnt6z!k3&BN-SRB2U+8t=?CT=QqG zc55TbY~P``R9?n-+<6HArU|?~yw6tOM|blrFbFBG)E$EagDCBIw@YRVNZ`}`y!X^% zj__YId5B>iwjegnXnudjpJijuHkCA~>AJXS0uATQoHyrOlt4NMpeS|7$(F}>EMOG|L7m8G3lUP^xkq^Faw#kk36 zq`%j2ZFBVGf=45zq+qP@51Sbq3CY_!ZJ~o1m@RiGfsi&(LS(T~JJrM(*KeJ>@B|xd z+5xf|lu;3OhTV~_{7d8gYB8~~_s8p8hYaQ-{=*3zrHlGNk7hA3xKRaR)|vFMaQ{Af zKf0(BgjY0IcJaC4;m|GS($UOEy>gitRqK6|L>aE61Ro=yi)QtR6MTL@uG1FSOD)SL z;9uXnVrdz3HMy)@IwpP>aGgYA;tkeTn!UbF1;qEVsWV}nd$L63PN(t!8|r}1rw>2@ zkSdLYu^or5E@n|}?$F^x*Y(geuWbjz!pUZ49%IBlFutYdxfb`SMnEtgAL-`k^P8-&(pbFwXKyY z{=shhC;wAml7?D|G(x0NQXmuqLU9Kiw=sjn%r?JJqvu8LlucAf+O%^V*9}TxE-fmG zWA{j~~oB=8g4-dc9)C(=lekqJq$cEA_%Tx)}?*>S*>rE2%_L&%7Z z_5`Bs`8p4vJ6&Uq=`!O_Z`T6M?Gy~WWV1Tf@;I{-^Z4$qGpK=Z_oT8G;8c=CsFnZ8 zBXi^B3j4u<2?ANs3b|sDOtT12Zy*g7oyRNO3 z)x!StvXCbuVi${tLb47`Mp7jbXu;mF-BRycXBn!_Xdmt%71+Hos`wU*5PR423@S>~ zj%xp8QT{#IyU9}^=lJviVy7EUU-VMI#KB#{LnLNm~Z^3tM2A56hLP^d`UU; zS?l z>#7kN&<&smQ7F906Ildej`H8#U; zF8WpZ*Rm^0s!L3+Qq#o4@aCV>DVcM)5Yb4j)r) z{)U;YDL=eNrJ#rtAZTUU%4P_rqo$9{w+obJtW{q4S%Jbom_7romO)5Y%cV=_Y0?gV z4(!Jb#lCwHHNa#dM6IYiYDUcTDCNbuv}pnL?GAm8sj||8UJ$}MA*?_=Xu>*?#)r=4 
z>el&J;A6_mb6QegR*U=Swu5KApZe8wgi?lYx@4AH|6cSR`f!4P9%U3}9LA|jW5Z#i zTD~#~^!3ZXZE!)=PYx>9q+KBdm>969%ey5^x#Cm%V8#4IW=};&nTx6#xNARkKa#jY zjxG@dRJUb{`HsycN&wC`^Y341g8!cN~$HkN** zFb`YTwA6Y|3OGL*0)|~W$*mpV#u`Wk8I-wP7U5T+Mj&LEE14>9_c6xBR(fEpfByX6 zY*79~InZ9|0RRVDEVmwq1s5%|-wd|lW7cPLe z#~VtEu=tH}BDnx8Jt0e!=>*)@Q%tU@SaBu>23Lbwk#9BO-ZQGXWLpWf@l5n~IQrB^kNEms{AKtg2k zN;ZRD-Mz^sCFvYEq3x^Ez-5Wb*;2DwJ*RIyQl@#uoRCR#)Cla5$Ub0oMs)g6%q;=E z-S#!cU&;gGq#YUo?PgE1&Y>)PUY*UZ)JqsiTo5_+KQ2Z}Hv9LUW$ROC2A?!m!A^Bu z*pcRSw1OICK;VTlgPUwYi2a;IQo#JgUX=#b?+o!KG3f?WLNmD zv3O|SIAxhG%SfmM`n38?6o$5UzqSTr=(`o>uWo(74GuJ%(M{m)TWEcNy_aS_RXa07gNXR5#Ev zR&Z!VNpA4gT47p-LpfR#@1=_qBM|8dZIEzh*u?pCQeI6iG|I98vFblQPL0+|Mnlmb zFfVAu0*7*Fc=+z%O2CQFab?8@DBHoIJQU`UsmqPBjvsWp_*8h!LZ%Jl^_p&*9*EhD z%me8+!(>rtuZd)P9rVE-eHr@v&LksEB+yh@#TQ!;9A31YO0SganT z0I+l-pPqdn`+QN3Y^Ly$)oWi702uj%k74}%sa@u#ynrlK)K(o7UWqP^8j;xae zQPmRuF_VBd+{#FIn(J4BqwyM0=n}Sd6nGxa8gT{yP#=Y)qr+Q;4~NOHEqyG2rc4hi|yQipwE%u5r z^S-}7QJlBt>JXumzrsgjbNvjY#&VgWJ^?Aw<5zW`=d4Hhxq_A`kt1vgVESkzG7ZU> zL+dqUda&c0X$SG~&+>f;vsKXm2qgu8t`}!}ABY6QQ#_{bA_CsNO?5)OsUwMkEma56 zZ06*n(cqW>-RrU9Mnq%{wb;=7S*^Hs!z1-Eh{9OwnJEk<`TJJdly#T_Z7gvqKj<^| z3D-iUmoroG%)U9S>JNyGMh6YsE{B+w*!LRZRL>_4*74}3K82E2q3Q_LPD^d51?WA5 z+r{}5aOuDMHW81MwDG`a9XF?CP27xcAuwtex74Qe1=mv-dlK;u=+|OhYJu7TW!9?= z+cZogh$Mv?<^Mdms48Z{-D2{eBzKtE@@UFkC&ZRHyB;8c*Z6Jt5$ns;wuO_sd10Y- zjmM4qbRKsOG9@@>&}T;8Z!0TLb%ZHWpHF}NfIH-9_}deWd`?J6e3N{h5zY?_2ByCW zv5J*@Nun0-4U47WwhEhP7p#A4!rU$?X$7M*NG9EcD-&;GIA*UASwzwu%cQQmm`+3I z4Ub%B#r!Ov$2e$NMnnv%*OEmB za?JOi2z}7im=d>ON%Zu!;Ds`>2vqa^UWtN2DdIc?p0gS@P&(6hi`k0FNtJJEzb>+h z77YCrHC$cS5|DI1AYcG{Cp>~!Lu}TQnDgX*x(UB|@vcnO+4o(VI)^*>FMLC36SbTl zCBl(pTy*pVDOB-In3E2p)g7AcgMD<<8KRb0He+p6D3QGVX+gL;@h6er9AB1LI1|;k zRq;}2>pjXJqS8VRs>7NbB_D@uG;q_YQS_C#Jb;!-2IDoAi=NK)Sp||09sakLLU3TS zhsAR8?8i@+DlMCUnqQmLhfl3dicxYIUF3>sH({G%w^znJ`f3fxtC)Y)=wi`l#V(I{ zDurKsgDq`XK>EEem0S(TO_u?#b-Fxtjr&Qy1T;Vthw1BB3Hz%~G$uw~q}m?kFWCndG`rtCe?#8J%xjvOSoDwILR~F6wYW$f)SNM1k}A 
zw6F3i>%{h(#M56Z88Bone{P)0fU?@ACwX*@E&E-Hxd7zc$75*wQm6z7%S-u^dZ%c7 zUI7Xe8Gs6X0HipB$^+1rC9PIiM82-70CuWxO$b_H!jris2s*@LbGw%ly03@aEv3v~ zeb^QnS&n(R5=hR_qB_hkS8l3l&)U|NU7`M!4=brqSB^>y5d~b(VYIxVx9XE0sL#+` z_q)?nYW5z}ZvQ2R>43@$#R2lzIKM*RT7$aBf^14Z3M5~!vU2+zO55+ciC;$gMm;JD z(*GElckMs#SOL70h?-pyJj_fuWA}+Vz*u`^w9h zxMeb`WxQ|&Ta^kXSAVQOX|t&Qi?{;w;rvq@`4Vl}R!_{t3 zg?Ip@|Kr7*ST)L8<<)d^c3Ksv=5#F|!HngC-3tJ2wB#3*%ZZKG!cqLgO+B|`E5)5U z0FX^|fQ%~xX9a=39T3ujUJ4sz{i*i7U}h3wb4vU)pb}WsUlr=f@>0d1faWU|tg7JI zadhPco3#-~{9U6hsmtm7j!Ph5Lo}|3lL13ww%gBgU{nRwaB|=z+jL+FNL#b?7&Al^CSU_evb} z%=q4M%ZjfZl&=6WFQs800FoEEX4hZcz6CR2K)6L~$RsP}tZOROb{`m2Ts7(x^Hf5%qbSXG(I7AU=0 zVCH!2fh#E|VNcT5@SWJ#>LQ-YW};cyrhoDNd6!P*VSwhMe=`&}6xHJq_v}ZoDb>Do zduFVjJ4r^k>j+Tkr;;4eW8sm6$gE}BLgh0UR7EHFXqFe-H)WsSFl$GANXcZZbCiZL zn(tj?ZFqdVAH{UI}fPsWec^(Dk6o3>ip@JPE>Gy6Wee22C(rQb)R9`Tw%=N zfVApo0W{h<1Vq>~4R-rM@%QWcHzkzZ99zln)~$@;2&CkrVm@LX8pd|jGnb#o8C2P%o>@fxr`u|i!3X5O8Nl3HCc!9SA;oaTLB{*96= zx<9#F)bl(Mxvj+4tU)G-yxgJEJG{0V_H!)l-wbpL({$KdCc5rcyjlj57h7o_d!G9M zgfj@h5qH4N1@2#`haQBEW}Z&`wca4h8kRBq;0u&rXsa|!P3mktK-{TSlurEC(>X~{ z9txzQ==4W`n(!Y>j4WXFp}}++s62$zUq4kh5v9KnmS;=!B?Rrf!X#lW*WN7_Hj_WT zX$sXYxpMG;4p~$eya`!5#L!;{)IePn`rtweX*xjsVW_{51u`HOPjGhAI#2RFn^z2V z51o#~OJ2+EG&Bz=VAcfpgTxRb&-2R^XC`{OhDP^UNNQjw^T`)Wz1f6=GHSKNbRWCQ%PAyv}HC*g5sR~rYb{o2mF|YXK0>p%0Sdrr#m;g*0r{nsZZ(T(k zQUmVT1c6F?RR?pECFE=;lfb-`z?Eq`vq8(vl{jOBu|tG zl|^N>YOKmGtFASeV=l24SvPo$W>=8G@-GtrOR!nH$xQW{w|^bLV4G@dl=wDrzEiPu z_f@IeGxE9?N+yo1MIQ?^5?nyyd3z#>kns!Qn-awE&=-PXJpXi;n$=;{W$=HYYygw^ z%NP8mOPoX}lT=>sb;W?!p3?B3x;I?klafp~fykt!)jh!rjzqMr{+V3u*0doZ=&a*v zy$~`V-9@QoK^sX{x1W3(W8U78;CdOG{O-MJog~M)N#%x-5p1>&=`80x`h$X5X)kXE zE}snX>V`7`A#Rv>C$>ql1q)yI1-89i;HPfQ_^Y0FIrl2ZpiZ#Eg5#t6CBQ^d%x4Ek z%g3!50Ph9`923K^lDCnJj!($U@xD6Lf46GJcs-9eZi~AvuFK%Qbnp^ z9-`@2^3au76C21G_rYKkbw{(d%!|6|!DosB=+jXw22GLG2LAw(B)&h5$P9xj8kU%n zkw=vv5ABNAo*W)&ft9T)7oV0rD-U9Tn7zvOuKUe{!d?Kb|66p*0MVJUb$=?E zC;x-x$q7|5=)MzhovF}r?J6MaU%reNOw#cR+TLFWtm>n-9Nu*97(Fg&*q$xJh8ui` 
zx1u;tFSw@I2>)UEa)gBi?G@R1XI*i@M+GEfq{N4BZ{qN+;&8hC88q6;KD@X z(Ztfd!*y2T2pnmA`6=uTkJp4wa^5J2_(DzRdsisngqw*qhirSMVY3q*nD38PaJ;|% z8pV}B<7!M*>Id~Y14pOJSq{rI1?uQwfwQ&Be6Ei?Zs~fvKN=oAkCfz?ii;*>QX-ol zv!@Bap9&{7(V>2p9;n;1V`z_VpEkjyvx{>-SX z*GNXpwF%NCfg03#oBH`LegysXm2S{o3&XQnlF1Ew>^RhD>l)6RefoWOG{ApncxE@% z-e_)~spP6=jf=yW64J1vt6_x(wrXww`-RrvYtc^{BajphI%i3u*7IS@n#EH<$6&pydsYCFgO2n> z7_zd-MaRa@987rsY)P|HJ2I@;cRsk=o3E$u6T;8~^?AKFwUjPv<)X zl)Hgv69lV6bt_D+uuDSAmct%|kjXF$0Ki4eb!`WiuE$oR|63%`b-YwxRG>mi>O9~< zktp?w3Hs4Q!k4M%uyb~5>gXyQ{Te?O^X9jL%rWm9rM3&}?L=A(^^OF8*$SGue;}6o zNF6N^(uuwB#8BvrQ(VH5Pw(G7J_e{5uj1b$$q+?0>o*O#2>3#a0tVv;Vf=v&u72J@ zzRXAaX&O&1$AS~$ZZQ%yph+Qfte$-Q)BMqQHU6qTB{&y;U1{lsmA~dH z1ofw%Y{-yfGL@$kI!z z=j6P(Y5}?$c6GTm>4u>s0(>T*pg;_v>9(srf=t}2!Li%_$RlSwrU7D-+QaR(ef$`* zw+vYOrzOvM9f0~|S*FgPvUc}Puiq#Tj%#tw`i=p~dcF$sG%WORd0Y5&`_S5qggvbH zED;zPicA1ji11qmt4~E9dY8>L3^1u{dtZaDzsP)ZR6;)+lmWRjY0yn@*a;ep7xPe& zS!wecn4{85XTZMLYC#0Rd6|C`(Ef~{`E?=PxGT%MP8zH+%;H{;oY1FISS_5x_Jgan zWMT8U?=X8Qii*WpzQtcFKdKZH?>Q(d71ur?RLcU!rI_aqI_{*hVq4HE$iVPN{GdE^ zGmmS1qYNlMS@5!%x%RmWE1+FOViFubSs|+Sx@x~X;>WyLm-9NvG~cQyOm%pAc=S)w zL^k{W4gtaby{_I;uaQB}5+2r;UGO1C8)p3ER+B3y8-;>FW%u1PAOD9Ch4|aRf{!Ug z-ERuA|7xqE2z-xtbXVTnCuI8#vKsU!-eKX|Go{ z)tg#_-|B>Zh;t?cr`ikhCi7?f(9fOi6LQ4`8b2;ec#3(R9(>VZ7eN{NZH}ErQgm_s z5s~@J)Y0KqL2=}p?hvwG8tJ!;2>5c8CV9ob!uF+wEMD_RYtRGrG-=kaIAZmEXnqoY zk62~VO=V~XjlL_3{hgtC+=~YNFBibrT*1*y!W#2tZSiGdKC|2w@d1CpFMVW6hlhBY zCiz)G`Q5)seZsTf0xyGpT~p@c0Fh(v28m)tbJXU0hBt~`GVxR>Z?D14KhHa9yQ1XRcdnw$?GCdS$Vfsz&QH@!7bBXHYfY> zVT11ICB@45W<0y-gaJm&4SIXzs1UY?PqWj3D-7z@kry}J# zWA76F36m5hH9cKDn1i{>=Cng7)!b{ta<;N_tJ0uvd_J2Ha+v>;2&mvR9%Xut|H!6S zL`?yQ&!kCZ2)K$~oAKn~7m&_;<`9LO1FXwWHqkgV6CAHa;{8!*OLLXnSp|lhx^(6{ z;)&zctx5pXHc{EyERGC>Fa%;|TY&MzrYPjYJtIY= z-YClPEad_%{>K18$?6^5^`XOab~xnMjY>Dlx5urri*KJta?Oi2+flS3)F-Y}Cm~S# z%IQNAfiVrRgP%(zK-(#bnXl`=zCEMG+m5Ez1Nx;9`K8eQC~5ra)I5;Udh?pDtiMzq*;1ljfn;vf6}P{Qmdc-NCl&?Z=2I$ zlK({38mL^{36wwCd;f8WGnw-v-=uyBABsEc*}mmbv`00QJX^L%JiNO&Y9M+FL*mxw 
z?cSLetBytfu80E@6U_tPZCa0H;}%jFZw~V=8oGnsQcmJFJAUy7hbOCSFlh>~+yrWz@ReJJ}goj*RNBen2(AN7E>Z@ZR1GNoBG>yJoh|HTT1c3N9vUV1tot&?7o!29AaD; zVk2M7QG6^V;s`S`5Frzx8oYdm9ht-EAxrP3*d>`G6L7IBoD??L-okHIY@I0wn@xKP zyUBfbIwiSQnyN5FKxQpKsXgfR_+i%aidGhTF72*u0dL*4{36`ZN@x`7t@a*PjT$ZS z##seeoO4DR1)p>cnZGjApQyy`T=X~u-rtcCeYU#91pML*$u?V+PdREo0Q%`KehjV} z2S=2g6o=aIFVy-smYSKZJ)8-grlD;H?Dq#$kBIk=)PGuzDNYtq=kHI}Y*Lj5iI|#g zayD|YCph~qj_>?Dq?_CcM%m}w!riTwRP<>jTNj=S!J1?hp?~{LG4Okcul?d_-mCY}0_HI%M{b4`i> z3O=9Vci$uNtMkziyL8k`YT^IdWEbr6wdlBau&dZ#qJR(k?|NLGrhv!OKucFjqzNCT z?>{jWQ>Mxp4b>RB@MMzO{95!nKirVy)fYEBC$z}YbC!gg%X9q~b@W)lq^|@?kz+LK zIa!87MxqShl8)rcFd#D@E7zr1%et+jVcJec-wC~MOas=3(I8=RH5Vv#H+G$?5P+8! zfC@Kl7tR+zLUr5ts?Nbx@P3IO`Xg>W1|%JXIyo zynE(&fwpBLNqu8NgmGXhQ+!c8>f|2M1wBS~F;j3VU3epyxu*v^FKT~0vq{A_@MsL< zxrkMn>xtT_{Fx&SD3X>((Re;5mM-|YI;1-6kra;h9k2@Xv0BZcEbX%xJTst!zW$4N zkW)zI6(Y4U%S}sASOUx!n_A7*(y(GL=O{vzgd}h<$ktMB<<5^lxO?Flx z7y#?~-!MLgy!h)~Kzd^Qma6ViGE3xUGj;-$%Q0{JB#iqb{A;&jEH;nBa)csw0 zfrEJI<05dUTbgdO#di2AN7W>NnKx&C-IG zpZN(R16>tagvc@QiA=+Ajfhxv1o<^4jIx+ixF36RtM_XFj13R&$$K_(P>yzaNuQ>u zo)yan5Q=K}Z;`Jt3pNBe1L`6wOzdF4{XyvZppGj#}Pfdw$}EhPaG9|a`Lv+dci@EE4RG7O)lm@1Z(jlk0XZl zZ*6eumktObD%znHShv)^n1rqSmgOFk zah2sr8OqzigIHI?&a_f__hY@i>fhwx9*eI$a7ZO6^9Z|Tu49Y2`I&gjI+N9eq(ZJ> zyAA%%b&?2mvzov0o}k-$*-~rJ^(V#X^h`knLw+dAKVbuBhsJ(8ERB`s#sv*{LOOFYe+=HDx>^)Z~0o$W!mqOQ1I@-RVK*Y@7 z2E+sFLmej>UJb&IIzCErZ;PCCbqUfwyHl-=@(ao!nRWcWaUY-oLB8@57yAqxAIvQflE z7PaHdj%7g3|2|v&9J$bLhxq#fM_(tc4k4Dw5>{_N+Eyfr#fI-g)pjI=iPVKw!GfribD(H&?=zXBFe=ZRic)-HH_IYAPkGuFlBlx>*W@Ugdfu^Es+ z_QU=|BSxVRP*D>u_Hp3ZEWzr?lyEZ8zD`da`(!J2oiZs(##s8R2mKC0O zZ7oGd6aKbgf`P2ukM046H_01YfnTui&hM?2AP1D73 zvyx=Dv?mFOuE&$7m{>AT0!_9!DSRD2g0G)rx$ z-t!j^J}ynEwW(*{nCDu1Y9?ur*7E8v?s;~z7oCIAuj!e}%;5O1Ui^&R1zta} zlx0)bWVa_wf7yI$ZrE_A-zJKAYs8{(SjqKgP2!mPKwlZ+1-zV)rZenwF7Y?LdIjN< zDRrpyA!rmyD)cbNxAzQ}mXAC&>zKVaYL$cCKOI!JZj10wqht4EX*2f>;cATW&h91i zk{HkK%D#vSkzs_J-Hg$9*A#4nf>=@l*J?FB1BY7|8kO1|+Flq+`-V6$XxP4*SdzfR 
zz1vaR-w-UfVEvV`ldC_3b$0H{(B+z)kj4LNOp?9Dy3fYNF?!J2!ZS`y9t&i~hjxDV z0UI?n@>#IHbtostl!;s|1v*5?FTYvyeen2W_M75}aLbakHFK2x&A`tmIw*Sr{fhpK6?4ojv|ARG;btTi&`=@iKn0`3_77Ed+urXi13+o9m{>|S zp?gu@kmTC0L0NVJ>TW&!e0>VCFcsMgCzb8&F0rl~(%m9DZbdB6XBOE$Y0Uh=da2{j zOblvLv>_Y>TYC^LJ)=*YEhI*$+p4FxpsUkxN(tG5V0LKOZ9HBB9j%L9{Hp@?7$Z%q zpt4rwFpt?BFs);2Ol2X`#2CohhMDt?D5(nVhD$j1It+WV<&*1!l25q7%vt04*Husc zPZeBL8%ow6h0;omJQ*!FM{==)j}$|4GfNF25v`4cm& z1E4%FfrkH?EeP>3ds2`0JXPDdeTDn;^!}mT~En{@J5wVQYZQLJsP(V#vV4lXmwf8wsh1=*McRzWVPvnn=g5~s| zbsP{?;B2V}N`EfadL2Ytd@t*D1B%N^ypAF;sOJ1fq^zwG`hwT*zL!!Z#m6_A?C=Dr z0f$DU-QYjF8X)4qy&m-4T2Ppdg#z&P89}K9;(H+XD)C)IbEMh#l^!4_jC2#67_NLm zTJtZpd8AAf;9LrP4Y>Y0;x2ibtc7DH|FZ@QAqCaz7w)zzAwLIOt`T*%%e6kz3mZV~ z>XOH!$@1dui?b_u-TfdKKH4HoDFy+OLV!nVd16thWsBmmJ0ct}l!)>38QxWKXqa7BWRlQ^Y^2BkFPI&0_Bk7qHe$6 zMJ=(eqCvqvEkJ<*5I%e#gN?Ra+<)~W=4oZ~o$X76B3$~#J?`NLKgzcIOwVg=fFjzv zrf(e&5QP89x!GvT`FyyDwK%b`Wu)%>*xH#>Xc+=8RD!j1`7{FzXWiHw2cZ2GRV9)8 zb4DBfl3Hn7Zt0wtKI@h7;O4rz(&h_5=mRh;F(LPVz_h5n0+nsocwa;p%$A$*R?Ma% zJu*1jKvNCQ)xUU8Wdrd1$`xY%crrRQ?)0I3m2W?3i)CneJ0A)#89y&M&Xy zuCrEL5XSqNw_OvX`h>e!GCkd(o4S^OP=LyO#n6Y7VNE@iY< zM`6KE(ZHbm3+o{nj@Oa*KIqpt0jr@UyzWy@vbcmvkC5BZ{B%lvxh5Eo5ZiUbd^#f@ zS=_s)Ec5qRTzLQR>}$#eZd&0-4evW<*p5A*;@a$$^M?40RTmH@{4TZ@XHn30d6n&C zLd#=;Yh9)8GJjn3g^9Z$J{7f4nfd(U^y2iCM2JqOVDfmjk$Ufto)Y!Ni^a!#R-n*~ zbJEY(mybm8lFTyv@LkxP!f91hg&FVofXI5>(sx?OOZh9Js6)2l5=LIu`RfR1;NcZMJ2Nv%DoxWaMY${N8MrIt)1cf{-rsDE-G_0>knLG#F~P+yoCa%Xk&7rgOEu9>lDAFK|3h;^7e$9VJ}3TI?*791%3t`ojl`s3@ZAJ7QK|hq5wjSC{lOp-0Duui5Jz z35B)l5jKCuf|jjfi7?)$r#f-I@Y2{=7$C;;?VFgPVZ%)X?~cWeDQXav!{DkTVjn}v&=Wdu-J$IEKHdmkpDXkA;(U+lvhVH24If$Zpp zC;FXD_f9y=RI6Pb1HF;?do^9y8&P+^V_s?%$W_j7cdhohlE3Uq?LCI&c)w~dLZkwm z$%dHP+Xz|TK)xWCkYI=xqbn64W(rII`D zufum_!q`>@mH?Y}1#^2;r!085{Be43J})I_EE+U0AnT53E%CYjNXbCg2fmeinlj9x zTte7V<}@7oDVy;yz*N z_?S$v?idF(NPA8@O~9RpwR6q!Tc8+6wT}5|rw$R6gwpx70E>@9_crqZywSVy`M#ax zMlz?bXK8)?^)^P^pW9p%vV)r+?p##O>m)91Z1h~_cZ+`Oxab@eeIasm_G#F87wI7| zZCNY86Cx)8IKHI`1gFpj2e}W7wk_rR*y@;)D-YR!OZ5BB3<=e^!YMeU5t>u@TovVc 
z@jln?4>0q-O-E=QoumWN?wd1hZtYN@n zx0ZmdY?1H0=}ESH=?4j z&|lz(>syR?t^vhS%yYr{M*rX4C6wZ^>zG<#0Xb>a?qwTl7k6qqd5i!xH~UGWliw~J zXD~7{w%s0=5bUtbo=#%M_PgE-DNHSg~Em2RVS};UO*;t_!o~w-%lgw#>1`KYT~Mgb1V&a%M*{S{k-@Lu;qT!SIKMF zK0Q6XR=r!{sNMxQY`bk8ZW+A)cjMG-Vm)B)$2Xnh%2Pk7#l<@AF7{TYE{j$k5(y#* zMXF{bP*C8&v@XUCrU(o3gPMcAw(WiUDbmu%>;EVDBSL1@LrDZb^`FLSl2z4c( zI!i7Q)I>}wxLt(oZ&6z0)z_eR{KKjmG3!FtJSmP(kkD_+X`v+Ixy#Cu=p#z}9H}AT z@eUIm6}2cJ=}swTWP}A+Y%1Ma$llrn%&s3yw&vNKeW~(%gM9kdGS%c%k`ZeE2XfDQ(2LWi_7{98?Jd~Vvk3Mvu_La zE}@nkv7L>PYe=)c(Q>cYcsCbr^EW=a`Y6K4WmFZ)<{byVJ%^hUjS z_AR6SNFP4#^C!6Qa6w!hxWyKT@bBCvT9f7MX$ICcr6bwJ_bu|l% z#FC=KwwryE2&u`}f+R?*2E$2@6RXl882+wWDy9%4}d=^jPnz>?I?994xmB=sqf;#jeRrj$f<~ za9G0bVH@I966H0%`6!Z+;R`?9cMySEb4zfUo>l-o@k&4dUS$JmECe?FCMbh6E~ci# zLArZUt=0^U`yOqE|F5)I8HtmYYN6v3KcF}32!rfA-}D$)9vEbo!p+Ux%77hbs%<4l zDmKez2L!b0TC`MAp+!uiwF#egrkI%pJ+r%Z8sdd@?Q1%+xU=0NLkGz$Lf&=r6G z>olNT{Ki)#pd(fZ4BxJn85MF`->giUG<-`5Sj->mlEWJVM!y$GlShhut|Ai#C2pZ5(Q|5=8`0TD&Sb$-3O* zTbvi)Vw4e;UgDK%`S{%27dGRt0_b3omHVBkX zPw+=5s@UmAv1U>2cu=?j6C-09_s-lu!*STg4IM4x`g|yHN0j`2cB6epknc@IPe8dr zIlm+m{-}=;95T3^D)3ko6SEGokLH8)L%ArX9--d7T3W7?trHx~dHk2u}v z;YU!9RXLc2LdIncu(KA8j!_`m7N-mNBLdd zhD@4^_`y3GgTQcd9BskgRzTrZr!1M{sOUO@Ii20lUCXke@fkV!%Fd4bQhmUQey0;! zad~bxHa^irkxxAtxRuA@eCkY(Wv3D+2Smq5XL_8f2~3bH&e2HR)7x~iBT(OZt!$Yc zw`1ZA?%;eEOKqR=>$+*PU)^$XI(WNKkM|GJPbV#eAD6BTS^Q!8(pm4cZ})ZXMKq!K z1ju7VHPc)U*C%a-)BESnd!vH&A_A5#aH!LTmJ3FaDEBWNr?wag)EU&v)Ppg}>WnsY zlH~JMiu{)w^BgjsNaR8fNP;3omRlL(_XD?10Or_~&Vi*^vxJV6gg!(9_?awH^fO12 zVwxgTU)$1cRyk3G`C2vsc%^{`KfG3_tZ~N=TE0XzQA`~>N}HFr)C`^3t7*b)g#&a* z%Z{}f7))0`1QVgJqEZSyfI4V#I{wBEHd0KN-zBNs*?2>#5C+e_<&X2l~bd!TYXK{p+PUmxO>@WF$+cadDG6`-Qi${Xl@EbkG`3 zCXjv1!Ps_VxdLwy%lbKN{PNjfw6rB@{7UHp26}oD5&;j5J6WeA#+9?{{0Y4-p1B?? 
z& z*gp5Ipj=s)<`YK*r$Or2(M?$P%b^Syi+Pt_RXmk)%vFCcFYZvexYE)ead9vS*CTIh z4Rdo-Q`T_9CGXbJ>Dum~CpE|l3s%*thHZss`ZtsY!h@Etn{t;l9a2YH3S#7B!-mLF zQ5_(SO}OKsu`eB1ynQCOGwvj`Z-RnB0v^Y8>z$*6gLx7XU45G>lmdyqJ<$3`1bz$i zYg8<ib{Rt@=p<6Ddkk6epIowl`*A43;D%vt%BPrsvOIJ>(Qv*smg z5Vk$2!|y}q&+`4Lsk^ta%wjn3!*O0bO9baKog1t(xQB=Bd(iBu<+TGc7`(huYn^1_ zeuqx54CKJ^{{BXI&L=fJE$yP~tMky^nT;}!6+)3ic3>|8WrPw& z8Vs+uvAq4LQ~a?Z^ zt;+|aud%V9PIIV$rZiTBm-FHzvN&MHu4tC`wYvf%f1cK*dZMa$sNIq)E+M`tYzf$y z^!4nbZF(fuqAHzKf4 zi^B1?(cN^mor4t+gX}mNS({Sz-R*t*vl88t>&D``1~wo-vYKp$1T#{uhkGh-Ko)Jf z&d;vm`%NV_HzwO3I%Ukw1;U3h(O(>s;oJ^;WI*+|<}XVG{g-_ys2gW4S^^&GuA^3( zo10exPBwm&UI6fy&SJJ2L!+7`ur!o!S=D2R-DB}C6613%1#D1!ePD=tLSeVk;&6wGD;MU+>MpQlc zHV^^{Y5%NNGEEb^H#8Tkto(R)_2%?`S>H`F`Xe>Of1%O}rc|2`4R{?hn&IaUum{UNwheHzk?2kWZ#20Zo5y6Hq*3qc0Zmn&W-@kzHtAhXZcjW? z)XwVmwmIAvY7>uqJkdwiLiB3z*d=k9yZ0R>3PB?14eXeHxTKWJ(M!Ga6WX&5>gzv(a`exExl;8gqP6W|y7jO*hHL8x6 z%~LGke4{W^cQZOIS8fairk4p}53ps2$FBhJh=eJnaitbzH0*l;&64(cRZ7Zz#V!td zCpr;dZhAp8(|W}m5b3{qO=!zSwCVgNsZ^t=TbReVMpMuMh$S9|yl53^er3>$dYx-s z{9OC`^~3ZMg*91FS65DFj)}OqAP4#+5MluM)q4y+)P91Po~VGkiLKX^O`dxlsv8+1 z9&p=7w~|I>$_Z=k_77PkJPY$q&ExjU>!EKI`OiSW!Io5zXLz!j6Xwu=F%uc z87AtzJJ-(%oSOj2mC__wB#E%8LH-?8zp<87PH5*A;QdP5tFR8cy^LD~Fr>`ipMl8V zZ~hS2D+UG!nbS>bb%v92W2-HOH=Ml`6-Q2*FO8rqIJIHco_zKn@Wlyn*wH)Lm;l^{ zz0-JDRWV&22+lb8=)Mn|kEV+BetMT!?o+0s`qaZp+*rG0DbLHR5!hw`AZqH}V~3a^ zk0W!6Uhs5_L{WcluSMUg5qjsvfgs&)Q8sGoxpmEYiWLhz!uK{~HC6@_$0bBT<#4m1 zN2TN1O#m46*Q>t0cYxBS3#B`jTE`UwXLm{01LbOht~r+iMg~(|p5s?9o;d{|l3cHb z_QC#(O;(A(JY2hJ3*GG-7stfJ)a6Aq?T2#{sKks?XH?cnwivV>c2ai+N8XtAW;ihW zBcepz8DHZOsEhz=flf!sWhJdQ@cGLrLAiYqy|ZK7DA8M3-3s~MQo;6H9SmhFw$hc; zPDIP?a%7p0ad4ELx(Q)n&+WNpW6e6tBiGQQ7SM2EACH#%k&uP@LqHiuwCsLa=nQzt zqN0e+&FbkkY^V^b_S~Blf5cXHP`HOZ!entaRM6c%ADJX50Nyxc1X#0*C`kb4jV>3* zLVvc=1-y%dC1eVTb?HV>K{WBFi8vo_=R@F6z@GeewvSqk7=@;Ja~`5gvwyh6*b|EBdd2 zGWU2gF-Y7B9WcFQH94;FQtGx}XcfuUjRNXRuYMf*ZDq(|=nc{&8o9A5Tg!?B)Y(9f zfx~t>hu7VC)G`38m~NH$K>uDKxIgPHH6J(qG<^T%_Uo!fnQw+mA>FW@npx}Nj%pvJyu0Hc#DN9B 
zRMPtSnC0cOoVt0dY8L6Njagva`W(w2i~?@SCDZZ&_E#Q;V29Ona`4UhB^0idqt0tp zcAOFTw9O{*1y+=M;^DE~$v+4PPuU7TrFo43*kM2H3DSGr@w#&L!ylxE*+^Pb8ir8@ z(EC9VU}6u7de;NZc}vXm*kCjng<%LsBrw@F4a|;h78GET62Yd6l`s;T#A~d8+nRFB z)g;9x*N6aF&Fz{p8$3ai-b6Yi{5kakK)&(I_SZjP(b)R`0FB=N+m-YXed1q&>=3{X z{BNY_clp14M=Kcr>uae0y|2jzP6*|+1CV8M(;tfryT77(If>9`wkZK;)c2f)qJ&OF zDiXjdqQzuhIagQmhc~Kg#zhD$R%OGIV*n0f{waXX7VctiHO7A${wi@0_{|I@<2RtT zk1OylP&rV*u4>lyJ0tKZXN!U6=&-;upl>aaJzh!;YXkqtpUcwv|8Ix=r+%$j=K?Q% zkB4j|M!CPEG&qqwsB=hJNzt^RFaT=;$J(|+$TY#zd5YfI_`b0O*yL3fF!jD>q7GKNqri*<^7Ga9 zm#xI8lqH*j?9|JO8jtK8xuKT=p=lZnq%F}&$s&eU3QZ3r$ z=w{}mo(lhsb#2%Vfw0H%_uY{B(2wZ}%?`uGhN?pzxXh(;X*>S+mzLy|$q@UKA=Qj1 zZ6`hG=_7%N&%(g?GB?d`OdC=4MPcN}*CO7>FcW0ta2ajKgU#jXgAJGB!L1+#t-Gr@ zkh_qX`NIgg=84=Sj&k1xMu~A%{b7FlMvjCyp z($fvFCid5QncRgHktE`whhQ=AP2=LUTIregQNdHgdjd(-yHC_T>@pyg7--I2D$zWY znVcq9>bF=e)#gM2u+=FUXYfO<|52TsN{KF*n9gcHSUsmoQv|~GL0TgFly+N0)cJFT zppv2Q~E$t*CyB-UhvaA_i&-Xdo)6DEm{H2F1M{fM%ba6n}P6qOtwqhRC9YVwJ`HB zhvxEH31L4pks|D}4~l^U)Avks^3{aXMDXECl+u`27$ zU08*-8t}dMJOC@Sk+pRnKisIk`|ErIw@N;`TJ-Mi%-$WmjQA=e))ULmdr`dV`#at^ zDsRi1SmDmU7Q?XMce=j#1%s=(vs?@B=^lxRY@ATe(H-2MFE)I+a z$Tqm^mbZkMeEu{LHmMpD8yllK;p&g$?|K_``OEr#E=vw_=t zXV#lGFnQKtOe6FP|Mh;l+ePBzjsxV1g!4{0qx4;9qmJ_dI}Z<~&=fzXJ+%A=rs=}i zQAYCoWZc1@n>MB#-e{#1)5Yp;Opf&g!gdD5O}fTse1b%#78yy|C~xkhVe&11#_rU{ zkeTFd^WDz8VU*daKop^9Q~jO%q8opu-+jcJS2*+OF&+*J0Uy@!@Q4>CDfZr1O_>U( zpXY4*KYS>KZ9OjVk~cBH$WDiH4P5v8@EZHiXB_aRFymN+dwSgC(&p2Cn!N?nYWNB; zs-!&(M*6+H9cclaPl&Z#+1TamT8z1q2g7wv_flH8&vgCgeNT}g2OFx{q_GITj09al z%XzMED?vIrai)ecvWkkbt@)?kbXq?>91q~Hi`xG*p7jSu{zI#~MTeXCEU2K@m0kg) zq*?uVr$j>dYH=Jwy>>hP9B9BOHH%bn7c=KNFA|KEy%LxtrNXdoxzm0ZAe(} zw{YA=QV-wA*pn=Pw^A1Epze?{U?P2z7H;2dJ=t~xkwSZ!Jx0;w>T_|%+totXi&(PV zC9E{-eQyRR+^oId;7~INN?sGae$%m_YYIlNunXZvmclNwO_dKu25lRDf zYBzyev7*~i+r5YzKVA^1*;rHFuHd6Vi?QJo90;tT_njMUVrxB(vWx^RSS7?xH3Y*| z2-m`Z(e8RZ-su&kY~Q9I!szg(-M#?+6C3A^vpZO?k`rv5p(TPU@-E~Wd+XTiaYQ*s z-B9Gf;{*lpCOVF&&DO_SX4Wc<#BV_74XvY2P>U%=9k|)`!$XDT1q6&w0@2(D`44ghO&gc 
z+4OUa`VIo=H=MY@;6EUyH*5C=Jw4BkmSSWiB(}~^=q+Uy^j$3hpflNR56#q+syZcc3>(ewfS&z$e zx9(jQbxt?gH1U&JZIh0N`P-Z4Ap%v@%}pD|l`Z0bNo;bO-E}UM_Vv zsCFx}BkGT`h&5UB@~Oz9Eic z8rb}2&pc#n2$ce#dfL_(m24EiA!ccb1W~tm@t=u#ZLNdmJHKAevdrI36m@e zTZZB`V|5G}Ay1?Ahk{mvYsZ1S5>3{Oa3#8mW>R7)M}twxgOy14(mOfl(?<){=ov+d(~-s=^!ux8(4W1G2YIrX*-mE-Ob$ynpZ2T$Ptl0^$E z4(FwNEs*i+3%>E`&E zttE}c3!Oozd!BZ_+4IZ3zubMbY`6X}t}69-cSo$NV$f9pA^!+D%0!yeJm`tCrgB*95UM&+q8M4?=|M|`Mb$Tx(9(>KNY1x z+cE4!M@`Y-Yvf9vChu|)On}n)2xqT1-=-E{WqUJ0S-zf*b%36d(jXC3AvlU3mUHLv zN`HmVWRLt6rSp%_ZTODF&_oZ~SX+?_@6GQK9^+b5*sQ9&I!Y^AgD$0f~s? zT!`9dzXSF9>*Hqw$(!31#CuJg(y|h^h~u=DqPHR~U$Hv*?>tVnb3QIXwuY9ApUB-| zp)RL;x6JJGM66PcHo7iPuoh3>q**YhzG8{et)ywaD^S;WS1hL(P=70<6pM{>CU!50 zLN~gznQQ^^2;F_3SQQEq^Oo>_gNp0SY-*1s{Ax>`3^C)w$hrrbO|wv-R>9>y{bxzL zw>NNKOm{IM5U#YPe{?QZdyOM#R$((SP@B7fCp#DY*dzZ@RGSVpoD_O}e1a5Dw&Js2 zl*bM(9XpB~-H^m2$1z_E5)O`g44DM>Q}GaqSE*y{X7v#HJg4;je|5y-g5Pv@Zt_(>&5R{Cyb0EX)-0Z zHkl?-^bXBa)E+B$O9a&RIO7z)`Z`i|k*+gP_17|Z7pE0PO7xhd5fC+vg6)|`Db^%{ zqcZs=#+M*c()WO2G+qD$vl+s$h+TDuHRYi0Burw;>o9kAEIUG`p!i!Qi|ded_)UQR z%&m|B&x{;V+i^p#eSZ@rqcp`mSe&4Ds7%26xW(9{CFOMMBVf`O5}zq^-XOfw!$MSV z8@iULnwJfVR&7$^-|LUlv(YV>|9ZYs zg-(BukblErvpsQ}{isP*-a@O0s!j!l3q!{$9wtpzhoBZsdG-shJk-bSS64A}o1Gb< z@6{Op4zVH%p{eyf&p+Lb+t$NPq;PTQCq}vqN8x^mAa)pSKn)0SUu101w;CGTwCBDY z)}}V}j&j1Mx4AWLJtmjzX|M1axVh>sGtba7JdBDelH^i?i#J6KS>cBhZOp5G4~KwU z1vd?6*&;blFvU9)?-0mGB|F6VU>nR$&H2*Y?0p8n`XeL4D-?X}Rd)h^hW*md4ha^W zn~ZCw6;T(LU>Y^ZkwTWiy-FQow{(d?M0*8c5ed0|*2L;>k9e8w9~~Sw|ITgzoFQw0 za(yUDDZr3=Yw?)=4McacAPpB%&mC<{=zV!YOpJ@Wv>=z@NH^J*66J*#v&66(BrU{I zVfYK)(Y~3zzNxC6`a7MSoxSq0BW+-$90AydHY3uVxcBI_OTY9{nDnBDM_Lv%KkOuy z1hiyPaKvfpLVu<(!;U7v9tgUqRN$0?agyGC06Xm1y|PWW@EkQzZt<49+{fF}^*=pf z0xsx9WvX**JxgOLdzq{qgZ@;46cIc{irah2 zWIMC*GW_HkZwe#X%8KX5st4e`Zhia<`^f@xp(ePvVnH&onXyqT&7Vj67zc;@@N7KL zyB6FGzm8($G0?!S{1dGAMc(s75(s-4%M%o2qX}w&=i|$%vk_!}QQnpvoV&N6L=oaH zunw)wpM>TYmj68>Ode5p42ppmU01mMAMVxN-`p#=o~UO?NS>c+0j$ubs9uQT4}|4X 
zL`yh*6#NfRhGa+h-$TXz6Ug(wj`N$m7MNCgm0YTxH&%WK{pake#w~Sj-Wmk+Tf}1ar!44t+BG4qso5up% zF;dOT^z=$5pp5l8Ryvq@sC4iM*T6wm7EoEe!hh^t-81*jD~72%FfdBK<*5{oecpzA7Qh$SHGVG258;a zicz%r33>nzql$S&N%a4wg1=H8?K7x{!Cpm@&Kz7W)Y-i_nbKy$UkM-uo;3fg#o)TF z8=Ni_uO-|)S*(_`Jqa}oow=Q zK%F1bVK7Geu#3C4Gj`TNR?^4!+!Bx zEK83CY@k`D0i@{uc?5BMN}}Jx&3N^Wq9EVtidKz;f%4h)R{+QZ=p~j5wm-Mu03>!s zMrfI#2^*n`Dwsqt9sBYlkV_?rm!){Ek>`ls`QllmHNf#ud172~K-r3gsy{z3FB<_?<%O?Nh@RKEp21;d}uKdZ@BuZ#)?niAqy^KD*D9O#$+ zEr67fXdxe9w6UE*hI%5DiO2yndxwHB$@x#=6H%k-atw(w|Kh_+xKdExb~mB{{L)lk z5v%jbNuUD&c5Vye@A;t&u!o}}2wQx+Axu06NujDr88W31c{4LJVQQdYb6bE5C%rN!;@`cOJfPCtMgkJ3}r^_wj>&QA5GsnKVTs#TTHq-C~i1dpL+5_mcVVQ5mL@k7{=fG_xhB!@|{nh#p352~8& zu#~zSJ$G3ha8Y3z30qHEOk-|4%x!skU8mvD7^CN&wEWpA0o19YM#Ma_UePx|LP^OJ z&aTB@CB`FXR?(c#n8;kK5=*96qJXyQQq7%^G9$C=D4ti#S5o~>RK3XRO#3ORe5l6! zbi-erL731oF8+_d2h|RkMyhV1@5b)Vvc22LG~<#{84dqz29OXgEZZKR-G>Fr0RA`C zgznUx+#F1G%A-6Q98$uG?CSf>hOcL$jI?NCyV1B4ly1wU!AIx>eCW z^q9)km7KdR*k@wwSi;A3le&!wbK*_YL4?8V+DU^(l?)LHHtWBQYq)uO*Q@yT@KXR+ z?l~6B#a0$lrt+;m%DnjI-$|0rjMb^LLvI74hibXhA4eZDK)uJ{>W9p_zOR#pHfH!4 z3oEj9^y@D7C6_cDIMD!q71x9VCrRFvqY=cH^&vgMX7_h!RTVPCo`kB_#_g z{7yC+{7&pn<4QtfdGt=uER&6(>~G52cx@s4z0#p|Hky`Dd74<@W+u}pvE!y3I4fWC z>Lyqyjd4tB&N;`|FFH_zwzz<7%FY+E8aDd~JoCG@1IhA}gJW#>6 zH&;hOC5)W59NO@Rl`b5%cxyDy+2$dCmvEJhxm!f5XII+b)w)x2<07VSBtarBZ231tE(GgG?i+jyM*B1I#0+u9tn=^|4LCdT!_q z9WIwKJC1qU$#Maw;geJvC*C|UEa!U|s64EY+~d-pKUZDlCR4s?R-}x@JJ_*w8q3e9 zNVCX+>r;98LT~k+Xl~Lty5fNrpV0)$XqRiqax)~&On2t7L7gYRkNCzfo|iF4r@W`A zyilidzH%-7S*X=jF^gWQ&16i-!_E}kZJa9fJ&a?}hTrQ2qbzeXcN{2nF$m90!-77K z$XGpolnp(6m<{O5o^j>O8LbiPS*9v?-SA;EbZw9^)l)PP^q)3OYdln6PB~ z^~3v}VV8P>?y|Bkj#5s=LsSs$>i$%Ms%-hwf+}EmSI&{(hYwcFG2w@FZ&%EcB;>^A zNZD}fn)I1c_N+5W>U^!O1t!@c*@RF9Mtyly<#{X|RxTYIiY~yhODN25NyQWt;exz; z`EY6k=n}1H4BMGP`lPBsUTT6PNg;_zFe0Pf0Y&`&tUYh^H!C1 zLOGSgosjceM@wz#=RU)7>0p^Pgg}m3QokJ6gZQP0s|4Ti<(6&4%7D7w-j?Q{S@AQ!9hQ 
z?*+v^zkjb$cS%kp_GQ)<#`=nb6Fp)XLx}N;V=9ID6^^>MyP8`vVN6}P%Ta0fbJuKfxDgXNVGUNpW@li&`Z5@sEe@n7M0d{e$vx!zF>@Nq^6QHHoVR>s;EeMcU^JwI_GId&a@U; zU3903<~~zY{yXfmMZpGBJvz|+-H%#|Kf5i`Z^_Y%@mOJcM=#gtcvR0o-N ztT4sOHO$-=!{gc<7^o6LEPzu35)!dkGRk$HCb-w_ytH{@5x?~5^)|(H1@+<(uC0uQ zszb>HxZo#1PbhX!GlPDRGE1f@;tB8u`Y#ql4<@nB1Go@V;cXSIEV|2 z2yAg}G%|nw03=#5GE$-eQsv&GH}?{aylr~ZD6Dzv@o+uC0gS=UP&8h7HTWn|{#V&w z|NOb`w5*_AB;v?{azdSMfYV-f(mugrF(!XSCI0n_o1~>;*iTDkZ(liRHtI>scA0zo&FQUZowaekjxuu9jFna>$V3dc)rA zBkqQFQes9Y2s4(`%tb9Y&5%VbHz~3!up{N??@V@W!B}kTz51CW^J4ae1*emDcL$&h zWs$i+mKq7EOpj3k#=xyZ8MCw9=i-7on#QBF3Gr%phTQb*8A|sL*Gn{4Vv&LMCiHL< z4xLsUBaUQ3d=op3w)zD1&FukL)fT^YRjRF*fxDz)-3JS!+z)Rl_xTp7?_Ep0x%q&% z!5kAQFoxWw^Tbp`z=pZAa%t!mP40VikVwr#=qcOpL(XIjz8;PNQcuvkVXQz0~ZwIBl zxprC@-dt7Nn=G3s(9KiQl+qqxZfr+bV3q9kp4!s?kkz@y*o!RxOP_y2=nKu-3Bk>> z;NV6S2_c7RgV&$p5TJ&y8FGjS3qvIUO1gg0rZ2+o%M;1~q`1b-j(*t2nkG;=J%xF6 zn5B??e7wHpx-+V;x|*q+5)+kvRNkSB$<6BJ=v>mt_Av?e=IZ8!&ZN4#F@k1SXp#Q9CIV)wyQ(i*NCd~{ zJ$R-YGx=Lx9#c^&t7Lq9cy(#`?eS+ z_xFnaWf)Un+phCumi?b2Ul@txwAJz$1^YdGJ|#L@N`jcD%qr`|@6;qHIHjrMD}o236o zS}!RM9Ve{qdNzknQbXszdYkRASA-OS^IzMNKDNd5Rf?h99WGnZu$j8v^;sX~A`B~By`u14I4|IKlk7%$bJ zLol2<%yXScN5)cB1q*KfMe zXR0-&Q`q9%1zbHCx)9LLQv1SUJN|f-yh-bKFFLX?_Mn!OTKT|Lo3jHqW~oHjN{XwQ zL*eihn#)rB^L-5qXP)FBl>fNa&+B;N+$AzzEuc>|x1W=EG|%j4xb$V5;c@%^O%?@p zCh7Fi#9;lHF+%ak+_+uT;&VG~;z84R5a9l*Sd0mbo5@Kn+_QC#vd)uP?6qMa^fhd- z>e<`uA)NGpL6GV{n7JIUds=vl-CCX3E(?_MXX)6-^@7Xo8Cb3PAL{ecn9AoNlHch% zt-1nc@y}|9u7)1yaaZF|(+}eIcp;g(%Vm`EwQuDkGwGiOvd_)8Jytm)3`2m%>4Wso zJuanOqa!2d^X#wxO}!$coF(P@ci6{GAbUv&(v+!J{FlK(mrR;1o*H{_s*blI^Udz3 zL)T#MzV4$LN2h=9S*01S1aqAdDe}3|G~sm(_y|V5oZa#Bf@UqZBg$v_hmWpzC-jp~ zrPKG#wt$yM)qLD)St9H3iP=v4gU(K`G5(_Bp z!+)iPl_FF_>mbV;g3z)UQ*Wq*x~Ko~ki`^D@>tOqi%v%<%}x8*{QGXbY;i+S?#Njl zuAWfXFAEOVJ)&D~PnuANpL9zR3zRS7N27^<)DQF++TROsRNZpvI-2GanuAzzp-Ye_ zvPK-EExtXT%N^S%Q38Qo3PkAD>EHaCME)z%h;94RRj6AJ2?%QB5lYY5=lJ?ZNVSrx zL~U3PoUC`mA;uSPPWo?sV5oP9z83a?YTqFgfAqKkWGI=NKSSZC%sv-|WIdZ%iHRY` z+-ocr==q8oR*A%M;d?Z)S`KuXIBvBA8mbr8 
zr*DjuI4YTBzWOfp?4es@-2hbqqBu`11&42zj;<4Vz)}l^zVfE$$H#J zv0|3T?7udU5OYLL*MNxyZwL83ZlEQSurCIQqmv_5-+A~kNn(W^c{lnMK$g0CUD@;d z+(e6V1_d7@Z<{r&Q0W|<$tk;Ha*#NWK0?-mjaX7kmrld~2PO)q`_;hD{${DL z6>DOy=2nC0g~g!a=Cb?h!4Ori*k+juT(~`&JTPd-&Lq&`#MJWLeVX$duL~1hZt)U^ z%CKYH-`h1xNn3=ohCzeOQ1=B_-3pHR38-}-%eqA@trH4tln{KTfsKL=Rtp;zgV;!p zWJ`24(hXCg7#VVM=!Q8y|lK|RyVI9E?;u|@Au(d3=G&T&e|`aAor)lsSUaU z6o!$vHy9|P9UdZZu0+Ux_n`UOxm>mUr|!mnEvGd18L5s{%G+Nfg`IgNeFOo$42-*d3yciH_>C7zsyma|UOxn|C==AsG z%ydn_A>M(Latl)HZX9jbqsl!BL~xzw*42 zIELJBleREp15Uy?FGN#4UM-Er^>e0AH+Jjut}|8BLzaqu_!%+K9C$E>5B$&1ef#Mh zW%`?&R9T)WrWe=&dC|W*-rN}U>?qDBZ!6WN_x63;j_Enx(7`l zoI*MQ!%$BByy^|c=U9{Oc33M?K(8S7n)e=7#YxjjmB&q%Ac+P?O#N)i19gOS;dJ>v z7~Q|#E($)tn|#7&x;!#U?yZf00Nchc(4OR_E+*N_t9(L-$0Po4n?yT?J+8kBtRkUf4Xn}OQxcb=ccz4bL!R#lsS91N5d zf7W9CGYSW=3@-60QqZijZ0X~cI2WrH(<0b<=39cu=*THx*~y#==l_@k9;z5NN{lpBDx9718bZL9NE*ci)IwrXlDE4rZ>xV!vcD(iv- zIakK6{8@TetJnlNy)KCcERDHjhbqn#)8v<(W@NwwbQ8WZ@myIt$}fm~uA0tgY4eQh zhYK!FIr+)L*j7`_1PhqH|LM)O(C8&j&8pe{%qi6aJ;^MN! z*Xazq;C4wzbf6;IfC(qA!rhnjD(LQTAMFDF5#QIs_KC938L^vHz%i5VKtny?b!2Q# zR%oXHh-4se5k$8Q?ZSn{naZv5;p8Ye9G{3Zau7a#)(B~TGU!K{E|M=vsp;!_^uM1J z07X{3>>etvaI7WTdMlCN($1>9f+#$VsGquJAuFj}Y`h0)QK?l;nXraauu-6#LJOf8%izIW|S==4nH^FV^o!ks0NEMwSM|R06kpY3ls0^Fr6wgD8DHS^c%79T4LXz%evc5 z5pWbJC)(HY;@a!7D&FR?rnl#n6e=CWgsJG|2Z`4 zn#@CQ(AZz1+cLvtOWO)s2aOBYrKEVi@vii#=xKx?+|;m8J7?r)*{k|&E3gyD=gpiJ zUJ!gaPkgV@yFC$*kr~%}HQY(F88 zOZj?Ulv=g`#sGsIOJ*i{ygZH52Nx?fJKO1-+Q)Jxt_PBA8Z}ux?yOssxk+V9vlCC+ z!BQIo=#XQ!<~CDPJ2{AAba~>}YY^I1_Fs^ahhkJ$iT}bj9Q28?`3ah%uhaNwIsSwi zv1w5W*XZxQ?`Aq^p%FOxrB1FZW`Pzx;j$IC$_rqUWf!hLoc*}36vM9+vzIMg00gM! 
zU%q2%6H9uSNXqLcaBVU3U}P7$%y$q)qeC!+N$FB&`D$pVu!`=9EPLoQzSTu?!^-4G zpPKAKC{5X6zn*3g6t}I?))ArSm>cBG5FihscX*}wlgkkHs^^vo5EUy-_1bS}z!#=8 zup}zw;Msp=ORGf7D5*UYFuLd#ekEJC=aETJXw3&dca_jL?4z#2pQ(og=UXux*T2w5 z!U7=F<>X~`i+q&WO$QC+EOwtTYiszlR*ih)gCEP8*=XD%xbUEo3n)!%i4{oN5=0cc ztB+-z$(55fS0Cf7idh)eC>0 zi_I~UZ#$Db3K((n>GcPhZcIXkUv^4#KXISOr{DGYkiFL(EQO%p*>LtnBJ?)>*t-qP zK2iMXXW#l;9V+hk+I#xT4wKfE?Gu9qciqRF%&@-TPdSH%;LGu|!gKALiFSd(Xx-aq zCVL)(zJ_(EvxbnnL=+F!M4_5AD749r!f`%&_6=%VC09~MYvN^`v!#tuQ;M$JS%M|( z^(7Sbf7-PHVZUT>JJiYj?<~N>Ya_~H^~+TViEije(iX?Qo#QWbaJ_b|u_L1=Aio3Z z5V5<10)Jdw-0U#{xw+e@Db`o#eG8xabKlEAnVSR9ztvM#_<;}O`)XKgTK)%8@0$x! zXvMKVt6i5`E;E9W2&2I#+*(=>gq{?Z}a1hx`9@xtJ-GDT56Zy zYMO1JQW6H1>YUJKTPTN|ckbi0ZQwSEB$ZnlysC~g;p!QNyZAa<+!hPL_naF7jH%yltQZr(qePjIBbeFcbmIW1GdmqTj+;!7E+UQd8MxFH(&-*I<$%Kea~J

yoE)hW67BLrI>4B95VU{M#@pmn0ZClwJO6F$KBFp2St>Z0_wi?l zxqJir!qeRq301LkAi_+UshxgAZ(=^U7UNce0qJ*x-f?+G1|`)LyiJ1l_yQ7hVf#m! z-)gC z4fcY!1igk)jpwZQ(C;FttmTYD+pu511LgY)jBu33{qN`Y#{dB(G(x$x#w%o{b0cgY z0mD$zkSCwK-ecF%efgUb4vtyfb0#4n79;mon*L%rriFd>dWJeC2KM@V42@)|O%DD- z8?8m!B(i(e@R*41alF;fTA`fj8P~ikd%;B!(9J+s*F%qnTfT ztvhdN2)PWzOb4LMh>I2m2gpK0m>aHDW7JSA72#ihef@!Q@5#Q_B-LL~ImG2rI#0^V z%w(ggNXFkwXNF_{ScIsMGt8ZnwXTk|=EyRnz8~_3x&3Po z&@Zy(*0nkRqmoE(Q%<<2+ru{DzJ)d8n#OAKAao^}JP*gv8}GtjZ(Ugx44$q3s(HWi z1rVRS&|Jqx28fJ|lH7D3dg^9i6l5-}qZd}@HUc9^DD%zu?V|b5c2*o5tvP1h2)Ys%Donh3K#m()ykx;ytvr2^ruq2d`4W?U<$AaKq z^U-|VcS5W>f(NifX^l=NB{`?9`F+=NX+AS;X6{x~m)sPWWM&rDtyCVvX}&(t9mT;) z)h8bkl)x5ZrCa6|Gpa=YU1#S)kgcANtt=`dU9a_#TB1$}h60Pwtm@-bZ;~oSTohgo zE@gr;Xo)GB62DSDpPg=-M%o{B9v2*D;&c z`_l``SE^AyFPGY-G=~MaeCh;EIjpohT@OfYdY&$n4VSUEZr&a2XaTl{XM`<;xGzFa z!`{3edxIX7Mn@*N)5YpPf=b}qT?zJDa2A*03%IIq&1$^A%IDft#t|cT8K=ePE05`E zq608ku$hKz7>(EnqUo_hRfk3hvzr*#ilQ_^Y-KZx2lFE|U_|shNG6oCroVQo%b>7C z=0!tRPJo@fU`5R=Hb#x;wm~$l(JhbPEBs&6X^N%fhhSt>GpT%tpTR*YtGs?LY{dt9k)#cWSH#QdxXukz(a#Fgwy$pWp_Fbpftd*eGhT zd7Rwx89FJux80^a!w(Hcns3cWzz3*+`ynopr_QJ3otHZS6gkGsq|*Cv4GH!iHMRVa z_47!s6z;T^Cdtmldjkg`(V#UH7{?n*s=J5v8*;$w<9h8Iq%td4B%uKcR{_$M9a_g#|H}IN|6CDfE)v>W+#6x&-810 zF=xl1Hs-9cVCh()D)Qbg01!zDUpcNBX5@7kTdqc?-u-&-+?_C09UbpM2mx!ExUa## z>MoAy&!%rRw4fonC6Q>4}VtLZc6pP_oM~H zW2WwtA4i_()uUP~5l;~C$tK6!GP5ET8>cDvTbi0#fB*r$Xd>aIz#m`P2tFQIOV%Wp z?e3`%T9-p=Kz8DQk$<-*1YYNF7%>O+q>ENgW`8hj%3X2ZFwR`?4hR^inMwD<#^KjFPS1Gei28+90EBRTxX}`}MUz%zh+wZ3=4t-sZKC+y>Z|c$ zuc>a=&GLsVi&F>Kx;)qteJVLp41K?XeY=V{QrRPaeapXn!SHS1ow^)!H41%6yxQ`B zJ|i}NQ5hM12xYyJ=`5FuVk4b}OI!W0rlIM#b>ea{?ypD0)Rilk=`|I+m~}~M_LZ%# z+FkrQ=3iQNUzzgz4zKq~J_~epDVG!pY{o;l*cb><7S*sg_%$-Q=-bA&)LSR~jLy;{ z8)2D-4bjX>ZSHFYcb%Tw|HNl-(d*!m*z>+Ss4Lkq_4C(`!vh$27=HEF$;n>p_A z8>h^JOK?b^(WkFt(&FUxzr7gXMaO_**Ypg>E-1nG5y@}>O3W$bTjz3xh~_y}wbU|L zz(DLsF<34>9J^k#CizHJeN~+A^dRznVcGgEXWM+ZanQUi9w;h}7PLLld zx6bTiNc;7bvJiOKLjg2td`ZG|_KW1^ruu;}yrE`GkF3L##8rjae^^#b){&iT`@1|M5;+SGzeWC<7I-9aXN9sXmypm9hOc5gEyr 
z_)}e`hejeqXxM^RWxGqnnK^Yt*`I~(doQY)PfOS!n>@WSr)uflMJ|26ndv?>wK7`8 ztJiOSn~(j`eB#QOGR@73FE>oFB%4JWh@gg{v_P*Wqoq@!Lya!0KDOODEg)3&% z&<@d~a>|gX!sVk)wXKf6jgWVXlGYe$hFpIOUHwy{6c3boB-Rs~3jd)Wh?H+jsL>Qn z$A=tzXwpoIW?H0|SyKkG!x2C{!-=HeZOe?n@?4}T4tGQGWDN#7ky6xtfT4`9o`oX@ zh#Z%nv}37RU;MQHIv1_MU8!u39-?W^gCuB3fidP@sI*{x8`)ooD=}0tU`->Vj;= zg`11pY%wvCcYh>RK>J&X(I{l2w@zr1!gUA*26|*xFvOh^rQh8Aw$*WaR6eQ0gnmY; zQf5{od2uP-nw}^X(8vb8BH7YtEwOQ#K_n1lvCJ|{gUM&!HqF^i;sogmoGUzW?`>X# z63k_Zs!n(t%!ne{{4nbDI^^JZbvLWU>#Fc(%KdPFf$7JgZx6WV)%VN5P_y|NS&M2q zN(~h!u^lX@OgMN^Y?*(Qr#E)x+1PMv4g8VY#2^|9%GqQti<&Ha-y)oH2zBQ?l?swL z^))CSE1w-2;4r$%Vuq(s)n>b9PWtpKMhW;>sJLdZ&{9$IYi!*Cr*4p|25r!Wl{#}KMaGn+Nf^j#I{?>@?=EDiKEc^_xZt{pco&Ks%|H?m*YK2NkB z^Sg572oTx+Vxxebt6VvO3cLzA!&TD*o+>i(<2OtC+$DCUn!h9)$(rdex8P0}xe?sK1_Yz>Bz z+492Zi{z6qa44CPU1W?(I7SQU%_t|lJ#%+2f(+Fxs~BuIQG#R^s9Em_QMy=MjJmzu zJ{HQB?6h0UqAj6>egGn8u0QQ(prQ+Vp5pB$yfd$1cgDWPs}{)?__5?%GfvQ?#v7*I zg`Qu#mc_M&srMCP-_?N^w3>S#xDrhBQyA7}~XHA4e15=tQS!|fm@fHo@BFd~*Vl{4Z@pFhw6)N?o`JT&FrB1?-I zM*SDLo=T1OQq@VHZ!{}taqE+c2XX=M?gFD^15(!lo-T;x7yFeC1| z1fgqB8T;E$&Dunl`47B(2Ks>uW|uU#vt&*vqIZ2N{N%h;QC6UG5hl^BGb;)ys)hV5Ppa-YM!XmDgy7pPVc4MFk zqDH}h%Gd`-&kC3v1l@G4CDGA#dfnnw>hk5Hy`x8QFeMv}9AjdKKI{z{2Bl|0dY*I0 z^=&?!n?^pOJ}uNI;xTZvWq&}#yRM`83^uB! 
zF9R#T|G|ki0Ekr3rW35{?OgG%>4-TZ7bzvwE*A?$))L9La6CC^0HG@EhitNPD#X zb0p$=pV2Cki@BhIj>gq}58mC$%HeN(gUtJ7@zxhIt$jhBIBG=5?$328w0EaRM=P`9 zDG!?QeCr;|+tJY1NCSLb_a>owrQl}9%ejm@9cvL~L(3cgrUgFEK;4>{JS0ixW}>M~ z{oHS-{&M*GzTS7i`tWF2)AG0jbiU-XfhR`9UdRpyDZE>R0Y|FwEna z!*R2uy2D?0sv*z`P_T0>-gr6?*u}MPMoffLs}0@1+M}w(k3Sa&FF!jl zI$F;bUkNtR_#22#LZU?brwRSAuhEj6LH>KUZMqex53$g>|Hxd}H53Yo3e$+CK7Kfo zrBDA8Ua|2P9>$J$xa}j@hIFttmKS8}3Ph78zKq3nZhea1E*XngS8MLtDzoejDwpJ4 zjvx;|QnwTT@f;fFy*g3TFvqpO()I7BBU{k@F~)^7qwZ!aaKh&yMMFygL)N>jJ$}MIaU=rn_kYuf!X|Qgordy**3gLQYb)@gUu^~7wi!sEyBFEunFc6L zmOFiy@#gmd)xEu1*uVAmPoRq}c{idkSMt71s{fy^&4B#EAzBOfKdoM0b&R`LFpLl) zlCUeNBMJu(TY&#WC&7o1GD|ZMVKU}cn@hQ%i5Kap%jqkb|7zW7xz)q_Bw7O!%EQ5+J7Xy}5_Lc#f2IyNgMpZ*OR`Mc8A7d(qq8&)8?ep-rZGAjP zT@Yzl;w}ks?gs-Dnk;-eh{%V8z&pGOEP6^Vn@0zyuQ)3`%O|jPct5W6TFUcm!qzTE z^&kH2x8i9xu}8-KgS6Cw79;)mVk;TTY8lIfZ=@20Xee2}6hMhMxI*rVEv(0;7C#)j zfpr6Bq=^mwkq!rL!5Y z1;Qc3wjPv(waG*ODx<(3q`bJVTPR?&cSQ}f+@^?tL{6&WpOstb9l3i39PTs9h~Vih ziZc=B`VRD-hTV6yY?$aYoJ2t?A|FZH2ZS(fXZCsCdC*c%*EYWkr}*996FK}%%IW^s z+xM$fQDgCgZD_vwSH@jSxriaUrXw#AWgFKSq==)HFu@zF+hhG{uvY)i5a zpCP$`pqHcn`Bh<%RGgpD{p+ue7P8g)m=t8;s}@+X$aw~=xNKCI^cc*r{DF7c&QcUm?3IUTWtZXJ=-Wa99Msw7s(2T?&rbCTxe1`#3aiiK=jrG?50KiXYnB!|iG~UtfYN6_sVnm3UJw!o_#t>t+}Q zs*@V*+m>i=R`(4u=f7EC25N7sko>rb&`9XC#6|D?T5?05sD~92q`68T5|_s%)G=u> zAw@5!=;<{3Ni}^EnaEJ0e9o_*{ZB|k8dj~^+o7}Lhrf2p+XtwAheB$8ui8(z52~u zr3PDPI^aIII5icLnTP%A8%__6lT98yw|r(=WWe+kDKlnv?7nSfHr!bD?`4Yn)4q-V zyQOf5P?J12AD{jcko3S;M47TLBug62zP@B&lusmh`1#1BOKJ$~2P3#H1z)F)FJQ4j zBwnnmR6>nquv&?d2;Vv;Wmr)~FYxkAZ5&LdoD{FoUNwx^KOxgOU4-9S>|imd&0v(A z)@;%*P|Cq$X0DgG@y}nN^@7h`J2dYbv9xVUlMuUPsxIR#j!XH_JsGVR@_VcCQ~2qJ zE%t{w#8PKHnR4k;UoiiNnTW|(?7V#{bqAG3QKNXxGg{yq~a2F}`(}ivy zLwV*$vq>pub!w$+R(X8LoB&*IFOUfYp)jvTSHWR5 z(r_&;<6Sj4tOqFrHGa`6dP1N z5St&Boa@HjRV<*NugytvOliXMQ$p*h?oK0I=7+Z$ErgtL^&Z&^bv=>(c>E7rr!j zk8lY&8X36Z@F_ua2xUB|0j3aVl@tIHkq!1{r-(_x_e8Oh{1l_yrY!(4f@{M9!Yb$* zSEnIZN?N0`Uf!$8hY(4Zhc~;&Gl*;j%8!Fl=;z6^R3Y%<$bw^$x|IvaZriZ3v^&Df 
zq7?v`)USWQ#h-#V$)osQ&5LF)VduyOs@#5vqxCyQ=1v^_F70il)j!S<1$o5ZF{IJ zrn|P46ui+H4~&1m^Z2NLQI6{dWs2?FMhazhMRGLgH>1`Y4Ce1v7uU+<=)Ru%r;^=4 zgW6pWZPa0SFeB0wLnASP_H8}3Pab@oQvSj`m**@k-dZCg6-KU@k_0jH#`dI5R?ail zl8y);OR@tn!e9Ti^8IyRi%1-zRq{36*UEjk;2{?jyqR7@$+W%MI;}==K+#st@>HWZ zq6|=Io?xkK?mOL)txGA07Bb9}B)_chdM)W+{#_iHDm4T}#8eaUM-$R@4Vdj}nHs_y z9v6>1o8Tj484&u1AS-;}YJz?m>Vo>DTBfB`aw*!Gd3s8@QVfqyfX z&)!Bt=c#+0WH4HwS=Q)?E9oe)`ASEnLz4vEnsv~6EJDEx+A!GY-Z@H63QU|C{xd;` zOKxz|T-Sg&%aG-Z=)!KP$}uYyvn#Rlfg$}5U6V5lKv^|?iupB3N!F}NI$5l6E+gbi zw2E`c6a%q@B$I|grsJ~?xcENu^B-|`+1#(Zu8~QjI<0jqVwBtBno5#5zoS4y!-OG= z6zvtxC{*qX?_Q~P!eil?69@1pxhSo;1pjp&ouU2uWOOmllA>kF zrbWooun9o1;dcZKASF~Wj6KbDq-+r4La&_lL+GK|o!cy@^8swM(V{Atv`rKdh z{w&u>prjjM4Da?{KZp{O^;O~$k=m^-vOgwRj(21lS8|*Su3a%p!^^0(T*R}bnvr4w zY=F~SF)+Da(HNrE)>b3Jxn|hpyTMH)y@{QUpV~l#)_CSW!lt9;rGbTsaYwaVtSU6_etpTyjFp0@uX- zZt38Fq)kZWM@>T$96Psbys#;ADLgtGm&-f6nxe>L>HLBeGcO~r91F}Ln4udX(tv%d zPdX&b#f32o34RUKte`SJlNE}tmmIrommB_DrR%nphDM4=pGiUORTuFvgH}`0wRrpt z(xAwsd_RMYLrlkUNQH;`!aOG{0w8535p5f8Qidy7cR6a;Y7l#+-hJ6bqK{VMjg}gM z_Ap9*cTg`AB$e~*T;4{mWebJ0V+U5NYM-8t1$Q+BJZPX7XuwDrE8xs6aG0>rK>KUZ zfe|=k#~XFVJM5d>v&Ci$GmYP~aVdK_HWSYrSdnyy+wv0Bu=UA|x@8dMyd-RE_*sr3 za6}WbY9S!zhdNWj5K6))W4emTe#!EXO6v(ISq|%$%o~>uiT|AiAeBqE>l~T!ErvA{239O1Z}EEf5fva+v9xj3Z|JXc&?3=2qofQ>+@i2m#FlHGIeaX-P2 z`GwQw;0HowOZD3KajT|KS3kZs@S>UYYBv`h+}dXaO2U{TEb799tOGNwd5Q$a3ZV;> z{2vAOL6AiIxY}_psA6JvSD2Yjmyp>S;Hx?LCIcjbB2C<+ke%ry0^Y0vh z7Cz>oLred84HCDpw(@q!A>KUUiDnIWqtYUmFm56jnzMT}YG*$d$-k3P2i|_76#II_ zWm*(EUl}2d8mT%*idJscUn7n3Rd@exBN#D|%L|a#v{(wk#-p=vyXGdcaKGlJv#Gz) zL_h#FM1ZWB_JAaf7`&qFM#` zG7}rydXEpP6eqWLG%|)8uKDYi8L=odi)3kO^e7Pwi^)xV27L5wCEVwpzRiI94|eud zsS8CVsnUU_Sk%cF9G_fe_wKKOFgSsT!5DpNh?vxrh#P0yRLQ}cW-c9l`*5xJe+)ME z_xDsy>)-7tWH48GliLQ*-94!|A>NIbu2hnd#FXbAQ5t}NJym6C8OcF8OH+@aU%&aX zI#c0OrS?Q-q{sPE$9fqc6BHaj4yaY-s(T0Ms%1^AQ!@5zyL)N=7QiY5Y7ID1$r!!! 
zpW6H^%dUZ0>5kJDJV$XerpcM$q(Gng$JJ$)o06LqV<-??k4dlFM9u6ns1E!w^p;(~c2k+pRQmc(q+?oRV4LVAl z@}yO~=Th-0C4-SfH*1^TiEAWF=t}?b)wutS zd<*%YGlHWiJajMg8G)sUz{;Y1^xk)W1&pr+?OSc$1G}i~+s{_J|9M=jte7@x(b>QR zBw)!BRx~!;;cb}SrB^wY4A?X|-)V+{iPrJ{P}E!GX=*h~`EtAbjzwLZ*LC;5p^%`I z3?(2mp_XQ$!cmo;SBnk~pMUZ$9sUirJm$**Xqs}$d5pk_KV-#EKz^UGVxoBb5K?-h#>3!egq7O)?%>)A|P5xsSPB~;J8DJf2^x*k@1 zvF-{UE0Y`hkyaW^9xoP2KnRfwpJZ^gYOV`P#+l2g$mudQ&l`0CS__nLv`O@Pu~KRT zi<&NLPGI0AI2xt{xvo{1cDLw4V}MF=y0}X=Q1*!PX0dWZsyGlIl8F_P<`L!OVkn|2bi&U>&~hga$xM{&Z3*V&V<}}% zf*+7QHI_J^j>U{EShoCrpM*0OI%*U}a%zXflg9P71<&zrP6**rULOg&b zOlrped8!K+hBXf>A7As&qB7%07M=%rFxcFM=i-(E0YGm#T)v<6`QkG07lJ$9r_Mkl zi)K1Z2o`yvh-85}je2JUtI;M@IXjRBV8OsZzlceRm?I)BN#2-Q zc_WgI@+oxdW0*&!fa^BDo|cnks!8@o`&%y^Wu*0T*xv=s<{b z5iy1s5Egms)sZX@hfBly)LU2j%`3;zD^l|&zU=lzWAWBbh0Hv2m z!0;D!i&6?H$J(xYODH!}iJ6>6EJ0f#tdr1>8;T9NOQC@$#0?aHg)VrpIb`m@WRiXW z6DQ0vCy!VB3Ro?H^dZPmj}veyLY}`+57Rrj2-9?N?B(run;6%FK}h&?bc+NKN!VK@ z2$T{wjH;Qu#1oy%eo`|2v9 zbSg{x`c2o|rA$o~UUoR(EvN+astuK!2n>4JeBUC8lX4vvBml~W)l0$Hg!q@AGa%i)P@Qu4Q0Zkq;t3CaXu z9I*rzrN(CojL%b4TbMf-eK4269T=EEPOKtLCg6i=qOWb$<*5}{-;fKXlO8bl0B*}` z;3gbl=ame!<3$((GT2U35~E5@J@g!anS!YUU4l#k4`EEH`&_b8K9HcglM!?&Ur2ml zZ7{37;BeF?v;%Be5sJz&$jZtTQPULD6G~7)wK{+=uz(*3m^XnR_HexiqyR-D5;Dbb z9pX|ddcX;(=`6@jjDhJbW_STJ3)v9d7#w;p%#P=l=T@K=hx6od0j3f~9%3Gb5*0b- zGyn@}`^xzHqGWEBk7Ywb#`+F51aFw@$| zhmpW9F2M;@3PKIB5|Zc;D_5F@{_gLp(e1ahmF?7nxXDCirh%u$Usbc@_wM8K)RKup zE_zg~C9)+vJrzczZPEJbX>EU}z4SSEuZtl6W+Wgw=D$a9>>*w%-ovIYL~`o~nGP=5ZXb>^>v_26&;CsT`D+pvaNx#R7!|dtB)H(vL* z3W5J%Q({`dZUycjM%vv9_jz&Ic4cFHU!y(DR-^{GDqR_nVWDLr0MIZ$7XUEB9}=?S zw*06ss5}jsBX}imkst`c;v2B^Q#m#G1}ph~@mWdjTl+jvAt7GdWki1&)1vN%2H)rL z5tEMSj+^e-bRDR8BrVd(0({iX&D7IofzZP^Q)a8J_Rpi=P_Qa^fA^!-59 zd}Vq=Z{r!FDEz&>9KWN$h?_&9o99f{FiF@ZsN0;cd(+YtbNd!qJ^wz$|K4`p2o+Zl zdh9Loa>@CAc9;6YycrOPg+)ln<#Ui&+w-KV;}b}zlwL8qo&EQ^Ob(z$WYKGZ^nSy@$$ zD7gbjjNN}2UuA_vM^e{<|3v_4437$Yu_8;YG$paIAm)gwveT*%k?^9B+<0Gul!B04 z{4RqOyvl1wAZ*IMuR+euRcG8cLE_VW9sF8VxrzOL_UuvZ^lI5h_#O_hbtPuHO;IYZ 
zoe)GEsDObbJ_{73fJjnHgzKOIfT)y6H-Y5&6ak7O-dtH)yEnA;23$6?>JE7j40Qjl zmo2XxsV&$&AP}^viipdKXC!e+Ob5ZumMF^i`JqzrMUJyrHCv~EOXX0|UtG0wSez_a z+JJgj7=S zR{~VNEC2yRNK4l;aZ8_=66ORZ<3P0hft{0q!m6eR`=+MU(=$dB4r9N z1rQt1qv#H_$C5!ZrW+8YIGC;ci=!IfuVJ4PFB{_4rcI5#GB!p5E@DkVP(5nD#Yjp} zW+(UM%nFvDMA|GT4vxkwcc#kN*2lV3>4kkB!jHw+k%)Fkhew)uJ}TL8Ry=RhIXaI=}lIdc^?htZp2j2x-M(>Lx{afHL*d z@+}|P_a@WHD!Nyq+vD}t&H~$^NoK(1-I8I|>$Oj{rVU3#XnT_m1-RP&Y|3Wxz!a`G zRH0|m_j8fGYM%NdA^#q2>R8*`l3PA|cc4hQr3e1cy#Kr53m6iTfkmQG;;6m_zp1-511MpnExQj=f*lQmfhsB1!#1J!(6 z`kB*j^VVlQ8*d_oi?%E6+b?82`(E7@4JKoYF>hg|--f29oMPgXhUU%e_*1d+h@{<68YBT37j-vlvvbuEQWW{ljv*;wCKf6w~28}1ZeO-Wf2&Db~ z@Zyyj2k&6$_iq)P1#-|W)ae`7?C2CpGeK7g$%cL#J(~fyx8q+OSU=XI{+B^QXL4o7 zB0bJlzh6!Zm8;+1sY5S+EP}A?<@5qY=aUfYi@Z=a+PNA{uKu?sD$jGN#52$_oM z@R+aSi!-&zMec(6|4q&DMPP7k>kMW)17|NA<$3187z4lyl&b3!sL9pu0%$Ju@$p2=7_|hd}#K}?wfZwkg%|S8N1C9rcGyWUJf~mW-F;z8<6$> zrdzognL3=87soGZJ9EfyVb8@UtL{;t?}6Xz_va7qp104sOq2`iQbfHklykh!;TTBj zfpH@eM9Ip^LTS_>=MT4?H%EE<#MQy=EHf=Ft-!U(y{qdTBUO#b;MXWAu(!_W$g<*{ z2}$pzFxgJ;*pICBrocmG>tdGdNrJ(5afyi*EvLMz7MB@MLqK2j1krZdJiQ83|1>qH z#R{Ig$wt$A0^r}VLS!qSF7hvKQatQD@dIy9;4+=QgLQ0DY*^~3Fq}^EKeKiZ$kj8% zynyqDeht215l_DOE=syRnK~lYBDlBEgbP&~&DdS<+Gb{D7*^E2Ly=f06?=XEnf&SL z>_b5ZnBRq8kE0h(uQs$rVu-w$jqh&Pt$7}O5H-5#BQm)L^Nc&ibhGsQ!3Z8OAa~5! 
zWO~0R!F~M%QlXwm2*BiIHr~6b8Cbzwp>kNxJSbWoWq$>n2!SC1>Y#WHC#bTz+H44y zhRti(stW%Ve9NV;O!0&!?dTGUF);I zN{}_|(>eB!O1Wg{2JsGLLQ_CWS+&psTg;+Z88BC4zOZZ#3YU^HFv#Qm`uy*c*!9@h zh#g+~*8Kh^-S1)L4-Wngd4W6)s!0B>5AX`bvz@iS3H!s7!|Hv-KNFvjJt4?*d4U)f z78W9&0zof@if%Pvq5a}pqf1v;#iq)`v&SDfPyV!|o8Uipb6Kc%Y3m-iSp|v@K!77P z(lMn+rRVdm`9ElT;|d;S=7Ti{W_V`Ea0;jOjrxA0ewiXKWyaH(ynJ_l#D%@N{QZrR zmRmv&EuFr&Q$8oBha#rv`upf9Bjli4DpYcP*ejIyqXW&b7hwp%p`BHLqS18Ls-2RE1cWXCD7 zi}yA@|1CXJw_DHr`#?qn^mt9wETpzPViqQAjhBCF5Z#c3gLb-IRQLFgE7dyZwKnB;53u z!K{gG`?bqU$FsYkFKH8k6XRoyQw1V1dnqpU{%Pb8YfQ}X(0zk0bHa9z56}Hk|B)-0nd$(>=hfFYi4vNX%c)fOdEG++Tlmtq+M%EUFrAByj)7T0J;lO0#W_IrP zC8VOm+PWceb~vXhCZ8#>44Bxyz1pT>*R~!qs7MWaE$|~lzoNE+rx>uSyB0OfMpsN+ zTnvn70WZi^U3`#hx>hZU?yXgS_BCQXBH4Xb?dTR-RB3bWCMPtqt!ZG8U;@rM3)^;8 zmj4o}TQ$?DaJcz>u?l|1;%|hQhBMYMWtSJ&O%&}SpzKBnYfJE)dbAnL{WN!O82Y(ZSBzs?dEzMn z&~TOUf)(jC5|lF*A4X(y`*kz?{Ol#h<-5Ir+N_Tz}qX-2VSzp-caNVK{dd+5I_n@P_;$vn zU*Krd#m#S${U&Y3G2hlnkssEzU|C$B)SiK+&pw00B3ahMMQ(MzKZ+_{&)ot-D|Ppa z04kN2liiuCGCUq%C~h*Lg;?>Jeb`)KKR#d|W;@u2n2khwQm)TJ!%(K_1+wF z<~_tAc2y#QSsYy4x}$)7@ZOOPNlno9(_-o3-6Cxhi4s|1!va^URmE)`f=X-9VLbQk zOIJAi+Bt&oG?*V58Lau5WUd;%y#@aM7I-BOn->;Rxj73B71MH8M@q;bzX1OxVYTh= z##>kD1EB4k&x6lRY1hu`vbfllb7k}^re_4_?DtfVAv`t*S|%pvgXg^O!t0NN9Z^5N ze7WK+Ex-VV^nXJ-xZ>D-dWtbE8YzsccZ!zTeaS4w{Wtm$t>Zjn_6SIHQUEeyQQ?uH zj**Bz_$+Qx*`9iW(|9mV=xVf3<%%LEq8E=`76@LSm{=&V)4FP%I?j6d zFrZN^6sR38Pcv!A4F`@V2kl<(y~+A#a+?J5`p&Z2FA>ql6=?d3vpgrPalJQzKvU`E z-KvGC`0%2!tt?JX=bp--#nFd~8Z6O-RI!1+nfg`R25yI`;ngRpVhOJI_H!W+Dmt#~ z@%I@QmFvQRb>-Tf+qjUv>!q?W&Vl`zrt63SZXg5pfWkPq5U~Uf*FAGf#%`8;wq{0k+r zT~K9rwlSYCc&-NO^WdQ33CH!hDlbWXOQq}{ld7rRbzR=Eu`wJZYa3rK6|~m>w#cde z2kx=#{qMNPj_Ds6=;pO5UoRXBAR=>fbCKPd{@K+K)6zAC0|Mak4?;*EqTVN7L>>WS zc0KgJNar7@81u~L_+L;6R9*Q$sV9RLE`8siLbfoy#OtEk>wMvs z2T~}z!i4O$`_6R-oH$$0R6-Y7f>jPL<5x09&2TL=*7{wx)?QRq{A`w%;9S_Bc0Uhv z{F7{sav>5P9zIb#;~?k@LPOQ{8HJdO18^Sfodx$z+>` z$5^S#W`rewyg&JpRZK+5SYca_@?$7dzAFe{>QV_0Usa>87__A|U;KmXQ>0#XDwSY> 
z4=}=eZt7H_)XBMmL!q=Gy+)Ng;~uiHRk#0mennbR;Ul%S`*enTM2pPC3P|`C(k>t8 zPd(wQYP|tMnR7KmfpTQ?N~%)M!vKD5QaCK>S*;x%2dIj6tv}2*xb8BjtvC?Z*0ri= zPT5$2LXR@_DNW;zmV<-Ny0|K@{s5f4-SA0vN`kVK(n}LwkKcoDhycNYg3De%Muo>? zegblUi~B~)9WLQjdqVv-`4mtTuVP>rCHqQq50CAvD2!oURP6KTTF2SjLgoT%#%R$PKr1G&7K8E=%+$(z?9o4{z3M5-SEJ{tsbkjo$BjAUP%FfkOlIi?+7_ zI|Ss93#(Ru^BmdbsAPP1bJsDp?Rtw15hO5A=#-t**?hIKIyvWYF{H=duUmfuSO{0b zdtDG03@X-I&UvPOZcZo*1eZ=zn%)}@rAzB(W@W9eu2Q>PGZjyy%j9MAuy4n6Pn6=- z0j>ye7TpV3ZQll^FRuUP1*omucZ-dk+$qamxID)-sS0@gT?m~3+=sDAi0+tt4~?v< zs8xYdq2kq0I09j8`QNg8zzc(9zt(bs8O-9{iS2oX`ageCodN15bgsT>e!7e$Ff{u! zl~*7#DyCYcU*fI~4Qf<6Y+15ZqoATnrgNSz;<34CyEDBVNEZy^e{y%-HSBzC3;V&r zVXb~jf`phDpW9+kr)+I4`4sn$@A{?3?;FhyWL+&QPV1|cOHU^;p8US6Qx*zv(l`?I zKFR^qm4XB8z$xz!G754W{(Xbip|vMG3O46XUlqoq4`6dpWLO(;wSb3^@XT&oA_&Ix z$~z?RRL>UfV?_lZEc%P~>;0r3a)#n5Y$YqVRguE7norvuc$YQXN_VCb8R&*ergSB$ z_Ees+zT`LOocRg)scZ7_jUJ*4K1e|MV;-or_l-|Mn!_tOTEtW7TI&=yvE5)Y2o4>< z{WKCTuBZw=`LHEqp&ukJ;EnBK+w#u0a1f<(ty7DSx@ps`c&<$CN<-DvDK6{J`%~sR z({Ur5`r*Fg6G@@z4VX`QxWsQpMoV}@&%5OlL!~AmGcd^tEvUpLqeELsoRVCzvCb)TVmX?7I@O@N~1VC$sEyDZ9UTtb6b2q zW53X_EV#go0Rdg1{3w5wvggsSpQ$Wjcsx;(ayiX5^o;i;G)u83+&L;{AOrqIPZL4SXvrRik9rS7WMmgJ=22g$Qy?3OI^&~ zIG#ccSvAxe)j$Gk!6rk5#-pt=Mll-Yt4jpJyXyx|){J_8r;sJ&&bj6O=DNMDos5(i z=Z^h8LHQGEYsOP)Q*h|;?xE9RTL+3nGP8cSbI^>@#^(bgeG4-jBaHflOVihN=X-eE zMk~iO+(RqGbn#n3Sq?!0h8OE&IH(7D{}KM_AHTIvKECHNY+pw9b-rmeAK+v$1?A@S z^z6{D-j20Pr^Z9{4zA1I9dp}xTjK?oyC6WG?oVN^4a6UJHk@-t#)oqow`bTcD0A+H zP2DLaQ3KA!XgKmZn1Ji1xa#hBQE*>8?#hktEk3;+7KGHOku$KK1mFmESypiU=6HEI z(!Cli!u$as9>wD=QdIh9=`CJDfwk5~#mUYD&PGdtiV+CN)s4+N%BD59*+5jixgH3= zzv@x9yL?wQ(_l3f?(zE(SRq;$dt7y35@Qp_{6QJKIzDhu=DLDDoSQTxiaP>s7v_gx z2)AGe{u--XFD;alpPzI#*%}|ebr`Y7?R>?zteG>yuV5VACB5%@-{RVrbW8j(lu@kd zXlc9Y_O6HPJY>Go`FQY+n6KdzkIxc48)Ib>=H{vQog7KQ;3L$29$fm8@l3xdLx+0f zPQAK}9KPP*-w({kb?}}xpxne^YqBu3i<67wlL)_fGc_&pj6@_loIKNc;at@b?S+z) zaKKxU$}f7-)8wW5yuGoz=N+eU15SNdCw*1iw|m#CA&vuy*NN+i$FtkXj{Ww@w+Oc4 zN%Ybm$ntV?udm^DdCGg*#!_o*4fsFMLZP3Dn@HT1Z3~oF78T^>^Ro>H3!hb3Ga(@F 
zK63k0q1w={MeBXgG(_e?iP51`5~j(-WbN7%g@C-DyicEAelpVis{;CJ$ffPk#drUK z28BkKjqQ>j5tAtH`SW%)zv_*}RX)<53%kogHQYah)pq{3NO1Q2#Gd~CTWCDzp(7nO zCJXmlvZ>isBI4p%BlX_U>-X>3O^c4ZLV3lh85qivDbEx@!!!FPib+T*_oN`U?DuIYdL5y1^D*n-lBU5>~v#Xm3 z&A}nQtFu#1#!2~SXX=TF5DO6uMC$p*s{>JQo^Xw&c1-fi!*$2Q(936C-$E`a^s!Dm z$2h6RoPR$h`kZ&?=L;$-D(E#OH$m608Te$CEq8OSIYQoun= z`|X8FH3X6zi;JrLRrV&KY#9AF1H_2iyn;4m3I#ifNFs>&%H|~lDeG*#Hjf}@F3*k0 z&xRBn5G|H*ta>Qf2fAk3xZaK~6qq02U=hz|)F)oeTf9n2@nM)1v-LY@)Nd-8rHzcU zUUz6LVye|sqGDiR>uQyL4FgAC73Yvk2n*9PGgDFA)Ax6_wplZKHs=)Nb#{K1m66#J zf(h`Gdy29wBFqi*Yb@vIq+yk}H$N5<2E3=Fw5+L~UNx43&-r`slPu^m4Vu-fzS5v` zG2$3EVmN#)=usZ|W79oLZoDKRawcxq8IwcC5zBkn15Y_#LfkR@=JHq&`X1aM{5A~c zYiX3RmJHUnUooEs@Mc}Nj-rmCFE@9)Ri@g|7nPez&*_iZww2{p(NQt)$jQe?=Z4lhzpNS zR*^fCeL_Y|Q%tM%Q|D~NDoOGxJp8fekmeC$a>>+A^2_b0){dv5q-CiksT;f7A3bH7 zWStlYOt-d}W17Fp$hkO%C+EFhU_zFZl4-Oh!)6l~k!W;bz<~-V3tNg*_;A=r&$fQU z5tzHGvM?l^91J$6lDl-_jeyI#&-5?5M(t<5AkjPW<*_vjG*EML)c6i>arObm}e7p}{wOlULSMNF@yy zb*_a-DkEZ~*{>UuhNYKJP8idra#0B9z=Zy#NeuncDuLIFAZG97{rGTQQf*iVB0y>3 zznAd^PMu?Y>xbEB5@8{}w3LBzRcJ%7N@?{1McNMnmiw8j1=bjS=iEH;4*L<(1}r5*0T{1n*T*7m}Uxsljf63e#Hh8X~_UmZO zhFKccvv#c@BU76@#wsX1R+RLJnUaV|UUqJ*Uv>;&$?#5UQj_HstLG4I0Lbn5(#tL^UMEo=Sp@&d;i5=*PzhKO0L1z&lf zBK!6ZUfI#`N`2@1CL$u@@-ewFc`@`2jyKJ-SkdU@qQ=JLkMm{OkHBo*H+D4?*ZpJm ziSO;<_4)q7OcRmc*jQAehMKH(=0G?4r;45*9i4oboVXA&{@nKB8vYvUZOzK^>d#{m=Zoaw1~twQL>S_1;v^nR62_gx zRCh_CF0z#t^hd>{^z;^q*H!-80&Cu;*(amm(EPsYl<4|;r?@8Vks{Fbs@_iVb#?G9 z9R6DBkB94~%zmQW| zSUgLV-qZ8~jPFV$pCH0|dI!oE&?Gi#Z*{wyi}l5e7g3RsV2beyIQaW_e`b$pdx^HC z)ZBJ>JT*CLw=8F6*P3aP2{fx!%oV?Y00e3j8ykxTJHLTM0+tjJZeigUPW92Jwy`Xz zd;&h!K|+cq4!bl&ov!$@=WyhN{Lw?RBSxK2%uAPEhq}Ut3k}Z&W!y!J*||P zkpYsZ7mg7TtINhmIx2y63Fqmp2$UGds^*ebqI@40E%%UuRID*EFy2ITfyi>FnfQqn zk01mAYFv8`MMW)~?JWeiEv@z1sMLH@{Uf|Um@iJKZHD`YWqgXy=8z1oL_x3E?of&k?@5=!Dzv;7aNF(bUk;`Si*=vn^w~cDj;TSp8-CP!0}nxx;DqylR%;!tK(R^>Itjza& zMz6l?E+!tpQumw8Es=0KS(!F2KVp(St=Fwd=ZCPTkqM83oE^rv_RZ2`t54X4RO*M=*cH zhD0=8ywK8AEB0q%W@=ps;IL&kT5#V@;dXubg*{4mu0l8BzEl?GE+6!eC{58hSj#QS 
zb%>AHrFDE*_n*WGPnB`7uw6Wb(=#(k8l2@HLvM70;R+QTsgBMjN{1cK@1&%0kshF+ z?f$Lx(&S}UmQVEz3+e#6dSi4Vw<9I19&8+?vfSP2&LukZMd^y|d}^dDn-}KKTwU7F z1H`$!L1Pzqh>yRmF*@q3#Ji>@X==V$%+?viW{beGf}0*cX+aNp)5xeW=j+Oi&cg=i zfc%gZ|8A*! z{OEAy5b?L&O^-ayl8HKkTlnz6I!*F?r8WwNZ@~4ogQ_zX1bQ2e=mt}%R~qD+{)+k@ zkd&mFl4;kM(DqFYnWM>Nj@jeoqB8%bUua9cEG=E~!RdAk3xDyd)+=MHGrT`i3a5kV#yk$Ayh>ReSE0W8YZ_S05ig-&F-c_5Rn4uDKZOgS5P&{;BOikH6Pj zn&K~arxXh*E>*7a0D)ys4t(IGPD4U9lr!z7u{&E3^%5^FC(zVmL{&ejIRuY+^H?x= zJ3t*7BOu^OfVH*l&6xj9*W`>CA+uGhpS15C?i;WM=PI3@Gk+Ny-AhGAApon_uO8dQ zVyQo)hlm_Drg_E1+gubpk8c=0VH&6Xg9*B1WaSMEQ6eHEufXR?el4lyy_VS6+5pE> zyzyj05(v4x$i0xK7(=wEObY{~HBr$X;punqA~gl>se(=PwG5qvK**0*7- zQ{8*FzP3$D=?eiM_%`RW_y+_mA6|g}chR^8=`~0@E6W*Cyv(d(I}n6~9-%QjUKIVD z`vtiXLyyXFjpAXW`}F zhkQH^IC-if#_jKn?6g{`6cuZZF(q!%n7;EZC9C?j5K#|pp16mG?DZ|feiSws53J#7 zdphcS2K9c>bf zqxYtI`g$RFxe1^PMq+;))4skqX@~hS6m@icpn3iqszB?$_t|Di4}%oRdBScyKo`vc zm+11*@($;Pt5Z#*`C6L~tM^zuRoIfK?ckd2a1lg3yWcgOlgjDur(MWdxBe#{v*6*EPNF#t=8Op~=a8G{N z9Q9p<5<^-=Wp;r^ZztBg(6~@?;&+($rie4_yGWh%lJ;lM{P+C>kO}6ANJy*~RUSip zJ`1z9X1bC?6dAK%@>QD7?3YaX_36O=>fc3>8VRpX>6DW4g6@B4@CpLUPC(4S%8J(I z)rGQ>0{D}Fm9RTqmMP+1vK+>>cfPj(4~;xs%R;AAFv{xx+%R4_UbZgL=zNWnyS#gm zz>w9aQ8vY5!q4Mmg#xjjE)`KiqNU|FmZxYjq17(SIHm!IRF9ww=Fbv^kAE>^ajlK4 zhxi8Xl+adLBsYEk@EW7R+4|(d<$YAQ^2R+D(^#|ar%y0{Rt}HJc!N0|lb4rvbaWX& z@_%CQGT#+S#E3IC{CiEA9~({VmAk3qw-SYO{&}#s>L(>Rd0Ibm^DBwVu0x5Vygrg$ zJd8k4R(=;A?nT0Vr->94OS#r?ZTqcQ@Da~;+E5Nxm#w9$)1NaGE zvS1T6m)VsgQsb4CmPS#i6joJTR_33Yvdj)$<;CG;jJbq+_nsW^D+FwfDRoy(sEt&x zt}K*cpB*mq4ti_QUp^ywOKH=xCfmtkk!_EtrM)VV!qq<^JmBS}qr(jpDwkV<(T3}L zJE^r@oMXRem>zl9Us$|EEE3s72ohjr`>1`dMRVEIQL&QvO~_r(quGuJ+WMt;oQ2@( zexsi+*may~gyjJ9?65(jF6#F;y1FrBzc0iWYj9A8G6KqYK~XNd&C&;u;HYW-7|O{) z@9h(%0^{5n#t!JFYm_~{PoIyY25K-baBv|}S~^?RKW~UAl;zG=C;dDD2oA>Q9xKl{e*8Fza$QXm!^WQS_cukvV2gBlS5lV5?2=gG`Xi<|Wd``1LYa(gDn|m_ zacI;RcNbSV?S8iRVp~GL7HD$P)91$K%x8k1Clhc_E494PCnZI%GE!e}oD>ojot?K) za&}JIm#Uhc#*w!b?8icZgqtDJ5MEUC%MuP?5aYMu)u 
zbe#d+?04IUz61dwk3H(sCuK&P&)_{5x0r2Dr&lf4SwS!npicbyEjsb2v!&;hU_08$bX%@WmC#Y>>83+~oJE}?_2KEYmXS!@kdl8RLQj$%Pdx+-(qq*v% zg8>OXe#7$wIYRbQU%t1OR8MDXHRs- zFu$DANIXB9HRp8oHl?`4&)>g4VcUs*B{ZU^x3_>SfGZmMpswV1j(xE(_2k*#t2^Yl7;jB7 zchomz@6F;$;a|qe2#lzyX^3Bge?5ytvX-+uVv}9G zc(%MuBpC^b*;%ia4wt^6Vdlst8=ImheC{)H79xtdNuV17W83}KX~5ap{Q>3d)RYiu zPw4kyZEbbi-KpPq9G{+e{QUWS^Kk%TjQGgr$<~>g0p;1^$qtm;0l)VUUwr!?O zkz_h()Pv|o-F>$zIZJsrfl89pZ%c$J~+(n7Oa3=6_L7rm%?FI99H#uQ3 z5fdW=2!i(nCxVX3jKR|A~lAP*&w<^hz zQq0%Ow{O#y14XI5lYxB`$k>~k@0<(5`pFxvuC6+|LMDxD&PD?By8Xk5te_te<7}T5 z#bjg{nRX8=V!sob45uKQ_9%#!3)^9-OC9+EY#8)xWP$;Zx%S~<2F%TP5Xdcrh&QYD z=b!u>t~)oewHdB`H7Y%E!0&dj5-CWDv3;6=3B}gyvOxz|eIni@P?Ne&DW&V+}fp>Vkm{!6gV z?|N`mg#Xcf{U5XCKHOckob6xiy9SJX=;z{F|CfB(wr@2#AuKj2+xzibsh@W-x8tU7>vH>23)ctRl%z(sGuM*AW!{Npmdj;b`C_~vVE?8MVc7I={jk|j6B0+w&d7i(Nbm8Fki1# zDhh6H{Uu#xa$ssh(ntnUVkqj#nb}6yC$_;3!$;!X z!4Y`1==KvJr!>vCwQ!KJQGvXi9AMfWLBOtWS5NExDRpF1t+6O^u^R;>hS&iu&*dft zCm5cvS!T=>gW+4J@k(q$r8kJ2FWS9UZ=eQYXz^xxc=#Qtw-~m53^gSQ|-H(sYqqJ05Q&Te}%Gf~W#!>jAf`XSQ>9Ik-^5X?~z}M}H zPyjCOztVA=fo{F^C+zL8kp4TPP@+X&Uwh=TB=Sg%eUc(0X2&AWi10M}1tYFa z6|_RRj1P-r*7(i0e5e!iNdoqR>;7t^Z6C^$^ct3IZ?4h{CWxVY71BU)N3@d>!s zdh-q4`(2Yr&)acu0#8v+(xh?$MDUa^+$XKfmP5Ni;`__`f4o#bDpo1)5;K@y<}XSn zMict{xpQPrx5yJ6BF-kQp*|r-^Fv!gOJLriWvBq4hpk4DTg?TCdXX*Yf`MMX9M=h!~VfL)SQQ3)h=Pf6Fd3_KALQ(&QpE-M=)FqJb3IQ803*f!&G zxn!9sDEj(s@_@*Rhg>z;f@$78{QM!-e^yu0??mlG7aR~0M$=yEOx|puQJSG-1_m+U}!N`DF_~F=u^3LX73ng?Hxx zE}*Z0LjCAhdh7Mn6n3_R8Zu(gJBs@&z`8(QiQMnszsJFLAzE3eCTCaTlJiEJHW3O5_4P?5_jSWG;_8RJg$ud(4`nieuyM}@t!;LmWgfskr zdJG>{&qd{_fAcB>Q9X1_6=SkvQT4TlR34_DC)2{2nJJbjTwIRqC*7-9pqmkZ5YC&V zCc$$Vj1{Gi|I)z9Fmr5(UzgC;g zUE7}Jyr@bY-aNstV^LC`QVnUW({Jg&xTvnIl!K43XWO~CvYtV06Tb=F|0A!VtKZPq z^*Vsr^#d!*ziBhxor(*#vUR}@n_XhccbBc-=xgMmvs*PEf?Cr3sgd#V=zMvT?|yzT zfI|A>+Vlesk@K1&5q1GLy6$CT#rEl%HZ2y1jd8q@wj;yY{I?JifDtrHVx1ZH8#~tx zjo`co)S6t(4^>wD(sdVJHLYsxRYYBI!~P?we{p$=rC<7~I8J743|M~d5T-YE_4vQ!2P%|=g`eFl!$H&LLyi@MoCw~fs5cVf(a_AP#B|9sx_gjx+XI{7X-$Jvp7d4X# 
z5{vV_U=K{r%bWi2lJSSpbg2?AuVL$fWRud1mys|C_-m)0NgT-HiLCE#w%XJS{{iyC z>Th~Nfh@RT#s07#7ArQqV`k^RvDhINF(ihyT{_V(3u^@|RS_bmH~|MC`ssuULUxy2 zC1$^muX->O0S^rH$zkG3e ztU7;@C2_6JLW|nWhQsEfw<6zMnc3R=y!Bg2@?ImIWK~N`5pE#!j_K~pl~+$6f3s`l z?`g^}GwvZkw4?wgX!JVt7}oyI$B^yRiA7AIb3YZnq9uvS{v`DpXPZa*ayrlQC%n=vruV9h2sW ziOKhFq;q0Vr52T~YZ$(Zh=hN#RDU8U){!hEDTT?>T{AWn`ZMQyHFzPw-`^(x5kW|k-CbT$FgH}7UI+S6 zKQejln1@N0T{-@<2eTM#pl>&W#7+e?J1nWIs*=tQrRgjJ+m>eM>UnAS_+*aH@$ z;l31)!;7cu;=Z5cM9iA_NjY~($W$87-=AO z@R-A3ExZM@)CGg*+=u_;<>ps9R^~cG!E!)xJ6_w$SJfC!6d#x!kB^SRyJ?HagZ&b` z26Ffmj*c$90=jVfz{P57T=^>myMdG{v!As-z_=5pdF|9t$9W|z5>3WhqTDg{P5kcp1a;MMX(XrW*S}5QvSfZrEjn zVwtta^_+QAGEH3W2ol5KA<>eQ!-+oM#3rzlzuCmCdOV_69?BpnA}j(53?;in4EhQDHt*H&AoaXm6J?EBUb&;^> z{S7@^YunjMP0wuKkjim0=*fZl*zE8QuvXnSCrM}bz8v)XOFMkLM%lG6x6UalQhI@k z1QDZknF{?G$KS@VX9cv@)+UZs{-Y#u2{Ey}!h9IF+4}5#p89>#=qj~Yw(bA0QmJal zqY=!PRVDb~i3ke^(J=}O*#{=#gs_ZZXEss(>X{f2la^MNlS77XZsp1=D{HI!=WWf_ z%X7;@6KCmbWa(4%tgrj~9`0o%oY{DCit6III(kJ}k`Cjz-Q_b=6q@yBPbQuIi*+_m zi;SYGiBe+@7GN>~Wo3A*<%ke#e@+}y7OKlv)xPu}H^@Kag8!-kM`R4(SPJ55@4rd< zFTD^=!Kv^PE4fJBe#4>jgkoyM8p7EEXoIf zm^VK-Xhs+5YL;$Dr@B`FNaHK1;FMKbcCT^PFZO=(7yM6 z*z9Vgj&P8)k^uF;$%$&}!65+F7*+_2wRJ+zS6aYXatTSuhQ{oZMkr?j-&(!H?IUP! zu8GU?3QsI3H8J;hvZ&P&al$ss+k}J!n6E$z>|~?|wo#O`Bb8d~YloFOPS55KcwgS6 z210IK-^}R2=r{jp`L~q$hFRJnNXCz~a@R4o+tD^0=9m2<9`qW`Z+CZrZKmg4BnA{3 z5n*a-y3R@i9MkK36Mv9{I{&*dC}509sPpMWvrZXcu%NFwXJ&-^+EU?CfF%KWQeqdj z_(v;vd|&iOP$^?gR^>CHoqY%F=g+BRWh){&Xw0Nc+S-68i~-AvAC3fo&4kLLl9Ea? 
z)b9?p@EY>e&%TJ#$`~MI2Ll_DFFMGmDe~X5V&~OxPLYz5BqFhiz76|C!}kVO=WQ4{ zDJh1{CQdlae=-FTn~^}HqvKDo*220>XaJsKDJphFsw93HNv%WXU(GcNtG!R^Ghh669D;Vv(ZJg?U@0cq@*NE$W%=7@+bm z@ec2|Pq*biQj9-CL_{=`@$@)Ca6>-+2XVL{{-J*r%O%6g$-4j&QZM9)s%inZs|wTGBSP(rTp*anIi)kEKs@@ zOOZNckmuRc-2_;ZGTM|@E&6wn(J?fLySrJ^>tGMlYM3jEsSi(MsZ(R$5TdDS#T`;ne%ISqd<-J?_HZ~s@ev8D z%d5T}h$2ESDvhQK8H7Uw($}~gw{|N;pZH44%9fSv#q$;BkP|XV$tw!+e^U9a(eFe| zM?^HHbM%s`Fck}_-OA%~EvOFq`roqcpO-StZBx&uN?E&xvqHFe*w}=Khd*K3?mZpw z@Al8z>i+S{5S?Idxls#XeV_|HTQpl}a^2tA(JGq+ypK0cvSaNvU{px>wF8>YsV#a| zr**C|IpO@I<79p9f8>E|`(&+64!29g zm-&JyDQk7u#bj32Cmih4AXYDC0K#7(UK#YWVKG&Ex@bnDwhH?TkOsfszU^$US9@h> zWbnNSJR_?~J!jbFOzGTFRY&GNYqLgd$Ax|ygR>E&c}`GnD`8oRuQPz)7CIux&(BZP zV(OJxf3iC>QMK<6z5dTh==t8Qw~DNqn$7vVI$3ZMkIf2FHcg`w6`S*!g4v)Ckd6W9 z3<(Z`+<;%XW*|A43*TYYA}}LEtM|0iME2M6A#=p;7@*02wc0&eip!E`W&6j1V9sg$ z7C2+%xHP&Ws$kH??TnUZfW4wgi3oBtRz7pOIsrc8goo6w+s%`DdWQvg1}rR& zfU*M7PIcR)#jTzt;kuf<0IUQlm&rqyJb z_Z8o2ee3}=1YbCq|nV#dZ|3K#ZPfgLgK+uvS7 zuRja*#nYYp(x8OYpR9@q?O*=$WI!H5b{7@EJT^KC8(oOQsZds%wKeGnOG1(Y{c4@; zZJG0>q>{dA+-S{hIX$%o#u|_&+3wFJa~os8!>&_%arSjRc4K&x3Iy~xF3Y0Gmx~`i zagxmkk2pPAQpBf1&ya-m(}@WRq+_%=w!&ey^76hU zZk3f^c=ZQV>Z&Bp?HFbG`}=bb3~*pucnPEva5xDF2r!>~8jU&-sj!xvZ7Z3gR3i~1 zi%p1A0(3}=ZxE%!_10QpEOalcRl1Yj>iBuA-^**>FC>9OxAFV;jgiCxk<@H#QBvD? 
zfWu&WA16-BlJvFt8-umLRz+n+GMDQSSi8_`YU;gxOh%jDYM}n-7%q0UYd2B>&1VkN zer8HM)7nNY2#LSd-_P%oN--=sY+O1)PfxEdDvaB=FtME!p& z?0tP=fSCkFa{uq&!7(;RCpKZ>Zy#SEJjZ4CX#T2J@3h7e$GMY5tj@44jK`5YJ!DR@ z^vz&ebEE9zKN&j=6t>86jz6oYHh)@UxMt4S&s_=8*f@sI~B@FQGWjF*kGUa zp}S|Dw)X#5-I<0%*|2?lDobukmPQDXrJ_V4Tkb@XkYr!8W=r;c?UpoJY=yC8OKxVy zn%#)(BqGBYOCgLQdl-7px}TosexBa<%lqX$-Z>ojz;VoSUFUV4|NrlQUK8FFg|Q+6 zr`yger3cjC{w{e+ckISN1kkRpW%XD7gnfJ0y~(Huqxh-fNuP zxZTxA^F2N`|l&` zmo#Jdp?IUL)qO!vt6-N%;-TfF?2j&N3cPr)_w$frSx(8#%?9zEYM$W?30P@o5`=yioF{$r(skV=&O`ol{D+j{xKlp;f3$yscw}m#6lYSK-=8ubfIb`#9Zec#+nUVJU zmP1?&3=F$Mfo=Em;~bUXjl|jJU@-A;eC-O&&hCQ`s?twZaaB?V7*=MUK}sD#7C&b6 zu6-5pF7Du};1zZCL84C&U@Flq=-KFA3*^`&`SXjsypu$ef}8(1V!m}#x9<7LWvvj0 zjGNX5ne%*Vj=Sn-vDGKlc>w+DgG$S)~edEoTI z&Rt)whwYZ9cdCLKtuhnFuK(hfAN-L6a^LyrAeU;cqmvdN4=%xwu14~YVcN+hr?kAh zW%S{vgl?j6)}>kC-(U!?w=3~jF4nlB#Q5P$$W=#UcWd1}i0SCwd>js9HTS#h?3KD8 zCv`|xITCkasf(d@aF6(pgF#H#SV$pS{JU^1fx8qjL*|4*6;V(0!J$Td{G`cqbAoZU zmsMtpi*eO!&VV@W8MNCD&4QZ3hD1uFTJj=;!?bb-`t<1wOs^YlU@Yj`p542*S-gY; z2oZV`g4h{RjDcc$c5ZU=RT{*nMK(z1^xW!8^G`w-%z4^8)~Ay2mzem}1!DXQGkH}> zwLaA)adZ$%$pvgWHq$*IU<<2gGD{|JjuL+QmllTzs_6ZVLg5e9d#%Fh;xu^*8fClO z0J=N|6eXasA5)~I`gS1zf0|hi5RB-ugY6%!PbOik$?My1qs|Q(Ehflp)Qu9WsE^*w zm_;2`o1G^GWmi@*AP}cd#tW-B>**O7L6Zv)MW=ox+5`V?DQ~S|I7G|>6KD`zlypVc zl#d8PN?YURJ*-Rv@H>)8ViocCn*HCpxtN_bGQM|N5UvRkW>xnDfqRbbl$uB^7i#OB zNFKsMs$u+7VPd%^c<$gnYu%fjz4DrC?F|!W9^#-;?2{t_PIiS_dVQlVwGGAy3GJd#q6n-51#Q7Y1y`u|}ppNVUWjTNX5;kxAM{0C70 z_erfA)J4!5KCM`ezIM=h!zpL2Lc3vOuWOP-E792<*g&&Rqo!VGcS(?yNW_g?8OTv5>lBlQT66Gx8@3=b3f`krSK67IjYj8BN?=9&#SP@nG8q}n6F0?x>{ zk^+Co1{5!eBaXmrU|JZDx+c|76LIMzs#*G_{ z78APv%!fEnM>W5oQ>C?igDYh<&}Il#c8H1FYWXC(?uMT>L#BR3AXu%`RW9849=#Ie zcMg5vjKAl1I60XMHHv36uTGJ7E8RI1OxGqN688I?YZRPequ0<}uEP)ZKdx9wH+VdD z9$d3?A^a26h$lK(D{ynRZ?j%EH5fyz=cCM9d=a562@RZb>rRO+VYnm)M$~ zO8#FFdz%zos-69~hV0p}@aI+{(E&%|wS;LP5K~EqVDw0)!`_tFNL&yW9fd2nTIX|FeVWy$;&fsZX&W zNx|Bt4EUhDqoSf_U~VxKd)r9Lg^(hsifIKe4Rjy?Y|2@ozc+GgXFI^&USw;X4BK|T 
zKjKFt!+556-a6c#owhY%DZW**Aj!$p{KyO~Y%(|!U0Y7DZkw+q0()#UIKY;>AGdGh zaKK4(`}q0!Ad{_pM^Iql(hH%z`%L(ZmoKB8pZoth+R=yB*lU}Zm|)E-`Gtj>E2%B) z6`ULwg*#N#nlVPT{0m0=MYexX_wT}K(?}0K<=a?X*3^`%Sok{mZW?E60m|Q(rwv`p zR0STM?djHY1GKyWYcAU(J3I&oD}Yt_kO|M?zSLXpYB5?TI-6Ip#juDo$5(jn%=R?q zvhj{kQ2sYgcnzd!dhn#UHjk8-q$Y2#s7-|<#%`zvoyp<|xQKMwNeq-rtpf_uNwk{W z<0Ioj;N;_jF1vl;$i~#1x|jt@d5m(dPWsEFQ`<~QOZZ*sH|D7HGXcS#Vtjnj16B<% z8aAnr%<xjrs5rONY28;?FGY9|>vRLO0^u@H1a{LC3mbsgF51!ke3naMYA=v#jBvMcuSlGBDy zgxb1Eaf4-P2Dm>s{cMh(L#Eg*SfV^h$l2iq4it|JqIDaSa+$rl(9~f{;)&@eDb+s+ z9fRo#?bZXtaGSm~D0I6*fpeVaq2GQQuqHZ9E=cmI<=BE53ke08JpX3&0luY$d$9hs z8EqXCXJ)0_ws*P^YEwx17e7~y)BS79jcp03vt^SL6I&IIwIZnBOnIq0+a@FLG>1Q^ z3aD3C`UA-NrLo28_X`_l7aFs&s2_e!5zHV%m}n<+FABuiT}@9-S(-Kw4G)(bdzrtl zr@w!hd1a^n0TcUEZ`b?JmR*J|1hST2^-U=J@BWv&|PT`%~{s z9VW_pP}b&%az5CSx5r3)`g8PS*oN+=X8+&TXV;-rLpq!;^T_KTL-l7i>`ZQ49GWM1 z^`pSRJSq*c?I)?C-=FGDNHPG%043qK->yKztMROxpV#!-HM+eFZc!SavwpKs(DqOH zlJ9ll0_UAp$%)=xRvYcSii!%n#XCaLr#v1W1A$X7UMS8#g)pL&ByDYzT`i^bgLHe?#p=KsDm^pr(#-qFAbRiK znbcpgZvha0|BaWwn59!>SgkkpT!3UFA7-bdQ3x7#_XYj zovD+FQ(xJazEVM0RxD7!WSW)hbWKk$CMEeL=Ir-m_Hc(ID5o1+7}LrjX2jL#E(KE1 zeU;u0g9!V#slS_^GD$R%?Y4kiDr%%o&OZHXU`9NPE;fbX--AD_(Uu7`mLvOEk zkqp*NYA(+_B?agr=>j6Mu$LoA7}Du7T=M5-#JMX0)aLq(7DJ-`W0c*CcoHv2gE9R6 z%*@OnpPrqU;q`U;{Pz@3;rDk}T%PCv?`~n?avE=4D;1rb>|bB3_s_Y1by2ryL!~2h zGJlt4bVdw1mkr(J?&e)mk_0Hvrh?LfO)JYTWKLcl&otgxRdv-b_!R3)FkgZ1&@4X4 z!lI+Y`-jS%$rqqNHKl26ImI12hG&>Q4Y&CnbjQ`%ziroA*wW8Qza0F!P$9tOj zx1?qtxolBfa!Hi|PXC;WC6b~+^FQYZ)Al%-(k**ZbgS5^|2l$DA(VJ2DY+(h9E+8#EA7}qnP*K&u-czl`2i7*grEYbF2^zr?!_GAJJ)xVM za-%u7!K^TzD?#kMS1kT%LOWZYwxi5;LGaJ*+}sDv>MUykRz8vFGm=K!H8YKo-wY$e zo2=;bnK?18_oJt&+u?F@LuMqN$;pH36xk(;$wrHI6GZruO_I$_?V&q!CRVLmccb=z zwgv_M{NoC28?Jn09ID&=fI4FUi0FIyV2_1OJGM?^&p zEG#}RlxLQe^*4FFs;T+-ma>IHp{m*@*g^6j)z`NUbf>mi0#$XNT|`i@)N45{k&xeZ zJW9CP4#b6}HM97%7-8WP=iReU6%&n2oGR+(rxWt2$1o?!ADmy6qrFAa@T$#SuZ6OW)q!<`K2L_rW`6cJ-{!6`O3L3X(f>Dtt|n+`X< zL{F`4EGt9~Y9XHE><{p8JGj`=Bj~dBEi*3B91}hSX{L%A;4WG$Pt)waS-AP+=~FwK 
z%~LYs)qiN*$8@oqpieCf{;zj|#2k<}$ZD{{k&sB_&C`a5m@aAR)n;UjTTOG;aR{(E zAwr|2126_29&94^9v)}boekIC7pP)UF-4^bIhtCxz3IzDUml0|mH$*m_Dd?XOfYsM zeuwodzBPIIL{n&)jW#Ii{(pwo1j;~76YIicVk7t^@;@%@lu6#%_b;_!F9KN6&NNvc z@PZmRzOiUe|0R_`6)Y$)b#fAAZM=t5g+WMK+DjeI$E}(7^pBpDFPI;-26gci-Cg)- zhjnvd!NAnN9PQnaiWzRI)ub#XB@iIe z7C&`W)yh>H`H4h%DIpZpA8GW%9}DIX?`BfP^alhX^{%p?;QZ?UYTpIbB48aUpvQ^0 z6X-caoSW~gm@oY#X2-6mx1cB#UM5U?GnQ5Pf~~rPx&Vz1zgg+icezInmPmC-8uIgV zsQwu_G>qBDpT%g^>{#TX!E)G+GG1|FR!cOo!U8|~e)w}rW7wIHw@C|Rt=#wAqk~+u z9w!nU9UX^C$2)1`79sSCTN8?F`_uydkKdH(V3E?}{??jTuhy#Vx-0G92KA5}y|T;B z$|_I?xJkJ4=e}ipeyW7We?zZwkJb(A#EYBbxWuvgq5LHj#vC=d$K;SVW*kwvzyrq z658`JGL%M4Yv)(b<}XEzb8*vSFGw;YungaZe2Wa7ue_f)=#$-&?x&INM~#W(P`fDp z^h%COW@TlikR!%MQZFwBhBTN}WqS^93oOi~q;*?_rI%==nGU^Qo~aXG11Rp9MIfeG zbU9+&S?$!O##*UEsy!bcIkRgSh{UZdipj`m)o~ zz_HBu)H6DIJo>$tYo+N4_T^_~mSqVYIajBK4JSX3K4M2T!A)EM&-#KXb84S-&$;gB zxtO_(%FM_pg4+t_nzQoXwH*oF5{nJX>I*!TE9J{#+o>_E^YedLS=^?Vd(#sGG?0(CmC=cqTjX`bcX8A?@ z{{8!bM3R=0x|E;d(B1cyur%6qb!)Zf{FAnnyw}S7AzyO&oS5$^EbP!uy3Z7eAX6c& z8s^9N!q%fmV(q0ee>&xyLC>J`e0>;Sb4h#p`nSB@!BrLDdL;tYr8^^ zB*E{9v2V&#@PkJ6fB0@|nj~`_1Gv?<0n}~;n{rUlcI$L@FNeoFnz0ivgHCe|tV}mq zNdSiRs0BBCeOJB8+3Of2s;MZ=>u-O7Kp?KN@4)SQ5C_ Import > Upload JSON File), or create your own. +7. Now you can add the grafana dashboards from `zebra/grafana` (Create > Import > Upload JSON File), or create your own. + +![image info](grafana.png) [metrics_section]: https://doc.zebra.zfnd.org/zebrad/config/struct.MetricsSection.html From a8e71b3429965e6fbd7fe3bf0414940402e57a4e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 3 Jul 2023 05:39:10 +0000 Subject: [PATCH 173/265] build(deps): bump tokio from 1.29.0 to 1.29.1 (#7114) Bumps [tokio](https://github.com/tokio-rs/tokio) from 1.29.0 to 1.29.1. 
- [Release notes](https://github.com/tokio-rs/tokio/releases) - [Commits](https://github.com/tokio-rs/tokio/compare/tokio-1.29.0...tokio-1.29.1) --- updated-dependencies: - dependency-name: tokio dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- tower-batch-control/Cargo.toml | 4 ++-- tower-fallback/Cargo.toml | 2 +- zebra-chain/Cargo.toml | 4 ++-- zebra-consensus/Cargo.toml | 4 ++-- zebra-network/Cargo.toml | 4 ++-- zebra-rpc/Cargo.toml | 4 ++-- zebra-state/Cargo.toml | 4 ++-- zebra-test/Cargo.toml | 2 +- zebra-utils/Cargo.toml | 2 +- zebrad/Cargo.toml | 4 ++-- 11 files changed, 19 insertions(+), 19 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index adf26f67a42..bfea368f82d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4517,9 +4517,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.29.0" +version = "1.29.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "374442f06ee49c3a28a8fc9f01a2596fed7559c6b99b31279c3261778e77d84f" +checksum = "532826ff75199d5833b9d2c5fe410f29235e25704ee5f0ef599fb51c21f4a4da" dependencies = [ "autocfg", "backtrace", diff --git a/tower-batch-control/Cargo.toml b/tower-batch-control/Cargo.toml index 05255c0ff56..a16ac617615 100644 --- a/tower-batch-control/Cargo.toml +++ b/tower-batch-control/Cargo.toml @@ -26,7 +26,7 @@ futures = "0.3.28" futures-core = "0.3.28" pin-project = "1.1.0" rayon = "1.7.0" -tokio = { version = "1.29.0", features = ["time", "sync", "tracing", "macros"] } +tokio = { version = "1.29.1", features = ["time", "sync", "tracing", "macros"] } tokio-util = "0.7.8" tower = { version = "0.4.13", features = ["util", "buffer"] } tracing = "0.1.37" @@ -41,7 +41,7 @@ tinyvec = { version = "1.6.0", features = ["rustc_1_55"] } ed25519-zebra = "4.0.0" rand = "0.8.5" 
-tokio = { version = "1.29.0", features = ["full", "tracing", "test-util"] } +tokio = { version = "1.29.1", features = ["full", "tracing", "test-util"] } tokio-test = "0.4.2" tower-fallback = { path = "../tower-fallback/" } tower-test = "0.4.0" diff --git a/tower-fallback/Cargo.toml b/tower-fallback/Cargo.toml index fd241c167b8..ea62c333c4e 100644 --- a/tower-fallback/Cargo.toml +++ b/tower-fallback/Cargo.toml @@ -22,6 +22,6 @@ futures-core = "0.3.28" tracing = "0.1.37" [dev-dependencies] -tokio = { version = "1.29.0", features = ["full", "tracing", "test-util"] } +tokio = { version = "1.29.1", features = ["full", "tracing", "test-util"] } zebra-test = { path = "../zebra-test/" } diff --git a/zebra-chain/Cargo.toml b/zebra-chain/Cargo.toml index 67c6a39e24a..d017c1e35ae 100644 --- a/zebra-chain/Cargo.toml +++ b/zebra-chain/Cargo.toml @@ -117,7 +117,7 @@ proptest-derive = { version = "0.3.0", optional = true } rand = { version = "0.8.5", optional = true } rand_chacha = { version = "0.3.1", optional = true } -tokio = { version = "1.29.0", features = ["tracing"], optional = true } +tokio = { version = "1.29.1", features = ["tracing"], optional = true } zebra-test = { path = "../zebra-test/", version = "1.0.0-beta.27", optional = true } @@ -140,7 +140,7 @@ proptest-derive = "0.3.0" rand = "0.8.5" rand_chacha = "0.3.1" -tokio = { version = "1.29.0", features = ["full", "tracing", "test-util"] } +tokio = { version = "1.29.1", features = ["full", "tracing", "test-util"] } zebra-test = { path = "../zebra-test/" } diff --git a/zebra-consensus/Cargo.toml b/zebra-consensus/Cargo.toml index 2fee6cf466d..b77bcf5f321 100644 --- a/zebra-consensus/Cargo.toml +++ b/zebra-consensus/Cargo.toml @@ -53,7 +53,7 @@ futures = "0.3.28" futures-util = "0.3.28" metrics = "0.21.0" thiserror = "1.0.40" -tokio = { version = "1.29.0", features = ["time", "sync", "tracing", "rt-multi-thread"] } +tokio = { version = "1.29.1", features = ["time", "sync", "tracing", "rt-multi-thread"] } tower = { 
version = "0.4.13", features = ["timeout", "util", "buffer"] } tracing = "0.1.37" tracing-futures = "0.2.5" @@ -89,7 +89,7 @@ proptest = "1.2.0" proptest-derive = "0.3.0" spandoc = "0.2.2" -tokio = { version = "1.29.0", features = ["full", "tracing", "test-util"] } +tokio = { version = "1.29.1", features = ["full", "tracing", "test-util"] } tracing-error = "0.2.0" tracing-subscriber = "0.3.17" diff --git a/zebra-network/Cargo.toml b/zebra-network/Cargo.toml index 5430350b6b6..e96998f3ae2 100644 --- a/zebra-network/Cargo.toml +++ b/zebra-network/Cargo.toml @@ -61,7 +61,7 @@ tempfile = "3.5.0" thiserror = "1.0.40" futures = "0.3.28" -tokio = { version = "1.29.0", features = ["fs", "io-util", "net", "time", "tracing", "macros", "rt-multi-thread"] } +tokio = { version = "1.29.1", features = ["fs", "io-util", "net", "time", "tracing", "macros", "rt-multi-thread"] } tokio-stream = { version = "0.1.14", features = ["sync", "time"] } tokio-util = { version = "0.7.8", features = ["codec"] } tower = { version = "0.4.13", features = ["retry", "discover", "load", "load-shed", "timeout", "util", "buffer"] } @@ -90,7 +90,7 @@ proptest = "1.2.0" proptest-derive = "0.3.0" static_assertions = "1.1.0" -tokio = { version = "1.29.0", features = ["full", "tracing", "test-util"] } +tokio = { version = "1.29.1", features = ["full", "tracing", "test-util"] } toml = "0.7.5" zebra-chain = { path = "../zebra-chain", features = ["proptest-impl"] } diff --git a/zebra-rpc/Cargo.toml b/zebra-rpc/Cargo.toml index 53026d5828b..9605242563f 100644 --- a/zebra-rpc/Cargo.toml +++ b/zebra-rpc/Cargo.toml @@ -54,7 +54,7 @@ num_cpus = "1.16.0" serde_json = { version = "1.0.99", features = ["preserve_order"] } indexmap = { version = "2.0.0", features = ["serde"] } -tokio = { version = "1.29.0", features = ["time", "rt-multi-thread", "macros", "tracing"] } +tokio = { version = "1.29.1", features = ["time", "rt-multi-thread", "macros", "tracing"] } tower = "0.4.13" tracing = "0.1.37" @@ -83,7 +83,7 @@ insta 
= { version = "1.30.0", features = ["redactions", "json", "ron"] } proptest = "1.2.0" thiserror = "1.0.40" -tokio = { version = "1.29.0", features = ["full", "tracing", "test-util"] } +tokio = { version = "1.29.1", features = ["full", "tracing", "test-util"] } zebra-chain = { path = "../zebra-chain", features = ["proptest-impl"] } zebra-consensus = { path = "../zebra-consensus", features = ["proptest-impl"] } diff --git a/zebra-state/Cargo.toml b/zebra-state/Cargo.toml index b55f0b61c2f..0ed04a2a24b 100644 --- a/zebra-state/Cargo.toml +++ b/zebra-state/Cargo.toml @@ -62,7 +62,7 @@ tempfile = "3.5.0" thiserror = "1.0.40" rayon = "1.7.0" -tokio = { version = "1.29.0", features = ["rt-multi-thread", "sync", "tracing"] } +tokio = { version = "1.29.1", features = ["rt-multi-thread", "sync", "tracing"] } tower = { version = "0.4.13", features = ["buffer", "util"] } tracing = "0.1.37" @@ -100,7 +100,7 @@ rand = "0.8.5" halo2 = { package = "halo2_proofs", version = "0.3.0" } jubjub = "0.10.0" -tokio = { version = "1.29.0", features = ["full", "tracing", "test-util"] } +tokio = { version = "1.29.1", features = ["full", "tracing", "test-util"] } zebra-chain = { path = "../zebra-chain", features = ["proptest-impl"] } zebra-test = { path = "../zebra-test/" } diff --git a/zebra-test/Cargo.toml b/zebra-test/Cargo.toml index 0f17ca58503..3376c566c3b 100644 --- a/zebra-test/Cargo.toml +++ b/zebra-test/Cargo.toml @@ -24,7 +24,7 @@ once_cell = "1.18.0" rand = "0.8.5" regex = "1.8.4" -tokio = { version = "1.29.0", features = ["full", "tracing", "test-util"] } +tokio = { version = "1.29.1", features = ["full", "tracing", "test-util"] } tower = { version = "0.4.13", features = ["util"] } futures = "0.3.28" diff --git a/zebra-utils/Cargo.toml b/zebra-utils/Cargo.toml index eeec69f4e8d..7ecbd146cc3 100644 --- a/zebra-utils/Cargo.toml +++ b/zebra-utils/Cargo.toml @@ -88,4 +88,4 @@ regex = { version = "1.8.4", optional = true } reqwest = { version = "0.11.18", optional = true } # These 
crates are needed for the zebra-checkpoints and search-issue-refs binaries -tokio = { version = "1.29.0", features = ["full"], optional = true } +tokio = { version = "1.29.1", features = ["full"], optional = true } diff --git a/zebrad/Cargo.toml b/zebrad/Cargo.toml index a7b95ab6167..673ede15cf5 100644 --- a/zebrad/Cargo.toml +++ b/zebrad/Cargo.toml @@ -149,7 +149,7 @@ toml = "0.7.5" futures = "0.3.28" rayon = "1.7.0" -tokio = { version = "1.29.0", features = ["time", "rt-multi-thread", "macros", "tracing", "signal"] } +tokio = { version = "1.29.1", features = ["time", "rt-multi-thread", "macros", "tracing", "signal"] } tower = { version = "0.4.13", features = ["hedge", "limit"] } pin-project = "1.1.0" @@ -227,7 +227,7 @@ tempfile = "3.5.0" hyper = { version = "0.14.27", features = ["http1", "http2", "server"]} tracing-test = { version = "0.2.4", features = ["no-env-filter"] } -tokio = { version = "1.29.0", features = ["full", "tracing", "test-util"] } +tokio = { version = "1.29.1", features = ["full", "tracing", "test-util"] } tokio-stream = "0.1.14" # test feature lightwalletd-grpc-tests From 4713994e64763998b5267f900358581b2ae4bc08 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 3 Jul 2023 23:23:33 +0000 Subject: [PATCH 174/265] build(deps): bump metrics from 0.21.0 to 0.21.1 (#7131) Bumps [metrics](https://github.com/metrics-rs/metrics) from 0.21.0 to 0.21.1. - [Changelog](https://github.com/metrics-rs/metrics/blob/main/release.toml) - [Commits](https://github.com/metrics-rs/metrics/compare/metrics-v0.21.0...metrics-v0.21.1) --- updated-dependencies: - dependency-name: metrics dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 16 ++++++++-------- zebra-consensus/Cargo.toml | 2 +- zebra-network/Cargo.toml | 2 +- zebra-state/Cargo.toml | 2 +- zebrad/Cargo.toml | 2 +- 5 files changed, 12 insertions(+), 12 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index bfea368f82d..74144ad3ea8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2485,9 +2485,9 @@ dependencies = [ [[package]] name = "metrics" -version = "0.21.0" +version = "0.21.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa8ebbd1a9e57bbab77b9facae7f5136aea44c356943bf9a198f647da64285d6" +checksum = "fde3af1a009ed76a778cb84fdef9e7dbbdf5775ae3e4cc1f434a6a307f6f76c5" dependencies = [ "ahash 0.8.3", "metrics-macros 0.7.0", @@ -2504,7 +2504,7 @@ dependencies = [ "hyper", "indexmap 1.9.3", "ipnet", - "metrics 0.21.0", + "metrics 0.21.1", "metrics-util", "quanta", "thiserror", @@ -2542,7 +2542,7 @@ dependencies = [ "crossbeam-epoch", "crossbeam-utils", "hashbrown 0.13.2", - "metrics 0.21.0", + "metrics 0.21.1", "num_cpus", "quanta", "sketches-ddsketch", @@ -5741,7 +5741,7 @@ dependencies = [ "howudoin", "jubjub", "lazy_static", - "metrics 0.21.0", + "metrics 0.21.1", "num-integer", "once_cell", "orchard", @@ -5785,7 +5785,7 @@ dependencies = [ "indexmap 2.0.0", "itertools 0.11.0", "lazy_static", - "metrics 0.21.0", + "metrics 0.21.1", "num-integer", "ordered-map", "pin-project", @@ -5885,7 +5885,7 @@ dependencies = [ "itertools 0.11.0", "jubjub", "lazy_static", - "metrics 0.21.0", + "metrics 0.21.1", "mset", "once_cell", "proptest", @@ -5979,7 +5979,7 @@ dependencies = [ "jsonrpc-core", "lazy_static", "log", - "metrics 0.21.0", + "metrics 0.21.1", "metrics-exporter-prometheus", "num-integer", "once_cell", diff --git a/zebra-consensus/Cargo.toml b/zebra-consensus/Cargo.toml index b77bcf5f321..6ea989ef321 100644 --- a/zebra-consensus/Cargo.toml +++ b/zebra-consensus/Cargo.toml @@ 
-51,7 +51,7 @@ serde = { version = "1.0.164", features = ["serde_derive"] } futures = "0.3.28" futures-util = "0.3.28" -metrics = "0.21.0" +metrics = "0.21.1" thiserror = "1.0.40" tokio = { version = "1.29.1", features = ["time", "sync", "tracing", "rt-multi-thread"] } tower = { version = "0.4.13", features = ["timeout", "util", "buffer"] } diff --git a/zebra-network/Cargo.toml b/zebra-network/Cargo.toml index e96998f3ae2..39aebc70d89 100644 --- a/zebra-network/Cargo.toml +++ b/zebra-network/Cargo.toml @@ -66,7 +66,7 @@ tokio-stream = { version = "0.1.14", features = ["sync", "time"] } tokio-util = { version = "0.7.8", features = ["codec"] } tower = { version = "0.4.13", features = ["retry", "discover", "load", "load-shed", "timeout", "util", "buffer"] } -metrics = "0.21.0" +metrics = "0.21.1" tracing-futures = "0.2.5" tracing-error = { version = "0.2.0", features = ["traced-error"] } tracing = "0.1.37" diff --git a/zebra-state/Cargo.toml b/zebra-state/Cargo.toml index 0ed04a2a24b..210d89326ca 100644 --- a/zebra-state/Cargo.toml +++ b/zebra-state/Cargo.toml @@ -51,7 +51,7 @@ hex = "0.4.3" indexmap = "2.0.0" itertools = "0.11.0" lazy_static = "1.4.0" -metrics = "0.21.0" +metrics = "0.21.1" mset = "0.1.1" regex = "1.8.4" rlimit = "0.9.1" diff --git a/zebrad/Cargo.toml b/zebrad/Cargo.toml index 673ede15cf5..d104d4498ad 100644 --- a/zebrad/Cargo.toml +++ b/zebrad/Cargo.toml @@ -166,7 +166,7 @@ tracing-error = "0.2.0" tracing-futures = "0.2.5" tracing = "0.1.37" -metrics = "0.21.0" +metrics = "0.21.1" dirs = "5.0.1" atty = "0.2.14" From 3d32cbd718424b8889598a07db97c8d0da60e858 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 4 Jul 2023 16:53:33 +0000 Subject: [PATCH 175/265] build(deps): bump vergen from 8.2.1 to 8.2.3 (#7130) Bumps [vergen](https://github.com/rustyhorde/vergen) from 8.2.1 to 8.2.3. 
- [Release notes](https://github.com/rustyhorde/vergen/releases) - [Commits](https://github.com/rustyhorde/vergen/compare/8.2.1...8.2.3) --- updated-dependencies: - dependency-name: vergen dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 15 +++++++++++++-- zebrad/Cargo.toml | 2 +- 2 files changed, 14 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 74144ad3ea8..942b07cd52d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2729,6 +2729,15 @@ dependencies = [ "libc", ] +[[package]] +name = "num_threads" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2819ce041d2ee131036f4fc9d6ae7ae125a3a40e97ba64d04fe799ad9dabbb44" +dependencies = [ + "libc", +] + [[package]] name = "number_prefix" version = "0.4.0" @@ -4470,6 +4479,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ea9e1b3cf1243ae005d9e74085d4d542f3125458f3a81af210d901dcd7411efd" dependencies = [ "itoa", + "libc", + "num_threads", "serde", "time-core", "time-macros", @@ -5114,9 +5125,9 @@ checksum = "f1bddf1187be692e79c5ffeab891132dfb0f236ed36a43c7ed39f1165ee20191" [[package]] name = "vergen" -version = "8.2.1" +version = "8.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b3c89c2c7e50f33e4d35527e5bf9c11d6d132226dbbd1753f0fbe9f19ef88c6" +checksum = "ce38fc503fa57441ac2539c3e723b5adf76601eb4f1ad24025c6660d27f355b7" dependencies = [ "anyhow", "git2", diff --git a/zebrad/Cargo.toml b/zebrad/Cargo.toml index d104d4498ad..202093daf9e 100644 --- a/zebrad/Cargo.toml +++ b/zebrad/Cargo.toml @@ -208,7 +208,7 @@ proptest-derive = { version = "0.3.0", optional = true } console-subscriber = { version = "0.1.8", optional = true } [build-dependencies] -vergen = { version = "8.2.1", default-features = false, features = ["cargo", "git", 
"git2", "rustc"] } +vergen = { version = "8.2.3", default-features = false, features = ["cargo", "git", "git2", "rustc"] } # test feature lightwalletd-grpc-tests tonic-build = { version = "0.9.2", optional = true } From 6ec0c66aa6264a39082695d6eb56ffde7c702dde Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 4 Jul 2023 16:53:54 +0000 Subject: [PATCH 176/265] build(deps): bump serde from 1.0.164 to 1.0.166 (#7134) Bumps [serde](https://github.com/serde-rs/serde) from 1.0.164 to 1.0.166. - [Release notes](https://github.com/serde-rs/serde/releases) - [Commits](https://github.com/serde-rs/serde/compare/v1.0.164...v1.0.166) --- updated-dependencies: - dependency-name: serde dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 144 ++++++++++++++++----------------- zebra-chain/Cargo.toml | 2 +- zebra-consensus/Cargo.toml | 2 +- zebra-network/Cargo.toml | 2 +- zebra-node-services/Cargo.toml | 4 +- zebra-rpc/Cargo.toml | 2 +- zebra-state/Cargo.toml | 2 +- zebrad/Cargo.toml | 2 +- 8 files changed, 80 insertions(+), 80 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 942b07cd52d..609b5bc00f5 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -35,7 +35,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "55bfb86e57d13c06e482c570826ddcddcc8f07fab916760e8911141d4fda8b62" dependencies = [ "ident_case", - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", "syn 1.0.109", "synstructure", @@ -257,9 +257,9 @@ version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" dependencies = [ - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", - "syn 2.0.18", + "syn 2.0.23", ] [[package]] @@ -268,9 +268,9 @@ version = 
"0.1.68" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b9ccdd8f2a161be9bd5c023df56f1b2a0bd1d83872ae53b71a84a12c9bf6e842" dependencies = [ - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", - "syn 2.0.18", + "syn 2.0.23", ] [[package]] @@ -424,12 +424,12 @@ dependencies = [ "log", "peeking_take_while", "prettyplease 0.2.6", - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", "regex", "rustc-hash", "shlex", - "syn 2.0.18", + "syn 2.0.23", "which", ] @@ -803,9 +803,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b8cd2b2a819ad6eec39e8f1d6b53001af1e5469f8c177579cdaeb313115b825f" dependencies = [ "heck 0.4.1", - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", - "syn 2.0.18", + "syn 2.0.23", ] [[package]] @@ -1077,9 +1077,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b677bcf759c79656defee3b0374aeff759122d3fc80edb0b77eeb0fd06e8fd20" dependencies = [ "codespan-reporting", - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", - "syn 2.0.18", + "syn 2.0.23", ] [[package]] @@ -1094,9 +1094,9 @@ version = "1.0.94" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2345488264226bf682893e25de0769f3360aac9957980ec49361b083ddaa5bc5" dependencies = [ - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", - "syn 2.0.18", + "syn 2.0.23", ] [[package]] @@ -1127,7 +1127,7 @@ checksum = "859d65a907b6852c9361e3185c862aae7fafd2887876799fa55f5f99dc40d610" dependencies = [ "fnv", "ident_case", - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", "strsim 0.10.0", "syn 1.0.109", @@ -1141,10 +1141,10 @@ checksum = "ab8bfa2e259f8ee1ce5e97824a3c55ec4404a0d772ca7fa96bf19f0752a046eb" dependencies = [ "fnv", "ident_case", - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", "strsim 0.10.0", - "syn 2.0.18", + "syn 2.0.23", ] [[package]] @@ -1166,7 +1166,7 @@ checksum = 
"29a358ff9f12ec09c3e61fef9b5a9902623a695a46a917b07f269bff1445611a" dependencies = [ "darling_core 0.20.1", "quote 1.0.28", - "syn 2.0.18", + "syn 2.0.23", ] [[package]] @@ -1235,9 +1235,9 @@ version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "487585f4d0c6655fe74905e2504d8ad6908e4db67f744eb140876906c2f3175d" dependencies = [ - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", - "syn 2.0.18", + "syn 2.0.23", ] [[package]] @@ -1556,9 +1556,9 @@ version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72" dependencies = [ - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", - "syn 2.0.18", + "syn 2.0.23", ] [[package]] @@ -2026,7 +2026,7 @@ version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "11d7a9f6330b71fea57921c9b61c47ee6e84f72d394754eff6163ae67e7395eb" dependencies = [ - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", "syn 1.0.109", ] @@ -2226,7 +2226,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5b939a78fa820cdfcb7ee7484466746a7377760970f6f9c6fe19f9edcc8a38d2" dependencies = [ "proc-macro-crate 0.1.5", - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", "syn 1.0.109", ] @@ -2517,7 +2517,7 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "731f8ecebd9f3a4aa847dfe75455e4757a45da40a7793d2f0b1f9b6ed18b23f3" dependencies = [ - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", "syn 1.0.109", ] @@ -2528,9 +2528,9 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ddece26afd34c31585c74a4db0630c376df271c285d682d1e55012197830b6df" dependencies = [ - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", - "syn 2.0.18", + "syn 2.0.23", ] [[package]] @@ -2792,9 +2792,9 @@ version = "0.1.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", - "syn 2.0.18", + "syn 2.0.23", ] [[package]] @@ -2922,7 +2922,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "86b26a931f824dd4eca30b3e43bb4f31cd5f0d3a403c5f5ff27106b805bfde7b" dependencies = [ "proc-macro-crate 1.3.1", - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", "syn 1.0.109", ] @@ -3051,9 +3051,9 @@ checksum = "6c435bf1076437b851ebc8edc3a18442796b30f1728ffea6262d59bbe28b077e" dependencies = [ "pest", "pest_meta", - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", - "syn 2.0.18", + "syn 2.0.23", ] [[package]] @@ -3092,9 +3092,9 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "39407670928234ebc5e6e580247dd567ad73a3578460c5990f9503df207e8f07" dependencies = [ - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", - "syn 2.0.18", + "syn 2.0.23", ] [[package]] @@ -3187,7 +3187,7 @@ version = "0.1.25" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6c8646e95016a7a6c4adea95bafa8a16baab64b583356217f2c85db4a39d9a86" dependencies = [ - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "syn 1.0.109", ] @@ -3197,8 +3197,8 @@ version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3b69d39aab54d069e7f2fe8cb970493e7834601ca2d8c65fd7bbd183578080d1" dependencies = [ - "proc-macro2 1.0.60", - "syn 2.0.18", + "proc-macro2 1.0.63", + "syn 2.0.23", ] [[package]] @@ -3238,7 +3238,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" dependencies = [ "proc-macro-error-attr", - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", "syn 1.0.109", "version_check", @@ -3250,7 +3250,7 @@ 
version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" dependencies = [ - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", "version_check", ] @@ -3266,9 +3266,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.60" +version = "1.0.63" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dec2b086b7a862cf4de201096214fa870344cf922b2b30c167badb3af3195406" +checksum = "7b368fba921b0dce7e60f5e04ec15e565b3303972b42bcfde1d0713b881959eb" dependencies = [ "unicode-ident", ] @@ -3344,7 +3344,7 @@ checksum = "e5d2d8d10f3c6ded6da8b05b5fb3b8a5082514344d56c9f871412d29b4e075b4" dependencies = [ "anyhow", "itertools 0.10.5", - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", "syn 1.0.109", ] @@ -3407,7 +3407,7 @@ version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "608c156fd8e97febc07dc9c2e2c80bf74cfc6ef26893eae3daf8bc2bc94a4b7f" dependencies = [ - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", "syn 1.0.109", ] @@ -3427,7 +3427,7 @@ version = "1.0.28" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1b9ab9c7eadfd8df19006f1cf1a4aed13540ed5cbc047010ece5826e10825488" dependencies = [ - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", ] [[package]] @@ -4061,9 +4061,9 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.164" +version = "1.0.166" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e8c8cf938e98f769bc164923b06dce91cea1751522f46f8466461af04c9027d" +checksum = "d01b7404f9d441d3ad40e6a636a7782c377d2abdbe4fa2440e2edcc2f4f10db8" dependencies = [ "serde_derive", ] @@ -4079,13 +4079,13 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.164" +version = "1.0.166" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"d9735b638ccc51c28bf6914d90a2e9725b377144fc612c49a611fddd1b631d68" +checksum = "5dd83d6dde2b6b2d466e14d9d1acce8816dedee94f735eac6395808b3483c6d6" dependencies = [ - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", - "syn 2.0.18", + "syn 2.0.23", ] [[package]] @@ -4154,7 +4154,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e182d6ec6f05393cc0e5ed1bf81ad6db3a8feedf8ee515ecdd369809bcce8082" dependencies = [ "darling 0.13.4", - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", "syn 1.0.109", ] @@ -4166,9 +4166,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "edc7d5d3932fb12ce722ee5e64dd38c504efba37567f0c402f6ca728c3b8b070" dependencies = [ "darling 0.20.1", - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", - "syn 2.0.18", + "syn 2.0.23", ] [[package]] @@ -4279,7 +4279,7 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5bdfb59103e43a0f99a346b57860d50f2138a7008d08acd964e9ac0fef3ae9a5" dependencies = [ - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", "syn 1.0.109", ] @@ -4342,7 +4342,7 @@ checksum = "dcb5ae327f9cc13b68763b5749770cb9e048a99bd9dfdfa58d0cf05d5f64afe0" dependencies = [ "heck 0.3.3", "proc-macro-error", - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", "syn 1.0.109", ] @@ -4370,18 +4370,18 @@ version = "1.0.109" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" dependencies = [ - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", "unicode-ident", ] [[package]] name = "syn" -version = "2.0.18" +version = "2.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32d41677bcbe24c20c52e7c70b0d8db04134c5d1066bf98662e2871ad200ea3e" +checksum = "59fb7d6d8281a51045d62b8eb3a7d1ce347b76f312af50cd3dc0af39c87c1737" dependencies = [ - "proc-macro2 1.0.60", + 
"proc-macro2 1.0.63", "quote 1.0.28", "unicode-ident", ] @@ -4398,7 +4398,7 @@ version = "0.12.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f36bdaa60a83aca3921b5259d5400cbf5e90fc51931376a9bd4a0eb79aa7210f" dependencies = [ - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", "syn 1.0.109", "unicode-xid 0.2.4", @@ -4457,9 +4457,9 @@ version = "1.0.40" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f9456a42c5b0d803c8cd86e73dd7cc9edd429499f37a3550d286d5e86720569f" dependencies = [ - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", - "syn 2.0.18", + "syn 2.0.23", ] [[package]] @@ -4563,9 +4563,9 @@ version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" dependencies = [ - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", - "syn 2.0.18", + "syn 2.0.23", ] [[package]] @@ -4719,7 +4719,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a6fdaae4c2c638bb70fe42803a26fbd6fc6ac8c72f5c59f67ecc2a2dcabf4b07" dependencies = [ "prettyplease 0.1.25", - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "prost-build", "quote 1.0.28", "syn 1.0.109", @@ -4838,9 +4838,9 @@ version = "0.1.24" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0f57e3ca2a01450b1a921183a9c9cbfda207fd822cef4ccb00a65402cbba7a74" dependencies = [ - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", - "syn 2.0.18", + "syn 2.0.23", ] [[package]] @@ -5208,9 +5208,9 @@ dependencies = [ "bumpalo", "log", "once_cell", - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", - "syn 2.0.18", + "syn 2.0.23", "wasm-bindgen-shared", ] @@ -5242,9 +5242,9 @@ version = "0.2.86" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e128beba882dd1eb6200e1dc92ae6c5dbaa4311aa7bb211ca035779e5efc39f8" dependencies = [ - "proc-macro2 
1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", - "syn 2.0.18", + "syn 2.0.23", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -6048,7 +6048,7 @@ version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", - "syn 2.0.18", + "syn 2.0.23", ] diff --git a/zebra-chain/Cargo.toml b/zebra-chain/Cargo.toml index d017c1e35ae..40a86f0d385 100644 --- a/zebra-chain/Cargo.toml +++ b/zebra-chain/Cargo.toml @@ -90,7 +90,7 @@ tracing = "0.1.37" # Serialization hex = { version = "0.4.3", features = ["serde"] } -serde = { version = "1.0.164", features = ["serde_derive", "rc"] } +serde = { version = "1.0.166", features = ["serde_derive", "rc"] } serde_with = "3.0.0" serde-big-array = "0.5.1" diff --git a/zebra-consensus/Cargo.toml b/zebra-consensus/Cargo.toml index 6ea989ef321..4bce5294efb 100644 --- a/zebra-consensus/Cargo.toml +++ b/zebra-consensus/Cargo.toml @@ -47,7 +47,7 @@ chrono = { version = "0.4.26", default-features = false, features = ["clock", "s displaydoc = "0.2.4" lazy_static = "1.4.0" once_cell = "1.18.0" -serde = { version = "1.0.164", features = ["serde_derive"] } +serde = { version = "1.0.166", features = ["serde_derive"] } futures = "0.3.28" futures-util = "0.3.28" diff --git a/zebra-network/Cargo.toml b/zebra-network/Cargo.toml index 39aebc70d89..cc4b8fe7c06 100644 --- a/zebra-network/Cargo.toml +++ b/zebra-network/Cargo.toml @@ -56,7 +56,7 @@ pin-project = "1.1.0" rand = "0.8.5" rayon = "1.7.0" regex = "1.8.4" -serde = { version = "1.0.164", features = ["serde_derive"] } +serde = { version = "1.0.166", features = ["serde_derive"] } tempfile = "3.5.0" thiserror = "1.0.40" diff --git a/zebra-node-services/Cargo.toml b/zebra-node-services/Cargo.toml index 76b2a841cb4..af993cfc421 100644 --- a/zebra-node-services/Cargo.toml +++ b/zebra-node-services/Cargo.toml @@ -44,7 +44,7 @@ 
color-eyre = { version = "0.6.2", optional = true } jsonrpc-core = { version = "18.0.0", optional = true } # Security: avoid default dependency on openssl reqwest = { version = "0.11.18", default-features = false, features = ["rustls-tls"], optional = true } -serde = { version = "1.0.164", optional = true } +serde = { version = "1.0.166", optional = true } serde_json = { version = "1.0.99", optional = true } [dev-dependencies] @@ -52,5 +52,5 @@ serde_json = { version = "1.0.99", optional = true } color-eyre = "0.6.2" jsonrpc-core = "18.0.0" reqwest = { version = "0.11.18", default-features = false, features = ["rustls-tls"] } -serde = "1.0.164" +serde = "1.0.166" serde_json = "1.0.97" diff --git a/zebra-rpc/Cargo.toml b/zebra-rpc/Cargo.toml index 9605242563f..0efb0b57253 100644 --- a/zebra-rpc/Cargo.toml +++ b/zebra-rpc/Cargo.toml @@ -60,7 +60,7 @@ tower = "0.4.13" tracing = "0.1.37" hex = { version = "0.4.3", features = ["serde"] } -serde = { version = "1.0.164", features = ["serde_derive"] } +serde = { version = "1.0.166", features = ["serde_derive"] } # Experimental feature getblocktemplate-rpcs rand = { version = "0.8.5", optional = true } diff --git a/zebra-state/Cargo.toml b/zebra-state/Cargo.toml index 210d89326ca..1d0d1e7d166 100644 --- a/zebra-state/Cargo.toml +++ b/zebra-state/Cargo.toml @@ -57,7 +57,7 @@ regex = "1.8.4" rlimit = "0.9.1" rocksdb = { version = "0.21.0", default_features = false, features = ["lz4"] } semver = "1.0.17" -serde = { version = "1.0.164", features = ["serde_derive"] } +serde = { version = "1.0.166", features = ["serde_derive"] } tempfile = "3.5.0" thiserror = "1.0.40" diff --git a/zebrad/Cargo.toml b/zebrad/Cargo.toml index 202093daf9e..6fed087caf6 100644 --- a/zebrad/Cargo.toml +++ b/zebrad/Cargo.toml @@ -144,7 +144,7 @@ humantime-serde = "1.1.1" indexmap = "2.0.0" lazy_static = "1.4.0" semver = "1.0.17" -serde = { version = "1.0.164", features = ["serde_derive"] } +serde = { version = "1.0.166", features = ["serde_derive"] } 
toml = "0.7.5" futures = "0.3.28" From 9b32ab7878e45e0034c1ecaaf0d0a7f9e2ea8eb8 Mon Sep 17 00:00:00 2001 From: teor Date: Wed, 5 Jul 2023 05:01:11 +1000 Subject: [PATCH 177/265] change(release): Update release script and check it in CI (#7128) * Activate production features on docs.rs * Make version requirements more flexible to work around unpublished versions * Remove a redundant feature requirement that causes publishing issues * fix release auto-replacement format * cargo release replace --verbose --execute --package zebrad * Add a missing 1.0.1 changelog entry * Fix incorrect commands in the release checklist * Check the crates.io release script in CI, to avoid release failures * Fix release date in changelog * Fix workflow typo * Add --no-confirm to the workflow * Fix outdated version in release check command * After the release, add a "remove do-not-merge" step * Set git identity before committing * Don't overwrite tweaked versions on upgrade * cargo release version doesn't support --dependent-version * Update the same commands in the release checklist * Commit to git after each change * And in the checklist * Add cargo release config * In CI, allow any branch and provide a fake previous tag name * Remove redundant `cargo release commit` * Use a simpler release version process * Start simplifying the release dry run in CI * Fix a cargo dependency lint * just skip the failing crates for now * Fix the release checklist to match CI * Use the latest versions of all Zebra dependencies --- .../release-checklist.md | 24 ++-- .github/workflows/release-crates-io.patch.yml | 28 ++++ .github/workflows/release-crates-io.yml | 125 ++++++++++++++++++ CHANGELOG.md | 3 +- book/src/user/docker.md | 2 +- book/src/user/install.md | 4 +- release.toml | 20 +++ zebra-state/Cargo.toml | 2 +- zebrad/Cargo.toml | 25 +++- 9 files changed, 214 insertions(+), 19 deletions(-) create mode 100644 .github/workflows/release-crates-io.patch.yml create mode 100644 
.github/workflows/release-crates-io.yml create mode 100644 release.toml diff --git a/.github/PULL_REQUEST_TEMPLATE/release-checklist.md b/.github/PULL_REQUEST_TEMPLATE/release-checklist.md index 71090a99407..b60584b88ae 100644 --- a/.github/PULL_REQUEST_TEMPLATE/release-checklist.md +++ b/.github/PULL_REQUEST_TEMPLATE/release-checklist.md @@ -55,6 +55,7 @@ fastmod --fixed-strings '1.58' '1.65' - [ ] Create a release PR by adding `&template=release-checklist.md` to the comparing url ([Example](https://github.com/ZcashFoundation/zebra/compare/bump-v1.0.0?expand=1&template=release-checklist.md)). - [ ] Freeze the [`batched` queue](https://dashboard.mergify.com/github/ZcashFoundation/repo/zebra/queues) using Mergify. - [ ] Mark all the release PRs as `Critical` priority, so they go in the `urgent` Mergify queue. +- [ ] Mark all non-release PRs with `do-not-merge`, because Mergify checks approved PRs against every commit, even when a queue is frozen. # Update Versions and End of Support @@ -76,19 +77,24 @@ Zebra's Rust API doesn't have any support or stability guarantees, so we keep al

-If you're publishing crates for the first time: +If you're publishing crates for the first time, click this triangle for extra steps - [ ] Install `cargo-release`: `cargo install cargo-release` - [ ] Make sure you are an owner of the crate or [a member of the Zebra crates.io `owners` group on GitHub](https://github.com/orgs/ZcashFoundation/teams/owners)
-- [ ] Update crate versions and do a release dry-run - - [ ] `cargo clean` (optional) - - [ ] `cargo release version --verbose --execute --workspace --exclude zebrad beta` - - [ ] `cargo release version --verbose --execute --package zebrad [ major | minor | patch ]` - - [ ] `cargo release publish --verbose --dry-run --workspace` -- [ ] Commit the version changes to your release PR branch using `git`: `cargo release commit --verbose --execute --workspace` +Check that the release will work: +- [ ] Update crate versions, commit the changes to the release branch, and do a release dry-run: + +```sh +cargo release version --verbose --execute --allow-branch '*' --workspace --exclude zebrad beta +cargo release version --verbose --execute --allow-branch '*' --package zebrad patch # [ major | minor | patch ] +cargo release replace --verbose --execute --allow-branch '*' --package zebrad +cargo release commit --verbose --execute --allow-branch '*' +``` + +Crate publishing is [automatically checked in CI](https://github.com/ZcashFoundation/zebra/actions/workflows/release-crates-io.yml) using "dry run" mode. ## Update End of Support @@ -131,7 +137,6 @@ The end of support height is calculated from the current blockchain height: ## Test the Pre-Release - [ ] Wait until the [Docker binaries have been built on `main`](https://github.com/ZcashFoundation/zebra/actions/workflows/continous-integration-docker.yml), and the quick tests have passed. - (You can ignore the full sync and `lightwalletd` tests, because they take about a day to run.) 
- [ ] Wait until the [pre-release deployment machines have successfully launched](https://github.com/ZcashFoundation/zebra/actions/workflows/continous-delivery.yml) ## Publish Release @@ -144,12 +149,13 @@ The end of support height is calculated from the current blockchain height: - [ ] Run `cargo clean` in the zebra repo (optional) - [ ] Publish the crates to crates.io: `cargo release publish --verbose --workspace --execute` - [ ] Check that Zebra can be installed from `crates.io`: - `cargo install --locked --force --version 1.0.0 zebrad && ~/.cargo/bin/zebrad` + `cargo install --locked --force --version 1.minor.patch zebrad && ~/.cargo/bin/zebrad` and put the output in a comment on the PR. ## Publish Docker Images - [ ] Wait for the [the Docker images to be published successfully](https://github.com/ZcashFoundation/zebra/actions/workflows/release-binaries.yml). - [ ] Un-freeze the [`batched` queue](https://dashboard.mergify.com/github/ZcashFoundation/repo/zebra/queues) using Mergify. +- [ ] Remove `do-not-merge` from the PRs you added it to ## Release Failures diff --git a/.github/workflows/release-crates-io.patch.yml b/.github/workflows/release-crates-io.patch.yml new file mode 100644 index 00000000000..e8f18d6c755 --- /dev/null +++ b/.github/workflows/release-crates-io.patch.yml @@ -0,0 +1,28 @@ +name: Release crates + +on: + # Only patch the Release PR test job + pull_request: + paths-ignore: + # code and tests + - '**/*.rs' + # hard-coded checkpoints (and proptest regressions, which are not actually needed) + - '**/*.txt' + # dependencies + - '**/Cargo.toml' + - '**/Cargo.lock' + # configuration files + - '.cargo/config.toml' + - '**/clippy.toml' + # READMEs, which are shown on the crate page + - '**/README.md' + # workflow definitions + - '.github/workflows/release-crates.io.yml' + + +jobs: + check-release: + name: Check crate release dry run + runs-on: ubuntu-latest + steps: + - run: 'echo "No check required"' diff --git 
a/.github/workflows/release-crates-io.yml b/.github/workflows/release-crates-io.yml new file mode 100644 index 00000000000..350bf4d4bc1 --- /dev/null +++ b/.github/workflows/release-crates-io.yml @@ -0,0 +1,125 @@ +# This workflow checks that Zebra's crates.io release script works. +# +# We use a separate action, because the changed files are different to a Continuous Deployment +# or Docker release. +# +# This workflow is triggered when: +# - A PR that changes Rust files, a README, or this workflow is opened or updated +# - A change is pushed to the main branch +# +# TODO: +# If we decide to automate crates.io releases, we can also publish crates using this workflow, when: +# - A release is published +# - A pre-release is changed to a release + +name: Release crates + +# Ensures that only one workflow task will run at a time. Previous releases, if +# already in process, won't get cancelled. Instead, we let the first release complete, +# then queue the latest pending workflow, cancelling any workflows in between. +# +# Since the different event types do very different things (test vs release), +# we can run different event types concurrently. +# +# For pull requests, we only run the tests from this workflow, and don't do any releases. +# So an in-progress pull request gets cancelled, just like other tests. +concurrency: + group: ${{ github.workflow }}-${{ github.event_name }}-${{ github.ref }} + cancel-in-progress: ${{ github.event_name == 'pull_request' }} + + +on: +# disabled for now +# release: +# types: +# - released + + # Only runs the release tests, doesn't release any crates. + # + # We test all changes on the main branch, just in case the PR paths are too strict. 
+ push: + branches: + - main + + pull_request: + paths: + # code and tests + - '**/*.rs' + # hard-coded checkpoints (and proptest regressions, which are not actually needed) + - '**/*.txt' + # dependencies + - '**/Cargo.toml' + - '**/Cargo.lock' + # configuration files + - '.cargo/config.toml' + - '**/clippy.toml' + # READMEs, which are shown on the crate page + - '**/README.md' + # workflow definitions + - '.github/workflows/release-crates.io.yml' + + +jobs: + # Test that Zebra can be released to crates.io using `cargo`. + # This checks that Zebra's dependencies and release configs are correct. + check-release: + name: Check crate release dry run + timeout-minutes: 15 + runs-on: ubuntu-latest + steps: + - uses: r7kamura/rust-problem-matchers@v1.3.0 + + - name: Checkout git repository + uses: actions/checkout@v3.5.3 + with: + persist-credentials: false + + - name: Inject slug/short variables + uses: rlespinasse/github-slug-action@v4 + with: + short-length: 7 + + # Setup Rust with stable toolchain and minimal profile + - name: Setup Rust + run: | + curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --default-toolchain=stable --profile=minimal + + - name: Install cargo-release + uses: baptiste0928/cargo-install@v2.1.0 + with: + crate: cargo-release + + # Make sure Zebra can be released! + # + # These steps should be kept up to date with the release checklist. + # + # TODO: move these steps into a script which is run in the release checklist and CI + - name: Crate release dry run + run: | + set -ex + git config --global user.email "release-tests-no-reply@zfnd.org" + git config --global user.name "Automated Release Test" + # This script must be the same as: + # https://github.com/ZcashFoundation/zebra/blob/main/.github/PULL_REQUEST_TEMPLATE/release-checklist.md#update-crate-versions + # with an extra `--no-confirm` argument for non-interactive testing. 
+ cargo release version --verbose --execute --no-confirm --allow-branch '*' --workspace --exclude zebrad beta + cargo release version --verbose --execute --no-confirm --allow-branch '*' --package zebrad patch + cargo release replace --verbose --execute --no-confirm --allow-branch '*' --package zebrad + cargo release commit --verbose --execute --no-confirm --allow-branch '*' + # Check the release will work using a dry run + # + # Workaround unpublished dependency version errors by skipping those crates: + # https://github.com/crate-ci/cargo-release/issues/691 + # + # TODO: check all crates after fixing these errors + cargo release publish --verbose --dry-run --allow-branch '*' --workspace --exclude zebra-consensus --exclude zebra-utils --exclude zebrad + + + # TODO: actually do the release here + #release-crates: + # name: Release Zebra Crates + # needs: [ check-release ] + # runs-on: ubuntu-latest + # timeout-minutes: 30 + # if: ${{ !cancelled() && !failure() && github.event_name == 'release' }} + # steps: diff --git a/CHANGELOG.md b/CHANGELOG.md index 572babc2d72..4240c6514ec 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,7 +6,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org). -## [Zebra 1.0.1](https://github.com/ZcashFoundation/zebra/releases/tag/v1.0.1) - 2023-06-29 +## [Zebra 1.0.1](https://github.com/ZcashFoundation/zebra/releases/tag/v1.0.1) - 2023-07-03 Zebra's first patch release fixes multiple peer connection security issues and panics. It also significantly reduces Zebra's CPU usage. We recommend that all users upgrade to Zebra 1.0.1 or later. 
@@ -42,6 +42,7 @@ These platforms are no longer supported by the Zebra team: - Close new peer connections from the same IP and port, rather than replacing the older connection ([#6980](https://github.com/ZcashFoundation/zebra/pull/6980)) - Reduce inbound service overloads and add a timeout ([#6950](https://github.com/ZcashFoundation/zebra/pull/6950)) - Stop panicking when handling inbound connection handshakes ([#6984](https://github.com/ZcashFoundation/zebra/pull/6984)) +- Stop panicking on shutdown in the syncer and network ([#7104](https://github.com/ZcashFoundation/zebra/pull/7104)) ### Added diff --git a/book/src/user/docker.md b/book/src/user/docker.md index e53c51aa6c3..462d5745d8a 100644 --- a/book/src/user/docker.md +++ b/book/src/user/docker.md @@ -17,7 +17,7 @@ docker run --detach zfnd/zebra:latest ### Build it locally ```shell -git clone --depth 1 --branch v1.0.0 https://github.com/ZcashFoundation/zebra.git +git clone --depth 1 --branch v1.0.1 https://github.com/ZcashFoundation/zebra.git docker build --file docker/Dockerfile --target runtime --tag zebra:local . docker run --detach zebra:local ``` diff --git a/book/src/user/install.md b/book/src/user/install.md index c93790920ef..b70eed17f2b 100644 --- a/book/src/user/install.md +++ b/book/src/user/install.md @@ -20,7 +20,7 @@ To compile Zebra directly from GitHub, or from a GitHub release source archive: ```sh git clone https://github.com/ZcashFoundation/zebra.git cd zebra -git checkout v1.0.0 +git checkout v1.0.1 ``` 3. 
Build and Run `zebrad` @@ -33,7 +33,7 @@ target/release/zebrad start ### Compiling from git using cargo install ```sh -cargo install --git https://github.com/ZcashFoundation/zebra --tag v1.0.0 zebrad +cargo install --git https://github.com/ZcashFoundation/zebra --tag v1.0.1 zebrad ``` ### Compiling on ARM diff --git a/release.toml b/release.toml new file mode 100644 index 00000000000..0cbcdd51772 --- /dev/null +++ b/release.toml @@ -0,0 +1,20 @@ +# Only allow releases from the main branch +allow-branch = [ 'main' ] + +# TODO: +# configure all zebra-* crates with a shared version, +# and all tower-* crates with a different one: +# https://github.com/crate-ci/cargo-release/blob/master/docs/reference.md#config-fields +#shared-version = "TODO named groups" + +# Verify releases with release features +# +# TODO: add this feature to all crates +#enable-features = [ 'default-release-binaries' ] + +# Don't do a git push or tag +push = false +tag = false + +# Owners for new crates +owners = [ 'dconnolly', 'teor2345', 'zcashfoundation/owners' ] diff --git a/zebra-state/Cargo.toml b/zebra-state/Cargo.toml index 1d0d1e7d166..42917f1ad8c 100644 --- a/zebra-state/Cargo.toml +++ b/zebra-state/Cargo.toml @@ -55,7 +55,7 @@ metrics = "0.21.1" mset = "0.1.1" regex = "1.8.4" rlimit = "0.9.1" -rocksdb = { version = "0.21.0", default_features = false, features = ["lz4"] } +rocksdb = { version = "0.21.0", default-features = false, features = ["lz4"] } semver = "1.0.17" serde = { version = "1.0.166", features = ["serde_derive"] } tempfile = "3.5.0" diff --git a/zebrad/Cargo.toml b/zebrad/Cargo.toml index 6fed087caf6..158f9c98858 100644 --- a/zebrad/Cargo.toml +++ b/zebrad/Cargo.toml @@ -30,9 +30,24 @@ default-run = "zebrad" # `cargo release` settings [package.metadata.release] pre-release-replacements = [ - {file="../book/src/user/install.md", search="git checkout [a-z0-9\\.-]+", replace="git checkout {{version}}"}, - {file="../book/src/user/install.md", search="--tag [a-z0-9\\.-]+", 
replace="--tag {{version}}"}, - {file="../book/src/user/docker.md", search="--branch [a-z0-9\\.-]+", replace="--branch {{version}}"}, + {file="../book/src/user/install.md", search="git checkout [a-z0-9\\.-]+", replace="git checkout v{{version}}"}, + {file="../book/src/user/install.md", search="--tag [a-z0-9\\.-]+", replace="--tag v{{version}}"}, + {file="../book/src/user/docker.md", search="--branch [a-z0-9\\.-]+", replace="--branch v{{version}}"}, +] + +[package.metadata.docs.rs] + +# Publish Zebra's supported production and developer features on docs.rs. +# (Except for the log level features, because there are a lot of them.) +# +# +features = [ + "default-release-binaries", + "filter-reload", + "flamegraph", + "journald", + "prometheus", + "sentry", ] [features] @@ -55,7 +70,6 @@ getblocktemplate-rpcs = [ elasticsearch = [ "zebra-state/elasticsearch", - "zebra-chain/elasticsearch", ] sentry = ["dep:sentry"] @@ -134,6 +148,7 @@ zebra-network = { path = "../zebra-network", version = "1.0.0-beta.27" } zebra-node-services = { path = "../zebra-node-services", version = "1.0.0-beta.27" } zebra-rpc = { path = "../zebra-rpc", version = "1.0.0-beta.27" } zebra-state = { path = "../zebra-state", version = "1.0.0-beta.27" } + # Required for crates.io publishing, but it's only used in tests zebra-utils = { path = "../zebra-utils", version = "1.0.0-beta.27", optional = true } @@ -153,7 +168,7 @@ tokio = { version = "1.29.1", features = ["time", "rt-multi-thread", "macros", " tower = { version = "0.4.13", features = ["hedge", "limit"] } pin-project = "1.1.0" -color-eyre = { version = "0.6.2", default_features = false, features = ["issue-url"] } +color-eyre = { version = "0.6.2", default-features = false, features = ["issue-url"] } # This is a transitive dependency via color-eyre. # Enable a feature that makes tinyvec compile much faster. 
tinyvec = { version = "1.6.0", features = ["rustc_1_55"] } From 5859fac5b12170a93e9b872321dafa529caa07a2 Mon Sep 17 00:00:00 2001 From: Alfredo Garcia Date: Tue, 4 Jul 2023 18:29:41 -0300 Subject: [PATCH 178/265] docs(state): Use different terms for block verification and state queues (#7061) * claridy some checkpoint verifier docs * update documentation of `Request::CommitSemanticallyVerifiedBlock` and `Request::CommitCheckpointVerifiedBlock` * replace `prepared` with `semantically_verified` in state service checks code * replace `non-finalized` where needed in docs of the state service * fix double space in doc * replace `finalized` where needed in docs of the state service * change some docs in state queued_blocks.rs * Rewrite pending UTXO checkpoint block comment * Fix trailing space in docs * Apply suggestions from code review Co-authored-by: teor --------- Co-authored-by: teor --- zebra-consensus/src/block/check.rs | 2 +- zebra-consensus/src/checkpoint.rs | 6 +-- zebra-state/src/request.rs | 26 +++++----- zebra-state/src/service.rs | 56 +++++++++++----------- zebra-state/src/service/check.rs | 24 +++++----- zebra-state/src/service/check/anchors.rs | 29 ++++++----- zebra-state/src/service/check/nullifier.rs | 10 ++-- zebra-state/src/service/check/utxo.rs | 33 ++++++++----- zebra-state/src/service/queued_blocks.rs | 8 ++-- 9 files changed, 103 insertions(+), 91 deletions(-) diff --git a/zebra-consensus/src/block/check.rs b/zebra-consensus/src/block/check.rs index ddd3dbefa63..5f4aaa6ced6 100644 --- a/zebra-consensus/src/block/check.rs +++ b/zebra-consensus/src/block/check.rs @@ -315,7 +315,7 @@ pub fn merkle_root_validity( // // Duplicate transactions should cause a block to be // rejected, as duplicate transactions imply that the block contains a - // double-spend. As a defense-in-depth, however, we also check that there + // double-spend. As a defense-in-depth, however, we also check that there // are no duplicate transaction hashes. 
// // ## Checkpoint Validation diff --git a/zebra-consensus/src/checkpoint.rs b/zebra-consensus/src/checkpoint.rs index 2334383b76a..adbe69ded6f 100644 --- a/zebra-consensus/src/checkpoint.rs +++ b/zebra-consensus/src/checkpoint.rs @@ -4,11 +4,11 @@ //! speed up the initial chain sync for Zebra. This list is distributed //! with Zebra. //! -//! The checkpoint verifier queues pending blocks. Once there is a +//! The checkpoint verifier queues pending blocks. Once there is a //! chain from the previous checkpoint to a target checkpoint, it //! verifies all the blocks in that chain, and sends accepted blocks to -//! the state service as finalized chain state, skipping contextual -//! verification checks. +//! the state service as finalized chain state, skipping the majority of +//! contextual verification checks. //! //! Verification starts at the first checkpoint, which is the genesis //! block for the configured network. diff --git a/zebra-state/src/request.rs b/zebra-state/src/request.rs index 5a942d99f7e..cc3df2fd2fc 100644 --- a/zebra-state/src/request.rs +++ b/zebra-state/src/request.rs @@ -163,7 +163,7 @@ pub struct SemanticallyVerifiedBlock { } /// A block ready to be committed directly to the finalized state with -/// no checks. +/// a small number of checks if compared with a `ContextuallyVerifiedBlock`. /// /// This is exposed for use in checkpointing. /// @@ -455,12 +455,11 @@ impl DerefMut for CheckpointVerifiedBlock { /// A query about or modification to the chain state, via the /// [`StateService`](crate::service::StateService). pub enum Request { - /// Performs contextual validation of the given block, committing it to the - /// state if successful. + /// Performs contextual validation of the given semantically verified block, + /// committing it to the state if successful. /// - /// It is the caller's responsibility to perform semantic validation. 
This - /// request can be made out-of-order; the state service will queue it until - /// its parent is ready. + /// This request can be made out-of-order; the state service will queue it + /// until its parent is ready. /// /// Returns [`Response::Committed`] with the hash of the block when it is /// committed to the state, or an error if the block fails contextual @@ -478,12 +477,12 @@ pub enum Request { /// documentation for details. CommitSemanticallyVerifiedBlock(SemanticallyVerifiedBlock), - /// Commit a checkpointed block to the state, skipping most block validation. + /// Commit a checkpointed block to the state, skipping most but not all + /// contextual validation. /// - /// This is exposed for use in checkpointing, which produces finalized - /// blocks. It is the caller's responsibility to ensure that the block is - /// semantically valid and final. This request can be made out-of-order; - /// the state service will queue it until its parent is ready. + /// This is exposed for use in checkpointing, which produces checkpoint verified + /// blocks. This request can be made out-of-order; the state service will queue + /// it until its parent is ready. /// /// Returns [`Response::Committed`] with the hash of the newly committed /// block, or an error. @@ -495,8 +494,9 @@ pub enum Request { /// /// # Note /// - /// Finalized and non-finalized blocks are an internal Zebra implementation detail. - /// There is no difference between these blocks on the network, or in Zebra's + /// [`SemanticallyVerifiedBlock`], [`ContextuallyVerifiedBlock`] and + /// [`CheckpointVerifiedBlock`] are an internal Zebra implementation detail. + /// There is no difference between these blocks on the Zcash network, or in Zebra's /// network or syncer implementations. 
/// /// # Consensus diff --git a/zebra-state/src/service.rs b/zebra-state/src/service.rs index 0e7c96d1748..897903ed4ab 100644 --- a/zebra-state/src/service.rs +++ b/zebra-state/src/service.rs @@ -141,7 +141,7 @@ pub(crate) struct StateService { /// so they can be written to the [`FinalizedState`]. /// /// This sender is dropped after the state has finished sending all the checkpointed blocks, - /// and the lowest non-finalized block arrives. + /// and the lowest semantically verified block arrives. finalized_block_write_sender: Option>, @@ -154,11 +154,6 @@ pub(crate) struct StateService { /// /// If `invalid_block_write_reset_receiver` gets a reset, this is: /// - the hash of the last valid committed block (the parent of the invalid block). - // - // TODO: - // - turn this into an IndexMap containing recent non-finalized block hashes and heights - // (they are all potential tips) - // - remove block hashes once their heights are strictly less than the finalized tip finalized_block_write_last_sent_hash: block::Hash, /// A set of block hashes that have been sent to the block write task. @@ -455,7 +450,7 @@ impl StateService { (state, read_service, latest_chain_tip, chain_tip_change) } - /// Queue a finalized block for verification and storage in the finalized state. + /// Queue a checkpoint verified block for verification and storage in the finalized state. /// /// Returns a channel receiver that provides the result of the block commit. fn queue_and_commit_to_finalized_state( @@ -471,7 +466,7 @@ impl StateService { let queued_height = checkpoint_verified.height; // If we're close to the final checkpoint, make the block's UTXOs available for - // full verification of non-finalized blocks, even when it is in the channel. + // semantic block verification, even when it is in the channel. 
if self.is_close_to_final_checkpoint(queued_height) { self.non_finalized_block_write_sent_hashes .add_finalized(&checkpoint_verified) @@ -481,32 +476,32 @@ impl StateService { let queued = (checkpoint_verified, rsp_tx); if self.finalized_block_write_sender.is_some() { - // We're still committing finalized blocks + // We're still committing checkpoint verified blocks if let Some(duplicate_queued) = self .finalized_state_queued_blocks .insert(queued_prev_hash, queued) { Self::send_checkpoint_verified_block_error( duplicate_queued, - "dropping older finalized block: got newer duplicate block", + "dropping older checkpoint verified block: got newer duplicate block", ); } self.drain_finalized_queue_and_commit(); } else { - // We've finished committing finalized blocks, so drop any repeated queued blocks, - // and return an error. + // We've finished committing checkpoint verified blocks to the finalized state, + // so drop any repeated queued blocks, and return an error. // // TODO: track the latest sent height, and drop any blocks under that height // every time we send some blocks (like QueuedSemanticallyVerifiedBlocks) Self::send_checkpoint_verified_block_error( queued, - "already finished committing finalized blocks: dropped duplicate block, \ + "already finished committing checkpoint verified blocks: dropped duplicate block, \ block is already committed to the state", ); self.clear_finalized_block_queue( - "already finished committing finalized blocks: dropped duplicate block, \ + "already finished committing checkpoint verified blocks: dropped duplicate block, \ block is already committed to the state", ); } @@ -636,7 +631,7 @@ impl StateService { std::mem::drop(finalized); } - /// Queue a non finalized block for verification and check if any queued + /// Queue a semantically verified block for contextual verification and check if any queued /// blocks are ready to be verified and committed to the state. 
/// /// This function encodes the logic for [committing non-finalized blocks][1] @@ -694,8 +689,8 @@ impl StateService { rsp_rx }; - // We've finished sending finalized blocks when: - // - we've sent the finalized block for the last checkpoint, and + // We've finished sending checkpoint verified blocks when: + // - we've sent the verified block for the last checkpoint, and // - it has been successfully written to disk. // // We detect the last checkpoint by looking for non-finalized blocks @@ -709,13 +704,13 @@ impl StateService { && self.read_service.db.finalized_tip_hash() == self.finalized_block_write_last_sent_hash { - // Tell the block write task to stop committing finalized blocks, - // and move on to committing non-finalized blocks. + // Tell the block write task to stop committing checkpoint verified blocks to the finalized state, + // and move on to committing semantically verified blocks to the non-finalized state. std::mem::drop(self.finalized_block_write_sender.take()); - // We've finished committing finalized blocks, so drop any repeated queued blocks. + // We've finished committing checkpoint verified blocks to finalized state, so drop any repeated queued blocks. self.clear_finalized_block_queue( - "already finished committing finalized blocks: dropped duplicate block, \ + "already finished committing checkpoint verified blocks: dropped duplicate block, \ block is already committed to the state", ); } @@ -754,7 +749,7 @@ impl StateService { /// Returns `true` if `queued_height` is near the final checkpoint. /// - /// The non-finalized block verifier needs access to UTXOs from finalized blocks + /// The semantic block verifier needs access to UTXOs from checkpoint verified blocks /// near the final checkpoint, so that it can verify blocks that spend those UTXOs. 
/// /// If it doesn't have the required UTXOs, some blocks will time out, @@ -818,7 +813,7 @@ impl StateService { // required by `Request::CommitSemanticallyVerifiedBlock` call assert!( block.height > self.network.mandatory_checkpoint_height(), - "invalid non-finalized block height: the canopy checkpoint is mandatory, pre-canopy \ + "invalid semantically verified block height: the canopy checkpoint is mandatory, pre-canopy \ blocks, and the canopy activation block, must be committed to the state as finalized \ blocks" ); @@ -970,11 +965,16 @@ impl Service for StateService { Request::CommitCheckpointVerifiedBlock(finalized) => { // # Consensus // - // A non-finalized block verification could have called AwaitUtxo - // before this finalized block arrived in the state. - // So we need to check for pending UTXOs here for non-finalized blocks, - // even though it is redundant for most finalized blocks. - // (Finalized blocks are verified using block hash checkpoints + // A semantic block verification could have called AwaitUtxo + // before this checkpoint verified block arrived in the state. + // So we need to check for pending UTXO requests sent by running + // semantic block verifications. + // + // This check is redundant for most checkpoint verified blocks, + // because semantic verification can only succeed near the final + // checkpoint, when all the UTXOs are available for the verifying block. + // + // (Checkpoint block UTXOs are verified using block hash checkpoints // and transaction merkle tree block header commitments.) 
self.pending_utxos .check_against_ordered(&finalized.new_outputs); diff --git a/zebra-state/src/service/check.rs b/zebra-state/src/service/check.rs index f1e45010194..bd8dd8b8648 100644 --- a/zebra-state/src/service/check.rs +++ b/zebra-state/src/service/check.rs @@ -38,8 +38,8 @@ mod tests; pub(crate) use difficulty::AdjustedDifficulty; -/// Check that the `prepared` block is contextually valid for `network`, based -/// on the `finalized_tip_height` and `relevant_chain`. +/// Check that the semantically verified block is contextually valid for `network`, +/// based on the `finalized_tip_height` and `relevant_chain`. /// /// This function performs checks that require a small number of recent blocks, /// including previous hash, previous height, and block difficulty. @@ -50,9 +50,9 @@ pub(crate) use difficulty::AdjustedDifficulty; /// # Panics /// /// If the state contains less than 28 ([`POW_ADJUSTMENT_BLOCK_SPAN`]) blocks. -#[tracing::instrument(skip(prepared, finalized_tip_height, relevant_chain))] +#[tracing::instrument(skip(semantically_verified, finalized_tip_height, relevant_chain))] pub(crate) fn block_is_valid_for_recent_chain( - prepared: &SemanticallyVerifiedBlock, + semantically_verified: &SemanticallyVerifiedBlock, network: Network, finalized_tip_height: Option, relevant_chain: C, @@ -64,7 +64,7 @@ where { let finalized_tip_height = finalized_tip_height .expect("finalized state must contain at least one block to do contextual validation"); - check::block_is_not_orphaned(finalized_tip_height, prepared.height)?; + check::block_is_not_orphaned(finalized_tip_height, semantically_verified.height)?; let relevant_chain: Vec<_> = relevant_chain .into_iter() @@ -78,7 +78,7 @@ where let parent_height = parent_block .coinbase_height() .expect("valid blocks have a coinbase height"); - check::height_one_more_than_parent_height(parent_height, prepared.height)?; + check::height_one_more_than_parent_height(parent_height, semantically_verified.height)?; if 
relevant_chain.len() < POW_ADJUSTMENT_BLOCK_SPAN { // skip this check during tests if we don't have enough blocks in the chain @@ -107,9 +107,9 @@ where ) }); let difficulty_adjustment = - AdjustedDifficulty::new_from_block(&prepared.block, network, relevant_data); + AdjustedDifficulty::new_from_block(&semantically_verified.block, network, relevant_data); check::difficulty_threshold_and_time_are_valid( - prepared.block.header.difficulty_threshold, + semantically_verified.block.header.difficulty_threshold, difficulty_adjustment, )?; @@ -375,23 +375,23 @@ where pub(crate) fn initial_contextual_validity( finalized_state: &ZebraDb, non_finalized_state: &NonFinalizedState, - prepared: &SemanticallyVerifiedBlock, + semantically_verified: &SemanticallyVerifiedBlock, ) -> Result<(), ValidateContextError> { let relevant_chain = any_ancestor_blocks( non_finalized_state, finalized_state, - prepared.block.header.previous_block_hash, + semantically_verified.block.header.previous_block_hash, ); // Security: check proof of work before any other checks check::block_is_valid_for_recent_chain( - prepared, + semantically_verified, non_finalized_state.network, finalized_state.finalized_tip_height(), relevant_chain, )?; - check::nullifier::no_duplicates_in_finalized_chain(prepared, finalized_state)?; + check::nullifier::no_duplicates_in_finalized_chain(semantically_verified, finalized_state)?; Ok(()) } diff --git a/zebra-state/src/service/check/anchors.rs b/zebra-state/src/service/check/anchors.rs index f410abd89b2..471f39174bc 100644 --- a/zebra-state/src/service/check/anchors.rs +++ b/zebra-state/src/service/check/anchors.rs @@ -190,7 +190,7 @@ fn fetch_sprout_final_treestates( /// treestate of any prior `JoinSplit` _within the same transaction_. 
/// /// This method searches for anchors in the supplied `sprout_final_treestates` -/// (which must be populated with all treestates pointed to in the `prepared` block; +/// (which must be populated with all treestates pointed to in the `semantically_verified` block; /// see [`fetch_sprout_final_treestates()`]); or in the interstitial /// treestates which are computed on the fly in this function. #[tracing::instrument(skip(sprout_final_treestates, transaction))] @@ -322,20 +322,23 @@ fn sprout_anchors_refer_to_treestates( pub(crate) fn block_sapling_orchard_anchors_refer_to_final_treestates( finalized_state: &ZebraDb, parent_chain: &Arc, - prepared: &SemanticallyVerifiedBlock, + semantically_verified: &SemanticallyVerifiedBlock, ) -> Result<(), ValidateContextError> { - prepared.block.transactions.iter().enumerate().try_for_each( - |(tx_index_in_block, transaction)| { + semantically_verified + .block + .transactions + .iter() + .enumerate() + .try_for_each(|(tx_index_in_block, transaction)| { sapling_orchard_anchors_refer_to_final_treestates( finalized_state, Some(parent_chain), transaction, - prepared.transaction_hashes[tx_index_in_block], + semantically_verified.transaction_hashes[tx_index_in_block], Some(tx_index_in_block), - Some(prepared.height), + Some(semantically_verified.height), ) - }, - ) + }) } /// Accepts a [`ZebraDb`], [`Arc`](Chain), and [`SemanticallyVerifiedBlock`]. 
@@ -353,18 +356,20 @@ pub(crate) fn block_sapling_orchard_anchors_refer_to_final_treestates( pub(crate) fn block_fetch_sprout_final_treestates( finalized_state: &ZebraDb, parent_chain: &Arc, - prepared: &SemanticallyVerifiedBlock, + semantically_verified: &SemanticallyVerifiedBlock, ) -> HashMap> { let mut sprout_final_treestates = HashMap::new(); - for (tx_index_in_block, transaction) in prepared.block.transactions.iter().enumerate() { + for (tx_index_in_block, transaction) in + semantically_verified.block.transactions.iter().enumerate() + { fetch_sprout_final_treestates( &mut sprout_final_treestates, finalized_state, Some(parent_chain), transaction, Some(tx_index_in_block), - Some(prepared.height), + Some(semantically_verified.height), ); } @@ -381,7 +386,7 @@ pub(crate) fn block_fetch_sprout_final_treestates( /// treestate of any prior `JoinSplit` _within the same transaction_. /// /// This method searches for anchors in the supplied `sprout_final_treestates` -/// (which must be populated with all treestates pointed to in the `prepared` block; +/// (which must be populated with all treestates pointed to in the `semantically_verified` block; /// see [`fetch_sprout_final_treestates()`]); or in the interstitial /// treestates which are computed on the fly in this function. #[tracing::instrument(skip(sprout_final_treestates, block, transaction_hashes))] diff --git a/zebra-state/src/service/check/nullifier.rs b/zebra-state/src/service/check/nullifier.rs index 4f638b24ff7..809e78383ba 100644 --- a/zebra-state/src/service/check/nullifier.rs +++ b/zebra-state/src/service/check/nullifier.rs @@ -30,24 +30,24 @@ use crate::service; /// > even if they have the same bit pattern. 
/// /// -#[tracing::instrument(skip(prepared, finalized_state))] +#[tracing::instrument(skip(semantically_verified, finalized_state))] pub(crate) fn no_duplicates_in_finalized_chain( - prepared: &SemanticallyVerifiedBlock, + semantically_verified: &SemanticallyVerifiedBlock, finalized_state: &ZebraDb, ) -> Result<(), ValidateContextError> { - for nullifier in prepared.block.sprout_nullifiers() { + for nullifier in semantically_verified.block.sprout_nullifiers() { if finalized_state.contains_sprout_nullifier(nullifier) { Err(nullifier.duplicate_nullifier_error(true))?; } } - for nullifier in prepared.block.sapling_nullifiers() { + for nullifier in semantically_verified.block.sapling_nullifiers() { if finalized_state.contains_sapling_nullifier(nullifier) { Err(nullifier.duplicate_nullifier_error(true))?; } } - for nullifier in prepared.block.orchard_nullifiers() { + for nullifier in semantically_verified.block.orchard_nullifiers() { if finalized_state.contains_orchard_nullifier(nullifier) { Err(nullifier.duplicate_nullifier_error(true))?; } diff --git a/zebra-state/src/service/check/utxo.rs b/zebra-state/src/service/check/utxo.rs index c8a79852b8a..186f89d83af 100644 --- a/zebra-state/src/service/check/utxo.rs +++ b/zebra-state/src/service/check/utxo.rs @@ -36,14 +36,16 @@ use crate::{ /// - spends of an immature transparent coinbase output, /// - unshielded spends of a transparent coinbase output. 
pub fn transparent_spend( - prepared: &SemanticallyVerifiedBlock, + semantically_verified: &SemanticallyVerifiedBlock, non_finalized_chain_unspent_utxos: &HashMap, non_finalized_chain_spent_utxos: &HashSet, finalized_state: &ZebraDb, ) -> Result, ValidateContextError> { let mut block_spends = HashMap::new(); - for (spend_tx_index_in_block, transaction) in prepared.block.transactions.iter().enumerate() { + for (spend_tx_index_in_block, transaction) in + semantically_verified.block.transactions.iter().enumerate() + { // Coinbase inputs represent new coins, // so there are no UTXOs to mark as spent. let spends = transaction @@ -55,7 +57,7 @@ pub fn transparent_spend( let utxo = transparent_spend_chain_order( spend, spend_tx_index_in_block, - &prepared.new_outputs, + &semantically_verified.new_outputs, non_finalized_chain_unspent_utxos, non_finalized_chain_spent_utxos, finalized_state, @@ -70,7 +72,8 @@ pub fn transparent_spend( // We don't want to use UTXOs from invalid pending blocks, // so we check transparent coinbase maturity and shielding // using known valid UTXOs during non-finalized chain validation. 
- let spend_restriction = transaction.coinbase_spend_restriction(prepared.height); + let spend_restriction = + transaction.coinbase_spend_restriction(semantically_verified.height); transparent_coinbase_spend(spend, spend_restriction, utxo.as_ref())?; // We don't delete the UTXOs until the block is committed, @@ -86,7 +89,7 @@ pub fn transparent_spend( } } - remaining_transaction_value(prepared, &block_spends)?; + remaining_transaction_value(semantically_verified, &block_spends)?; Ok(block_spends) } @@ -225,10 +228,12 @@ pub fn transparent_coinbase_spend( /// /// pub fn remaining_transaction_value( - prepared: &SemanticallyVerifiedBlock, + semantically_verified: &SemanticallyVerifiedBlock, utxos: &HashMap, ) -> Result<(), ValidateContextError> { - for (tx_index_in_block, transaction) in prepared.block.transactions.iter().enumerate() { + for (tx_index_in_block, transaction) in + semantically_verified.block.transactions.iter().enumerate() + { if transaction.is_coinbase() { continue; } @@ -243,26 +248,28 @@ pub fn remaining_transaction_value( { Err(ValidateContextError::NegativeRemainingTransactionValue { amount_error, - height: prepared.height, + height: semantically_verified.height, tx_index_in_block, - transaction_hash: prepared.transaction_hashes[tx_index_in_block], + transaction_hash: semantically_verified.transaction_hashes + [tx_index_in_block], }) } Err(amount_error) => { Err(ValidateContextError::CalculateRemainingTransactionValue { amount_error, - height: prepared.height, + height: semantically_verified.height, tx_index_in_block, - transaction_hash: prepared.transaction_hashes[tx_index_in_block], + transaction_hash: semantically_verified.transaction_hashes + [tx_index_in_block], }) } }, Err(value_balance_error) => { Err(ValidateContextError::CalculateTransactionValueBalances { value_balance_error, - height: prepared.height, + height: semantically_verified.height, tx_index_in_block, - transaction_hash: prepared.transaction_hashes[tx_index_in_block], + 
transaction_hash: semantically_verified.transaction_hashes[tx_index_in_block], }) } }? diff --git a/zebra-state/src/service/queued_blocks.rs b/zebra-state/src/service/queued_blocks.rs index 41e938122bf..dabd36082ca 100644 --- a/zebra-state/src/service/queued_blocks.rs +++ b/zebra-state/src/service/queued_blocks.rs @@ -15,13 +15,13 @@ use crate::{BoxError, CheckpointVerifiedBlock, SemanticallyVerifiedBlock}; #[cfg(test)] mod tests; -/// A finalized state queue block, and its corresponding [`Result`] channel. +/// A queued checkpoint verified block, and its corresponding [`Result`] channel. pub type QueuedCheckpointVerified = ( CheckpointVerifiedBlock, oneshot::Sender>, ); -/// A non-finalized state queue block, and its corresponding [`Result`] channel. +/// A queued semantically verified block, and its corresponding [`Result`] channel. pub type QueuedSemanticallyVerified = ( SemanticallyVerifiedBlock, oneshot::Sender>, @@ -264,10 +264,10 @@ impl SentHashes { self.update_metrics_for_block(block.height); } - /// Stores the finalized `block`'s hash, height, and UTXOs, so they can be used to check if a + /// Stores the checkpoint verified `block`'s hash, height, and UTXOs, so they can be used to check if a /// block or UTXO is available in the state. /// - /// Used for finalized blocks close to the final checkpoint, so non-finalized blocks can look up + /// Used for checkpoint verified blocks close to the final checkpoint, so the semantic block verifier can look up /// their UTXOs. /// /// Assumes that blocks are added in the order of their height between `finish_batch` calls From b50252d70cbde707c15e6b32929e1658210cf41e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 5 Jul 2023 02:09:45 +0000 Subject: [PATCH 179/265] build(deps): bump Swatinem/rust-cache from 2.5.0 to 2.5.1 (#7138) Bumps [Swatinem/rust-cache](https://github.com/swatinem/rust-cache) from 2.5.0 to 2.5.1. 
- [Release notes](https://github.com/swatinem/rust-cache/releases) - [Changelog](https://github.com/Swatinem/rust-cache/blob/master/CHANGELOG.md) - [Commits](https://github.com/swatinem/rust-cache/compare/v2.5.0...v2.5.1) --- updated-dependencies: - dependency-name: Swatinem/rust-cache dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/continous-integration-os.yml | 4 ++-- .github/workflows/docs.yml | 2 +- .github/workflows/lint.yml | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/workflows/continous-integration-os.yml b/.github/workflows/continous-integration-os.yml index 3a189653a82..37a14cb8b0a 100644 --- a/.github/workflows/continous-integration-os.yml +++ b/.github/workflows/continous-integration-os.yml @@ -107,7 +107,7 @@ jobs: curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --default-toolchain=${{ matrix.rust }} --profile=minimal - - uses: Swatinem/rust-cache@v2.5.0 + - uses: Swatinem/rust-cache@v2.5.1 # TODO: change Rust cache target directory on Windows, # or remove this workaround once the build is more efficient (#3005). 
#with: @@ -242,7 +242,7 @@ jobs: run: | curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --default-toolchain=stable --profile=minimal - - uses: Swatinem/rust-cache@v2.5.0 + - uses: Swatinem/rust-cache@v2.5.1 with: shared-key: "clippy-cargo-lock" diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml index 5c3b0999d30..2c4f0e5ec9f 100644 --- a/.github/workflows/docs.yml +++ b/.github/workflows/docs.yml @@ -56,7 +56,7 @@ jobs: run: | curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --default-toolchain=beta --profile=default - - uses: Swatinem/rust-cache@v2.5.0 + - uses: Swatinem/rust-cache@v2.5.1 - name: Setup mdBook uses: peaceiris/actions-mdbook@v1.2.0 diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index dd981e8e5c0..67386acc0f6 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -86,7 +86,7 @@ jobs: run: | curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --default-toolchain=stable --profile=default - - uses: Swatinem/rust-cache@v2.5.0 + - uses: Swatinem/rust-cache@v2.5.1 with: shared-key: "clippy-cargo-lock" @@ -131,7 +131,7 @@ jobs: # We don't cache `fmt` outputs because the job is quick, # and we want to use the limited GitHub actions cache space for slower jobs. - #- uses: Swatinem/rust-cache@v2.5.0 + #- uses: Swatinem/rust-cache@v2.5.1 - run: | cargo fmt --all -- --check From 8fed1823afd5b961683da0c970103ddc3ac35aad Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 5 Jul 2023 02:09:57 +0000 Subject: [PATCH 180/265] build(deps): bump rlimit from 0.9.1 to 0.10.0 (#7139) Bumps [rlimit](https://github.com/Nugine/rlimit) from 0.9.1 to 0.10.0. 
- [Changelog](https://github.com/Nugine/rlimit/blob/main/CHANGELOG.md) - [Commits](https://github.com/Nugine/rlimit/compare/v0.9.1...v0.10.0) --- updated-dependencies: - dependency-name: rlimit dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 8 ++++---- zebra-state/Cargo.toml | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 609b5bc00f5..4ae2455da4f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2296,9 +2296,9 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "libc" -version = "0.2.146" +version = "0.2.147" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f92be4933c13fd498862a9e02a3055f8a8d9c039ce33db97306fd5a6caa7f29b" +checksum = "b4668fb0ea861c1df094127ac5f1da3409a82116a4ba74fca2e58ef927159bb3" [[package]] name = "libgit2-sys" @@ -3718,9 +3718,9 @@ dependencies = [ [[package]] name = "rlimit" -version = "0.9.1" +version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8a29d87a652dc4d43c586328706bb5cdff211f3f39a530f240b53f7221dab8e" +checksum = "9b5b8be0bc0ef630d24f8fa836b3a3463479b2343b29f9a8fa905c71a8c7b69b" dependencies = [ "libc", ] diff --git a/zebra-state/Cargo.toml b/zebra-state/Cargo.toml index 42917f1ad8c..73d68bbaf8c 100644 --- a/zebra-state/Cargo.toml +++ b/zebra-state/Cargo.toml @@ -54,7 +54,7 @@ lazy_static = "1.4.0" metrics = "0.21.1" mset = "0.1.1" regex = "1.8.4" -rlimit = "0.9.1" +rlimit = "0.10.0" rocksdb = { version = "0.21.0", default-features = false, features = ["lz4"] } semver = "1.0.17" serde = { version = "1.0.166", features = ["serde_derive"] } From 522390bcc2c98a458bdd098edae6c0d4db573812 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 5 Jul 2023 
05:00:48 +0000 Subject: [PATCH 181/265] build(deps): bump clap from 4.3.8 to 4.3.10 (#7115) Bumps [clap](https://github.com/clap-rs/clap) from 4.3.8 to 4.3.10. - [Release notes](https://github.com/clap-rs/clap/releases) - [Changelog](https://github.com/clap-rs/clap/blob/master/CHANGELOG.md) - [Commits](https://github.com/clap-rs/clap/compare/v4.3.8...v4.3.10) --- updated-dependencies: - dependency-name: clap dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 15 +++++++-------- zebrad/Cargo.toml | 2 +- 2 files changed, 8 insertions(+), 9 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 4ae2455da4f..909ff789277 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -12,7 +12,7 @@ dependencies = [ "arc-swap", "backtrace", "canonical-path", - "clap 4.3.8", + "clap 4.3.10", "color-eyre", "fs-err", "once_cell", @@ -773,9 +773,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.3.8" +version = "4.3.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9394150f5b4273a1763355bd1c2ec54cc5a2593f790587bcd6b2c947cfa9211" +checksum = "384e169cc618c613d5e3ca6404dda77a8685a63e08660dcc64abaf7da7cb0c7a" dependencies = [ "clap_builder", "clap_derive", @@ -784,13 +784,12 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.3.8" +version = "4.3.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a78fbdd3cc2914ddf37ba444114bc7765bbdcb55ec9cbe6fa054f0137400717" +checksum = "ef137bbe35aab78bdb468ccfba75a5f4d8321ae011d34063770780545176af2d" dependencies = [ "anstream", "anstyle", - "bitflags 1.3.2", "clap_lex", "once_cell", "strsim 0.10.0", @@ -956,7 +955,7 @@ dependencies = [ "anes", "cast", "ciborium", - "clap 4.3.8", + "clap 4.3.10", "criterion-plot", "is-terminal", "itertools 0.10.5", @@ -5975,7 +5974,7 @@ dependencies = [ "abscissa_core", "atty", 
"chrono", - "clap 4.3.8", + "clap 4.3.10", "color-eyre", "console-subscriber", "dirs", diff --git a/zebrad/Cargo.toml b/zebrad/Cargo.toml index 158f9c98858..9e13a9e3454 100644 --- a/zebrad/Cargo.toml +++ b/zebrad/Cargo.toml @@ -153,7 +153,7 @@ zebra-state = { path = "../zebra-state", version = "1.0.0-beta.27" } zebra-utils = { path = "../zebra-utils", version = "1.0.0-beta.27", optional = true } abscissa_core = "0.7.0" -clap = { version = "4.3.8", features = ["cargo"] } +clap = { version = "4.3.10", features = ["cargo"] } chrono = { version = "0.4.26", default-features = false, features = ["clock", "std"] } humantime-serde = "1.1.1" indexmap = "2.0.0" From 5598d1a72b4e5d254fea38b7d7f2d1c5ef6d6cb0 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 5 Jul 2023 07:08:31 +0000 Subject: [PATCH 182/265] build(deps): bump pin-project from 1.1.0 to 1.1.2 (#7127) Bumps [pin-project](https://github.com/taiki-e/pin-project) from 1.1.0 to 1.1.2. - [Release notes](https://github.com/taiki-e/pin-project/releases) - [Changelog](https://github.com/taiki-e/pin-project/blob/main/CHANGELOG.md) - [Commits](https://github.com/taiki-e/pin-project/compare/v1.1.0...v1.1.2) --- updated-dependencies: - dependency-name: pin-project dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 8 ++++---- tower-batch-control/Cargo.toml | 2 +- tower-fallback/Cargo.toml | 2 +- zebra-network/Cargo.toml | 2 +- zebrad/Cargo.toml | 2 +- 5 files changed, 8 insertions(+), 8 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 909ff789277..036d59071be 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3078,18 +3078,18 @@ dependencies = [ [[package]] name = "pin-project" -version = "1.1.0" +version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c95a7476719eab1e366eaf73d0260af3021184f18177925b07f54b30089ceead" +checksum = "030ad2bc4db10a8944cb0d837f158bdfec4d4a4873ab701a95046770d11f8842" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.1.0" +version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "39407670928234ebc5e6e580247dd567ad73a3578460c5990f9503df207e8f07" +checksum = "ec2e072ecce94ec471b13398d5402c188e76ac03cf74dd1a975161b23a3f6d9c" dependencies = [ "proc-macro2 1.0.63", "quote 1.0.28", diff --git a/tower-batch-control/Cargo.toml b/tower-batch-control/Cargo.toml index a16ac617615..cbb89fed857 100644 --- a/tower-batch-control/Cargo.toml +++ b/tower-batch-control/Cargo.toml @@ -24,7 +24,7 @@ categories = ["algorithms", "asynchronous"] [dependencies] futures = "0.3.28" futures-core = "0.3.28" -pin-project = "1.1.0" +pin-project = "1.1.2" rayon = "1.7.0" tokio = { version = "1.29.1", features = ["time", "sync", "tracing", "macros"] } tokio-util = "0.7.8" diff --git a/tower-fallback/Cargo.toml b/tower-fallback/Cargo.toml index ea62c333c4e..e7d9a58c87e 100644 --- a/tower-fallback/Cargo.toml +++ b/tower-fallback/Cargo.toml @@ -16,7 +16,7 @@ keywords = ["tower", "batch"] categories = ["algorithms", "asynchronous"] [dependencies] -pin-project = "1.1.0" +pin-project = "1.1.2" tower = "0.4.13" futures-core = 
"0.3.28" tracing = "0.1.37" diff --git a/zebra-network/Cargo.toml b/zebra-network/Cargo.toml index cc4b8fe7c06..643c3d49bae 100644 --- a/zebra-network/Cargo.toml +++ b/zebra-network/Cargo.toml @@ -52,7 +52,7 @@ itertools = "0.11.0" lazy_static = "1.4.0" num-integer = "0.1.45" ordered-map = "0.4.2" -pin-project = "1.1.0" +pin-project = "1.1.2" rand = "0.8.5" rayon = "1.7.0" regex = "1.8.4" diff --git a/zebrad/Cargo.toml b/zebrad/Cargo.toml index 9e13a9e3454..10c90d6421a 100644 --- a/zebrad/Cargo.toml +++ b/zebrad/Cargo.toml @@ -166,7 +166,7 @@ futures = "0.3.28" rayon = "1.7.0" tokio = { version = "1.29.1", features = ["time", "rt-multi-thread", "macros", "tracing", "signal"] } tower = { version = "0.4.13", features = ["hedge", "limit"] } -pin-project = "1.1.0" +pin-project = "1.1.2" color-eyre = { version = "0.6.2", default-features = false, features = ["issue-url"] } # This is a transitive dependency via color-eyre. From f2a2a403a867a7e6d787774afdb3207cb97e34f6 Mon Sep 17 00:00:00 2001 From: teor Date: Wed, 5 Jul 2023 17:08:59 +1000 Subject: [PATCH 183/265] fix(log): Remove redundant startup logs, fix progress bar number, order, and wording (#7087) * Remove duplicate "running" message, send log file opening logs to stderr * Add missing block progress bar desc, make them consistent * Put progress bars in a specific order in each section * Actually make block progress bar desc consistent * Make progress bar order even more consistent * Fix fork blocks plural for 1 block * Use the correct number of chain fork bars * Disable confusing partial work display * Add struct field category comments * Silence a verbose inventory log --- zebra-consensus/src/checkpoint.rs | 5 ++- zebra-network/src/address_book_updater.rs | 14 +++---- zebra-network/src/peer_set/limit.rs | 2 +- zebra-network/src/peer_set/set.rs | 4 +- .../src/service/non_finalized_state.rs | 29 +++++++++----- zebrad/src/components/mempool.rs | 39 ++++++++++++------- zebrad/src/components/sync/progress.rs | 28 
++++++++----- zebrad/src/components/tracing/component.rs | 16 ++++---- 8 files changed, 82 insertions(+), 55 deletions(-) diff --git a/zebra-consensus/src/checkpoint.rs b/zebra-consensus/src/checkpoint.rs index adbe69ded6f..bcd49187764 100644 --- a/zebra-consensus/src/checkpoint.rs +++ b/zebra-consensus/src/checkpoint.rs @@ -265,10 +265,11 @@ where let (sender, receiver) = mpsc::channel(); #[cfg(feature = "progress-bar")] - let queued_blocks_bar = howudoin::new().label("Queued Checkpoint Blocks"); + let queued_blocks_bar = howudoin::new_root().label("Checkpoint Queue Height"); #[cfg(feature = "progress-bar")] - let verified_checkpoint_bar = howudoin::new().label("Verified Checkpoints"); + let verified_checkpoint_bar = + howudoin::new_with_parent(queued_blocks_bar.id()).label("Verified Checkpoints"); let verifier = CheckpointVerifier { checkpoint_list, diff --git a/zebra-network/src/address_book_updater.rs b/zebra-network/src/address_book_updater.rs index 59d02155c97..91aa4a6f144 100644 --- a/zebra-network/src/address_book_updater.rs +++ b/zebra-network/src/address_book_updater.rs @@ -58,14 +58,12 @@ impl AddressBookUpdater { #[cfg(feature = "progress-bar")] let (mut address_info, address_bar, never_bar, failed_bar) = { - let address_bar = howudoin::new().label("Known Peers"); - - ( - address_metrics.clone(), - address_bar, - howudoin::new_with_parent(address_bar.id()).label("Never Attempted Peers"), - howudoin::new_with_parent(address_bar.id()).label("Failed Peers"), - ) + let address_bar = howudoin::new_root().label("Known Peers"); + let never_bar = + howudoin::new_with_parent(address_bar.id()).label("Never Attempted Peers"); + let failed_bar = howudoin::new_with_parent(never_bar.id()).label("Failed Peers"); + + (address_metrics.clone(), address_bar, never_bar, failed_bar) }; let worker_address_book = address_book.clone(); diff --git a/zebra-network/src/peer_set/limit.rs b/zebra-network/src/peer_set/limit.rs index e23c4c4c06a..aef255dac1a 100644 --- 
a/zebra-network/src/peer_set/limit.rs +++ b/zebra-network/src/peer_set/limit.rs @@ -66,7 +66,7 @@ impl ActiveConnectionCounter { let label = label.to_string(); #[cfg(feature = "progress-bar")] - let connection_bar = howudoin::new().label(label.clone()); + let connection_bar = howudoin::new_root().label(label.clone()); Self { count: 0, diff --git a/zebra-network/src/peer_set/set.rs b/zebra-network/src/peer_set/set.rs index 522ee6b1802..2fa546c9883 100644 --- a/zebra-network/src/peer_set/set.rs +++ b/zebra-network/src/peer_set/set.rs @@ -783,11 +783,11 @@ where return fut.map_err(Into::into).boxed(); } - // TODO: reduce this log level after testing #2156 and #2726 - tracing::info!( + tracing::debug!( ?hash, "all ready peers are missing inventory, failing request" ); + async move { // Let other tasks run, so a retry request might get different ready peers. tokio::task::yield_now().await; diff --git a/zebra-state/src/service/non_finalized_state.rs b/zebra-state/src/service/non_finalized_state.rs index 1fa2b29e347..162373b5d57 100644 --- a/zebra-state/src/service/non_finalized_state.rs +++ b/zebra-state/src/service/non_finalized_state.rs @@ -36,6 +36,8 @@ pub(crate) use chain::Chain; /// /// Most chain data is clone-on-write using [`Arc`]. pub struct NonFinalizedState { + // Chain Data + // /// Verified, non-finalized chains, in ascending work order. /// /// The best chain is [`NonFinalizedState::best_chain()`], or `chain_iter().next()`. @@ -43,6 +45,8 @@ pub struct NonFinalizedState { /// callers should migrate to `chain_iter().next()`. chain_set: BTreeSet>, + // Configuration + // /// The configured Zcash network. 
pub network: Network, @@ -653,7 +657,7 @@ impl NonFinalizedState { // Update the chain count bar if self.chain_count_bar.is_none() { - self.chain_count_bar = Some(howudoin::new().label("Chain Forks")); + self.chain_count_bar = Some(howudoin::new_root().label("Chain Forks")); } let chain_count_bar = self @@ -677,9 +681,11 @@ impl NonFinalizedState { match self.chain_count().cmp(&prev_length_bars) { Greater => self .chain_fork_length_bars - .resize_with(self.chain_count(), howudoin::new), + .resize_with(self.chain_count(), || { + howudoin::new_with_parent(chain_count_bar.id()) + }), Less => { - let redundant_bars = self.chain_fork_length_bars.split_off(prev_length_bars); + let redundant_bars = self.chain_fork_length_bars.split_off(self.chain_count()); for bar in redundant_bars { bar.close(); } @@ -708,19 +714,24 @@ impl NonFinalizedState { // zebra_chain::transparent::MIN_TRANSPARENT_COINBASE_MATURITY, // )); - // display work as bits - let mut desc = format!( - "Work {:.1} bits", - chain.partial_cumulative_work.difficulty_bits_for_display(), - ); + // TODO: store work in the finalized state for each height (#7109), + // and show the full chain work here, like `zcashd` (#7110) + // + // For now, we don't show any work here, see the deleted code in PR #7087. 
+ let mut desc = String::new(); if let Some(recent_fork_height) = chain.recent_fork_height() { let recent_fork_length = chain .recent_fork_length() .expect("just checked recent fork height"); + let mut plural = "s"; + if recent_fork_length == 1 { + plural = ""; + } + desc.push_str(&format!( - " at {recent_fork_height:?} + {recent_fork_length} blocks" + " at {recent_fork_height:?} + {recent_fork_length} block{plural}" )); } diff --git a/zebrad/src/components/mempool.rs b/zebrad/src/components/mempool.rs index ceb73ad8eec..aef623e45fa 100644 --- a/zebrad/src/components/mempool.rs +++ b/zebrad/src/components/mempool.rs @@ -414,26 +414,35 @@ impl Mempool { let _max_transaction_count = self.config.tx_cost_limit / zebra_chain::transaction::MEMPOOL_TRANSACTION_COST_THRESHOLD; - self.queued_count_bar = Some(*howudoin::new().label("Mempool Queue").set_pos(0u64)); + let transaction_count_bar = *howudoin::new_root() + .label("Mempool Transactions") + .set_pos(0u64); + // .set_len(max_transaction_count); + + let transaction_cost_bar = howudoin::new_with_parent(transaction_count_bar.id()) + .label("Mempool Cost") + .set_pos(0u64) + // .set_len(self.config.tx_cost_limit) + .fmt_as_bytes(true); + + let queued_count_bar = *howudoin::new_with_parent(transaction_cost_bar.id()) + .label("Mempool Queue") + .set_pos(0u64); // .set_len( // u64::try_from(downloads::MAX_INBOUND_CONCURRENCY).expect("fits in u64"), - // ), + // ); - self.transaction_count_bar = Some(*howudoin::new().label("Mempool Txs").set_pos(0u64)); - // .set_len(max_transaction_count), - - self.transaction_cost_bar = Some( - howudoin::new() - .label("Mempool Cost") - .set_pos(0u64) - // .set_len(self.config.tx_cost_limit) - .fmt_as_bytes(true), - ); - - self.rejected_count_bar = Some(*howudoin::new().label("Mempool Rejects").set_pos(0u64)); + let rejected_count_bar = *howudoin::new_with_parent(queued_count_bar.id()) + .label("Mempool Rejects") + .set_pos(0u64); // .set_len( // 
u64::try_from(storage::MAX_EVICTION_MEMORY_ENTRIES).expect("fits in u64"), - // ), + // ); + + self.transaction_count_bar = Some(transaction_count_bar); + self.transaction_cost_bar = Some(transaction_cost_bar); + self.queued_count_bar = Some(queued_count_bar); + self.rejected_count_bar = Some(rejected_count_bar); } // Update if the mempool has ever been active diff --git a/zebrad/src/components/sync/progress.rs b/zebrad/src/components/sync/progress.rs index 8c49df4363c..66acfb082c7 100644 --- a/zebrad/src/components/sync/progress.rs +++ b/zebrad/src/components/sync/progress.rs @@ -152,8 +152,7 @@ pub async fn show_block_chain_progress( } else { block_bar .set_pos(current_height.0) - .set_len(u64::from(estimated_height.0)) - .desc(network_upgrade.to_string()); + .set_len(u64::from(estimated_height.0)); } // Skip logging and status updates if it isn't time for them yet. @@ -217,7 +216,7 @@ pub async fn show_block_chain_progress( // TODO: use add_warn(), but only add each warning once #[cfg(feature = "progress-bar")] - block_bar.desc("chain updates have stalled"); + block_bar.desc(format!("{}: sync has stalled", network_upgrade)); } else if is_syncer_stopped && remaining_sync_blocks > MIN_SYNC_WARNING_BLOCKS { // We've stopped syncing blocks, but we estimate we're a long way from the tip. // @@ -235,7 +234,10 @@ pub async fn show_block_chain_progress( ); #[cfg(feature = "progress-bar")] - block_bar.desc("sync is very slow, or estimated tip is wrong"); + block_bar.desc(format!( + "{}: sync is very slow, or estimated tip is wrong", + network_upgrade + )); } else if is_syncer_stopped && current_height <= after_checkpoint_height { // We've stopped syncing blocks, // but we're below the minimum height estimated from our checkpoints. 
@@ -259,7 +261,7 @@ pub async fn show_block_chain_progress( ); #[cfg(feature = "progress-bar")] - block_bar.desc("sync is very slow"); + block_bar.desc(format!("{}: sync is very slow", network_upgrade)); } else if is_syncer_stopped { // We've stayed near the tip for a while, and we've stopped syncing lots of blocks. // So we're mostly using gossiped blocks now. @@ -273,7 +275,7 @@ pub async fn show_block_chain_progress( ); #[cfg(feature = "progress-bar")] - block_bar.desc(format!("{}: initial sync finished", network_upgrade)); + block_bar.desc(format!("{}: waiting for next block", network_upgrade)); } else if remaining_sync_blocks <= MAX_CLOSE_TO_TIP_BLOCKS { // We estimate we're near the tip, but we have been syncing lots of blocks recently. // We might also be using some gossiped blocks. @@ -288,7 +290,7 @@ pub async fn show_block_chain_progress( ); #[cfg(feature = "progress-bar")] - block_bar.desc(format!("{}: initial sync almost finished", network_upgrade)); + block_bar.desc(format!("{}: finishing initial sync", network_upgrade)); } else { // We estimate we're far from the tip, and we've been syncing lots of blocks. info!( @@ -299,9 +301,14 @@ pub async fn show_block_chain_progress( %time_since_last_state_block, "estimated progress to chain tip", ); + + #[cfg(feature = "progress-bar")] + block_bar.desc(format!("{}: syncing blocks", network_upgrade)); } } else { let sync_percent = format!("{:.SYNC_PERCENT_FRAC_DIGITS$} %", 0.0f64,); + #[cfg(feature = "progress-bar")] + let network_upgrade = NetworkUpgrade::Genesis; if is_syncer_stopped { // We've stopped syncing blocks, @@ -315,7 +322,7 @@ pub async fn show_block_chain_progress( ); #[cfg(feature = "progress-bar")] - block_bar.desc("can't download genesis block"); + block_bar.desc(format!("{}: can't download genesis block", network_upgrade)); } else { // We're waiting for the genesis block to be committed to the state, // before we can estimate the best chain tip. 
@@ -326,7 +333,10 @@ pub async fn show_block_chain_progress( ); #[cfg(feature = "progress-bar")] - block_bar.desc("waiting to download genesis block"); + block_bar.desc(format!( + "{}: waiting to download genesis block", + network_upgrade + )); } } diff --git a/zebrad/src/components/tracing/component.rs b/zebrad/src/components/tracing/component.rs index 36a2abaacd0..745315ae16a 100644 --- a/zebrad/src/components/tracing/component.rs +++ b/zebrad/src/components/tracing/component.rs @@ -77,7 +77,9 @@ impl Tracing { /// /// If `uses_intro` is true, show a welcome message, the `network`, /// and the Zebra logo on startup. (If the terminal supports it.) - #[allow(clippy::print_stdout, clippy::print_stderr, clippy::unwrap_in_result)] + // + // This method should only print to stderr, because stdout is for tracing logs. + #[allow(clippy::print_stderr, clippy::unwrap_in_result)] pub fn new(network: Network, config: Config, uses_intro: bool) -> Result { // Only use color if tracing output is being sent to a terminal or if it was explicitly // forced to. @@ -113,10 +115,6 @@ impl Tracing { } let writer = if let Some(log_file) = config.log_file.as_ref() { - if uses_intro { - println!("running zebra"); - } - // Make sure the directory for the log file exists. // If the log is configured in the current directory, it won't have a parent directory. 
// @@ -131,17 +129,17 @@ impl Tracing { let log_file_dir = log_file.parent(); if let Some(log_file_dir) = log_file_dir { if !log_file_dir.exists() { - println!("directory for log file {log_file:?} does not exist, trying to create it..."); + eprintln!("Directory for log file {log_file:?} does not exist, trying to create it..."); if let Err(create_dir_error) = fs::create_dir_all(log_file_dir) { - println!("failed to create directory for log file: {create_dir_error}"); - println!("trying log file anyway..."); + eprintln!("Failed to create directory for log file: {create_dir_error}"); + eprintln!("Trying log file anyway..."); } } } if uses_intro { - println!("sending logs to {log_file:?}..."); + eprintln!("Sending logs to {log_file:?}..."); } let log_file = File::options().append(true).create(true).open(log_file)?; Box::new(log_file) as BoxWrite From 147b8fa3a8969b4208598d7038b2866f060ec2f7 Mon Sep 17 00:00:00 2001 From: teor Date: Wed, 5 Jul 2023 17:11:27 +1000 Subject: [PATCH 184/265] cleanup(rust): Fix new nightly clippy warnings (#7135) * Fix "comparison is always true" warning * Add missing Send bound * cargo clippy --fix --all-features --all-targets * incorrect implementation of clone on a Copy type * cargo fmt --all --- zebra-chain/src/block/height.rs | 6 ++++- zebra-chain/src/block/merkle.rs | 2 +- zebra-chain/src/orchard/note/ciphertexts.rs | 10 +++----- zebra-chain/src/primitives/proofs/bctv14.rs | 4 +-- zebra-chain/src/primitives/proofs/groth16.rs | 4 +-- zebra-chain/src/sapling/note/ciphertexts.rs | 10 +++----- zebra-chain/src/sprout/note/ciphertexts.rs | 6 ++--- zebra-chain/src/transaction/serialize.rs | 8 +++--- zebra-chain/src/value_balance/tests/prop.rs | 2 +- zebra-chain/src/work/equihash.rs | 4 +-- zebra-consensus/src/checkpoint/list/tests.rs | 6 ++--- zebra-consensus/src/checkpoint/tests.rs | 2 +- zebra-consensus/src/transaction/tests.rs | 4 ++- zebra-network/src/config.rs | 5 +--- zebra-network/src/peer/connection.rs | 4 +-- 
zebra-network/src/peer/handshake.rs | 2 +- .../protocol/external/codec/tests/vectors.rs | 16 ++++++------ .../src/methods/get_block_template_rpcs.rs | 18 ++++++------- .../get_block_template.rs | 7 +++--- .../tests/snapshot/get_block_template_rpcs.rs | 18 +++++++------ zebra-rpc/src/methods/tests/vectors.rs | 25 ++++++++++++------- zebra-state/src/service/read/address/tx_id.rs | 5 +--- zebra-state/src/service/read/address/utxo.rs | 2 +- zebra-test/src/mock_service.rs | 1 + zebra-utils/src/bin/search-issue-refs/main.rs | 6 +---- zebrad/tests/common/checkpoints.rs | 2 +- 26 files changed, 89 insertions(+), 90 deletions(-) diff --git a/zebra-chain/src/block/height.rs b/zebra-chain/src/block/height.rs index 70bc17e818c..1b5b3408177 100644 --- a/zebra-chain/src/block/height.rs +++ b/zebra-chain/src/block/height.rs @@ -82,7 +82,11 @@ impl TryFrom for Height { /// Checks that the `height` is within the valid [`Height`] range. fn try_from(height: u32) -> Result { // Check the bounds. - if Height::MIN.0 <= height && height <= Height::MAX.0 { + // + // Clippy warns that `height >= Height::MIN.0` is always true. + assert_eq!(Height::MIN.0, 0); + + if height <= Height::MAX.0 { Ok(Height(height)) } else { Err("heights must be less than or equal to Height::MAX") diff --git a/zebra-chain/src/block/merkle.rs b/zebra-chain/src/block/merkle.rs index 9f1ef0d45f6..42762bbe6ca 100644 --- a/zebra-chain/src/block/merkle.rs +++ b/zebra-chain/src/block/merkle.rs @@ -486,7 +486,7 @@ mod tests { // Compute the AuthDataRoot with a single [0xFF; 32] digest. // Since ZIP-244 specifies that this value must be used as the auth digest of // pre-V5 transactions, then the roots must match. 
- let expect_auth_root = vec![AuthDigest([0xFF; 32])] + let expect_auth_root = [AuthDigest([0xFF; 32])] .iter() .copied() .collect::(); diff --git a/zebra-chain/src/orchard/note/ciphertexts.rs b/zebra-chain/src/orchard/note/ciphertexts.rs index 72cfeb98a13..8f857cf1444 100644 --- a/zebra-chain/src/orchard/note/ciphertexts.rs +++ b/zebra-chain/src/orchard/note/ciphertexts.rs @@ -1,3 +1,5 @@ +//! Encrypted parts of Orchard notes. + use std::{fmt, io}; use serde_big_array::BigArray; @@ -17,9 +19,7 @@ impl Copy for EncryptedNote {} impl Clone for EncryptedNote { fn clone(&self) -> Self { - let mut bytes = [0; 580]; - bytes[..].copy_from_slice(&self.0[..]); - Self(bytes) + *self } } @@ -86,9 +86,7 @@ impl Copy for WrappedNoteKey {} impl Clone for WrappedNoteKey { fn clone(&self) -> Self { - let mut bytes = [0; 80]; - bytes[..].copy_from_slice(&self.0[..]); - Self(bytes) + *self } } diff --git a/zebra-chain/src/primitives/proofs/bctv14.rs b/zebra-chain/src/primitives/proofs/bctv14.rs index abef385349c..ac1f6d5f0c9 100644 --- a/zebra-chain/src/primitives/proofs/bctv14.rs +++ b/zebra-chain/src/primitives/proofs/bctv14.rs @@ -25,9 +25,7 @@ impl Copy for Bctv14Proof {} impl Clone for Bctv14Proof { fn clone(&self) -> Self { - let mut bytes = [0; 296]; - bytes[..].copy_from_slice(&self.0[..]); - Self(bytes) + *self } } diff --git a/zebra-chain/src/primitives/proofs/groth16.rs b/zebra-chain/src/primitives/proofs/groth16.rs index 8153b2fb3a8..43f661a38fe 100644 --- a/zebra-chain/src/primitives/proofs/groth16.rs +++ b/zebra-chain/src/primitives/proofs/groth16.rs @@ -25,9 +25,7 @@ impl Copy for Groth16Proof {} impl Clone for Groth16Proof { fn clone(&self) -> Self { - let mut bytes = [0; 192]; - bytes[..].copy_from_slice(&self.0[..]); - Self(bytes) + *self } } diff --git a/zebra-chain/src/sapling/note/ciphertexts.rs b/zebra-chain/src/sapling/note/ciphertexts.rs index 47fe5606861..472dbfb0a44 100644 --- a/zebra-chain/src/sapling/note/ciphertexts.rs +++ 
b/zebra-chain/src/sapling/note/ciphertexts.rs @@ -1,3 +1,5 @@ +//! Encrypted parts of Sapling notes. + use std::{fmt, io}; use serde_big_array::BigArray; @@ -24,9 +26,7 @@ impl Copy for EncryptedNote {} impl Clone for EncryptedNote { fn clone(&self) -> Self { - let mut bytes = [0; 580]; - bytes[..].copy_from_slice(&self.0[..]); - Self(bytes) + *self } } @@ -73,9 +73,7 @@ impl Copy for WrappedNoteKey {} impl Clone for WrappedNoteKey { fn clone(&self) -> Self { - let mut bytes = [0; 80]; - bytes[..].copy_from_slice(&self.0[..]); - Self(bytes) + *self } } diff --git a/zebra-chain/src/sprout/note/ciphertexts.rs b/zebra-chain/src/sprout/note/ciphertexts.rs index 37628c2c965..7fd3bb42b72 100644 --- a/zebra-chain/src/sprout/note/ciphertexts.rs +++ b/zebra-chain/src/sprout/note/ciphertexts.rs @@ -1,3 +1,5 @@ +//! Encrypted parts of Sprout notes. + use std::{fmt, io}; use serde::{Deserialize, Serialize}; @@ -25,9 +27,7 @@ impl Copy for EncryptedNote {} impl Clone for EncryptedNote { fn clone(&self) -> Self { - let mut bytes = [0; 601]; - bytes[..].copy_from_slice(&self.0[..]); - Self(bytes) + *self } } diff --git a/zebra-chain/src/transaction/serialize.rs b/zebra-chain/src/transaction/serialize.rs index f79244da6ea..e083921b77c 100644 --- a/zebra-chain/src/transaction/serialize.rs +++ b/zebra-chain/src/transaction/serialize.rs @@ -268,15 +268,15 @@ impl ZcashDeserialize for Option> { // Create shielded spends from deserialized parts let spends: Vec<_> = spend_prefixes .into_iter() - .zip(spend_proofs.into_iter()) - .zip(spend_sigs.into_iter()) + .zip(spend_proofs) + .zip(spend_sigs) .map(|((prefix, proof), sig)| Spend::::from_v5_parts(prefix, proof, sig)) .collect(); // Create shielded outputs from deserialized parts let outputs = output_prefixes .into_iter() - .zip(output_proofs.into_iter()) + .zip(output_proofs) .map(|(prefix, proof)| Output::from_v5_parts(prefix, proof)) .collect(); @@ -427,7 +427,7 @@ impl ZcashDeserialize for Option { // Create the AuthorizedAction 
from deserialized parts let authorized_actions: Vec = actions .into_iter() - .zip(sigs.into_iter()) + .zip(sigs) .map(|(action, spend_auth_sig)| { orchard::AuthorizedAction::from_parts(action, spend_auth_sig) }) diff --git a/zebra-chain/src/value_balance/tests/prop.rs b/zebra-chain/src/value_balance/tests/prop.rs index 9ee2fb1e634..248824157d0 100644 --- a/zebra-chain/src/value_balance/tests/prop.rs +++ b/zebra-chain/src/value_balance/tests/prop.rs @@ -79,7 +79,7 @@ proptest! { ) { let _init_guard = zebra_test::init(); - let collection = vec![value_balance1, value_balance2]; + let collection = [value_balance1, value_balance2]; let transparent = value_balance1.transparent + value_balance2.transparent; let sprout = value_balance1.sprout + value_balance2.sprout; diff --git a/zebra-chain/src/work/equihash.rs b/zebra-chain/src/work/equihash.rs index 731d9497afd..e8b73b1614a 100644 --- a/zebra-chain/src/work/equihash.rs +++ b/zebra-chain/src/work/equihash.rs @@ -87,9 +87,7 @@ impl Copy for Solution {} impl Clone for Solution { fn clone(&self) -> Self { - let mut bytes = [0; SOLUTION_SIZE]; - bytes[..].copy_from_slice(&self.0[..]); - Self(bytes) + *self } } diff --git a/zebra-consensus/src/checkpoint/list/tests.rs b/zebra-consensus/src/checkpoint/list/tests.rs index 9ad1febeb7a..da07c689464 100644 --- a/zebra-consensus/src/checkpoint/list/tests.rs +++ b/zebra-consensus/src/checkpoint/list/tests.rs @@ -103,7 +103,7 @@ fn checkpoint_list_no_genesis_fail() -> Result<(), BoxError> { fn checkpoint_list_null_hash_fail() -> Result<(), BoxError> { let _init_guard = zebra_test::init(); - let checkpoint_data = vec![(block::Height(0), block::Hash([0; 32]))]; + let checkpoint_data = [(block::Height(0), block::Hash([0; 32]))]; // Make a checkpoint list containing the non-genesis block let checkpoint_list: BTreeMap = @@ -119,7 +119,7 @@ fn checkpoint_list_null_hash_fail() -> Result<(), BoxError> { fn checkpoint_list_bad_height_fail() -> Result<(), BoxError> { let _init_guard = 
zebra_test::init(); - let checkpoint_data = vec![( + let checkpoint_data = [( block::Height(block::Height::MAX.0 + 1), block::Hash([1; 32]), )]; @@ -131,7 +131,7 @@ fn checkpoint_list_bad_height_fail() -> Result<(), BoxError> { "a checkpoint list with an invalid block height (block::Height::MAX + 1) should fail", ); - let checkpoint_data = vec![(block::Height(u32::MAX), block::Hash([1; 32]))]; + let checkpoint_data = [(block::Height(u32::MAX), block::Hash([1; 32]))]; // Make a checkpoint list containing the non-genesis block let checkpoint_list: BTreeMap = diff --git a/zebra-consensus/src/checkpoint/tests.rs b/zebra-consensus/src/checkpoint/tests.rs index 2dbefab1979..9fb29048c40 100644 --- a/zebra-consensus/src/checkpoint/tests.rs +++ b/zebra-consensus/src/checkpoint/tests.rs @@ -254,7 +254,7 @@ async fn continuous_blockchain( // - checkpoints start at genesis // - checkpoints end at the end of the range (there's no point in having extra blocks) let expected_max_height = block::Height((blockchain_len - 1).try_into().unwrap()); - let checkpoint_list = vec![ + let checkpoint_list = [ &blockchain[0], &blockchain[blockchain_len / 3], &blockchain[blockchain_len / 2], diff --git a/zebra-consensus/src/transaction/tests.rs b/zebra-consensus/src/transaction/tests.rs index aad207836a6..87ce0f7e3bf 100644 --- a/zebra-consensus/src/transaction/tests.rs +++ b/zebra-consensus/src/transaction/tests.rs @@ -782,7 +782,9 @@ async fn state_error_converted_correctly() { "expected matching state and transaction errors" ); - let TransactionError::ValidateContextError(propagated_validate_context_error) = transaction_error else { + let TransactionError::ValidateContextError(propagated_validate_context_error) = + transaction_error + else { panic!("should be a ValidateContextError variant"); }; diff --git a/zebra-network/src/config.rs b/zebra-network/src/config.rs index 49798137b58..15922f81486 100644 --- a/zebra-network/src/config.rs +++ b/zebra-network/src/config.rs @@ -228,10 +228,7 @@ 
impl Config { // Ignore disk errors because the cache is optional and the method already logs them. let disk_peers = self.load_peer_cache().await.unwrap_or_default(); - dns_peers - .into_iter() - .chain(disk_peers.into_iter()) - .collect() + dns_peers.into_iter().chain(disk_peers).collect() } /// Concurrently resolves `peers` into zero or more IP addresses, with a diff --git a/zebra-network/src/peer/connection.rs b/zebra-network/src/peer/connection.rs index 71838366c35..2266085812e 100644 --- a/zebra-network/src/peer/connection.rs +++ b/zebra-network/src/peer/connection.rs @@ -1531,8 +1531,8 @@ where /// to be disconnected. fn overload_drop_connection_probability(now: Instant, prev: Option) -> f32 { let Some(prev) = prev else { - return MIN_OVERLOAD_DROP_PROBABILITY; - }; + return MIN_OVERLOAD_DROP_PROBABILITY; + }; let protection_fraction_since_last_overload = (now - prev).as_secs_f32() / OVERLOAD_PROTECTION_INTERVAL.as_secs_f32(); diff --git a/zebra-network/src/peer/handshake.rs b/zebra-network/src/peer/handshake.rs index 01cfe98e859..692c9f56135 100644 --- a/zebra-network/src/peer/handshake.rs +++ b/zebra-network/src/peer/handshake.rs @@ -1160,7 +1160,7 @@ pub(crate) async fn register_inventory_status( let _ = inv_collector .send(InventoryChange::new_available(*advertised, transient_addr)); } - [advertised @ ..] => { + advertised => { let advertised = advertised .iter() .filter(|advertised| advertised.unmined_tx_id().is_some()); diff --git a/zebra-network/src/protocol/external/codec/tests/vectors.rs b/zebra-network/src/protocol/external/codec/tests/vectors.rs index 89c6b08f2a0..74f46ec7f52 100644 --- a/zebra-network/src/protocol/external/codec/tests/vectors.rs +++ b/zebra-network/src/protocol/external/codec/tests/vectors.rs @@ -467,12 +467,13 @@ fn version_user_agent_size_limits() { // Encode the rest of the message onto `bytes` (relay should be optional) { let Message::Version(VersionMessage { - user_agent, - start_height, - .. 
- }) = invalid_version_message else { - unreachable!("version_message is a version"); - }; + user_agent, + start_height, + .. + }) = invalid_version_message + else { + unreachable!("version_message is a version"); + }; user_agent .zcash_serialize(&mut writer) @@ -553,7 +554,8 @@ fn reject_command_and_reason_size_limits() { ccode, reason, data, - } = invalid_reject_message else { + } = invalid_reject_message + else { unreachable!("invalid_reject_message is a reject"); }; diff --git a/zebra-rpc/src/methods/get_block_template_rpcs.rs b/zebra-rpc/src/methods/get_block_template_rpcs.rs index 5f0ff5ce274..ca861e5440b 100644 --- a/zebra-rpc/src/methods/get_block_template_rpcs.rs +++ b/zebra-rpc/src/methods/get_block_template_rpcs.rs @@ -521,15 +521,15 @@ where // // Optional TODO: // - add a `MempoolChange` type with an `async changed()` method (like `ChainTip`) - let Some(mempool_txs) = - fetch_mempool_transactions(mempool.clone(), tip_hash) - .await? - // If the mempool and state responses are out of sync: - // - if we are not long polling, omit mempool transactions from the template, - // - if we are long polling, continue to the next iteration of the loop to make fresh state and mempool requests. - .or_else(|| client_long_poll_id.is_none().then(Vec::new)) else { - continue; - }; + let Some(mempool_txs) = fetch_mempool_transactions(mempool.clone(), tip_hash) + .await? + // If the mempool and state responses are out of sync: + // - if we are not long polling, omit mempool transactions from the template, + // - if we are long polling, continue to the next iteration of the loop to make fresh state and mempool requests. 
+ .or_else(|| client_long_poll_id.is_none().then(Vec::new)) + else { + continue; + }; // - Long poll ID calculation let server_long_poll_id = LongPollInput::new( diff --git a/zebra-rpc/src/methods/get_block_template_rpcs/get_block_template.rs b/zebra-rpc/src/methods/get_block_template_rpcs/get_block_template.rs index 04a3fa9c8e1..0e496ad37fa 100644 --- a/zebra-rpc/src/methods/get_block_template_rpcs/get_block_template.rs +++ b/zebra-rpc/src/methods/get_block_template_rpcs/get_block_template.rs @@ -39,8 +39,8 @@ pub use crate::methods::get_block_template_rpcs::types::get_block_template::*; /// Returns an error if there's a mismatch between the mode and whether `data` is provided. pub fn check_parameters(parameters: &Option) -> Result<()> { let Some(parameters) = parameters else { - return Ok(()) - }; + return Ok(()); + }; match parameters { JsonParameters { @@ -267,7 +267,8 @@ where let mempool::Response::FullTransactions { transactions, last_seen_tip_hash, - } = response else { + } = response + else { unreachable!("unmatched response to a mempool::FullTransactions request") }; diff --git a/zebra-rpc/src/methods/tests/snapshot/get_block_template_rpcs.rs b/zebra-rpc/src/methods/tests/snapshot/get_block_template_rpcs.rs index f941b7e2771..ab57b7b1e10 100644 --- a/zebra-rpc/src/methods/tests/snapshot/get_block_template_rpcs.rs +++ b/zebra-rpc/src/methods/tests/snapshot/get_block_template_rpcs.rs @@ -286,10 +286,11 @@ pub async fn test_responses( mock_read_state_request_handler, ); - let get_block_template::Response::TemplateMode(get_block_template) = get_block_template - .expect("unexpected error in getblocktemplate RPC call") else { - panic!("this getblocktemplate call without parameters should return the `TemplateMode` variant of the response") - }; + let get_block_template::Response::TemplateMode(get_block_template) = + get_block_template.expect("unexpected error in getblocktemplate RPC call") + else { + panic!("this getblocktemplate call without parameters should 
return the `TemplateMode` variant of the response") + }; let coinbase_tx: Transaction = get_block_template .coinbase_txn @@ -330,10 +331,11 @@ pub async fn test_responses( mock_read_state_request_handler, ); - let get_block_template::Response::TemplateMode(get_block_template) = get_block_template - .expect("unexpected error in getblocktemplate RPC call") else { - panic!("this getblocktemplate call without parameters should return the `TemplateMode` variant of the response") - }; + let get_block_template::Response::TemplateMode(get_block_template) = + get_block_template.expect("unexpected error in getblocktemplate RPC call") + else { + panic!("this getblocktemplate call without parameters should return the `TemplateMode` variant of the response") + }; let coinbase_tx: Transaction = get_block_template .coinbase_txn diff --git a/zebra-rpc/src/methods/tests/vectors.rs b/zebra-rpc/src/methods/tests/vectors.rs index 255acafd79f..b5892a3583b 100644 --- a/zebra-rpc/src/methods/tests/vectors.rs +++ b/zebra-rpc/src/methods/tests/vectors.rs @@ -457,7 +457,12 @@ async fn rpc_getrawtransaction() { } let (response, _) = futures::join!(get_tx_verbose_1_req, make_mempool_req(tx_hash)); - let GetRawTransaction::Object { hex, height, confirmations } = response.expect("We should have a GetRawTransaction struct") else { + let GetRawTransaction::Object { + hex, + height, + confirmations, + } = response.expect("We should have a GetRawTransaction struct") + else { unreachable!("Should return a Raw enum") }; @@ -1291,10 +1296,11 @@ async fn rpc_getblocktemplate_mining_address(use_p2pkh: bool) { make_mock_read_state_request_handler(), ); - let get_block_template::Response::TemplateMode(get_block_template) = get_block_template - .expect("unexpected error in getblocktemplate RPC call") else { - panic!("this getblocktemplate call without parameters should return the `TemplateMode` variant of the response") - }; + let get_block_template::Response::TemplateMode(get_block_template) = + 
get_block_template.expect("unexpected error in getblocktemplate RPC call") + else { + panic!("this getblocktemplate call without parameters should return the `TemplateMode` variant of the response") + }; assert_eq!( get_block_template.capabilities, @@ -1456,10 +1462,11 @@ async fn rpc_getblocktemplate_mining_address(use_p2pkh: bool) { make_mock_read_state_request_handler(), ); - let get_block_template::Response::TemplateMode(get_block_template) = get_block_template - .expect("unexpected error in getblocktemplate RPC call") else { - panic!("this getblocktemplate call without parameters should return the `TemplateMode` variant of the response") - }; + let get_block_template::Response::TemplateMode(get_block_template) = + get_block_template.expect("unexpected error in getblocktemplate RPC call") + else { + panic!("this getblocktemplate call without parameters should return the `TemplateMode` variant of the response") + }; // mempool transactions should be omitted if the tip hash in the GetChainInfo response from the state // does not match the `last_seen_tip_hash` in the FullTransactions response from the mempool. diff --git a/zebra-state/src/service/read/address/tx_id.rs b/zebra-state/src/service/read/address/tx_id.rs index 560f7b101c4..27b9a9b39dc 100644 --- a/zebra-state/src/service/read/address/tx_id.rs +++ b/zebra-state/src/service/read/address/tx_id.rs @@ -276,8 +276,5 @@ fn apply_tx_id_changes( ) -> BTreeMap { // Correctness: compensate for inconsistent tx IDs finalized blocks across multiple addresses, // by combining them with overlapping non-finalized block tx IDs. 
- finalized_tx_ids - .into_iter() - .chain(chain_tx_ids.into_iter()) - .collect() + finalized_tx_ids.into_iter().chain(chain_tx_ids).collect() } diff --git a/zebra-state/src/service/read/address/utxo.rs b/zebra-state/src/service/read/address/utxo.rs index 7ee5cb4f110..30bcad2c555 100644 --- a/zebra-state/src/service/read/address/utxo.rs +++ b/zebra-state/src/service/read/address/utxo.rs @@ -370,7 +370,7 @@ fn apply_utxo_changes( // to compensate for overlapping finalized and non-finalized blocks. finalized_utxos .into_iter() - .chain(created_chain_utxos.into_iter()) + .chain(created_chain_utxos) .filter(|(utxo_location, _output)| !spent_chain_utxos.contains(utxo_location)) .collect() } diff --git a/zebra-test/src/mock_service.rs b/zebra-test/src/mock_service.rs index d92e6f8b4ba..25f379034e6 100644 --- a/zebra-test/src/mock_service.rs +++ b/zebra-test/src/mock_service.rs @@ -146,6 +146,7 @@ pub struct ResponseSender { impl Service for MockService where + Request: Send + 'static, Response: Send + 'static, Error: Send + 'static, { diff --git a/zebra-utils/src/bin/search-issue-refs/main.rs b/zebra-utils/src/bin/search-issue-refs/main.rs index 2af6a70769a..cc71ee198a0 100644 --- a/zebra-utils/src/bin/search-issue-refs/main.rs +++ b/zebra-utils/src/bin/search-issue-refs/main.rs @@ -288,11 +288,7 @@ to create a github token." 
let mut num_closed_issues = 0; while let Some(res) = github_api_requests.join_next().await { - let Ok(( - res, - id, - issue_refs, - )) = res else { + let Ok((res, id, issue_refs)) = res else { println!("warning: failed to join api request thread/task"); continue; }; diff --git a/zebrad/tests/common/checkpoints.rs b/zebrad/tests/common/checkpoints.rs index b083d2126ca..cc5e6be40f9 100644 --- a/zebrad/tests/common/checkpoints.rs +++ b/zebrad/tests/common/checkpoints.rs @@ -303,7 +303,7 @@ impl ZebraCheckpointsTestDirExt for TempDir { let zebra_checkpoints = self.spawn_child_with_command(zebra_checkpoints_path, args.clone()); - let Err(system_path_error) = zebra_checkpoints else { + let Err(system_path_error) = zebra_checkpoints else { return zebra_checkpoints; }; From 7e7ce2ba712e07971d96ef4c44b8af9092041ae0 Mon Sep 17 00:00:00 2001 From: Arya Date: Wed, 5 Jul 2023 05:28:05 -0400 Subject: [PATCH 185/265] fix(commands): Require an argument name before the list of tracing filters when used without a subcommand (#7056) * require flag for tracing filters when subcmd is omitted. * Fixes test, updates Changelog * Adds code comment * Apply suggestions from code review Co-authored-by: teor --------- Co-authored-by: teor --- CHANGELOG.md | 7 +++++++ zebrad/src/commands/entry_point.rs | 31 ++++++++++++------------------ zebrad/src/commands/tests.rs | 3 ++- 3 files changed, 21 insertions(+), 20 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 4240c6514ec..708809ac43f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,6 +6,13 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org). +## [Zebra 1.0.1](https://github.com/ZcashFoundation/zebra/releases/tag/v1.0.1) - 2023-XX-XX + +### Breaking Changes + +- Zebra now detects subcommand name typos on the command-line. 
If you want to give Zebra a list of tracing filters, use `zebrad start --filters debug,...` ([#7056](https://github.com/ZcashFoundation/zebra/pull/7056)) + + ## [Zebra 1.0.1](https://github.com/ZcashFoundation/zebra/releases/tag/v1.0.1) - 2023-07-03 Zebra's first patch release fixes multiple peer connection security issues and panics. It also significantly reduces Zebra's CPU usage. We recommend that all users upgrade to Zebra 1.0.1 or later. diff --git a/zebrad/src/commands/entry_point.rs b/zebrad/src/commands/entry_point.rs index 5888c0e5ef4..3955dadb35a 100644 --- a/zebrad/src/commands/entry_point.rs +++ b/zebrad/src/commands/entry_point.rs @@ -1,7 +1,5 @@ //! Zebrad EntryPoint -use std::cmp::min; - use abscissa_core::{Command, Configurable, FrameworkError, Runnable}; use clap::Parser; use std::{ffi::OsString, path::PathBuf}; @@ -42,7 +40,7 @@ pub struct EntryPoint { /// Filter strings which override the config file and defaults // This can be applied to the default start command if no subcommand is provided. - #[clap(help = "tracing filters which override the zebrad.toml config")] + #[clap(long, help = "tracing filters which override the zebrad.toml config")] filters: Vec, } @@ -63,28 +61,23 @@ impl EntryPoint { "start" } + /// Checks if the provided arguments include a subcommand + fn should_add_default_subcommand(&self) -> bool { + self.cmd.is_none() + } + /// Process command arguments and insert the default subcommand /// if no subcommand is provided. 
pub fn process_cli_args(mut args: Vec) -> clap::error::Result> { - // Check if the provided arguments include a subcommand - let should_add_default_subcommand = EntryPoint::try_parse_from(&args)?.cmd.is_none(); + let entry_point = EntryPoint::try_parse_from(&args)?; // Add the default subcommand to args after the top-level args if cmd is None - if should_add_default_subcommand { - // try_parse_from currently produces an error if the first argument is not the binary name, - let mut num_top_level_args = 1; - - // update last_top_level_arg_idx to the number of top-level args - for (idx, arg) in args.iter().enumerate() { - num_top_level_args = match arg.to_str() { - Some("--verbose" | "-v" | "--version" | "-V" | "--help") => idx + 1, - Some("--config" | "-c") => idx + 2, - _ => num_top_level_args, - } + if entry_point.should_add_default_subcommand() { + args.push(EntryPoint::default_cmd_as_str().into()); + // This duplicates the top-level filters args, but the tracing component only checks `StartCmd.filters`. 
+ for filter in entry_point.filters { + args.push(filter.into()) } - - num_top_level_args = min(num_top_level_args, args.len()); - args.insert(num_top_level_args, EntryPoint::default_cmd_as_str().into()); } Ok(args) diff --git a/zebrad/src/commands/tests.rs b/zebrad/src/commands/tests.rs index f87c48b119a..edd019a9022 100644 --- a/zebrad/src/commands/tests.rs +++ b/zebrad/src/commands/tests.rs @@ -16,7 +16,8 @@ fn args_with_subcommand_pass_through() { (true, false, false, vec!["zebrad", "--help"]), (false, true, false, vec!["zebrad", "start"]), (false, true, true, vec!["zebrad", "-v", "start"]), - (false, true, false, vec!["zebrad", "warn"]), + (false, true, false, vec!["zebrad", "--filters", "warn"]), + (true, false, false, vec!["zebrad", "warn"]), (false, true, false, vec!["zebrad", "start", "warn"]), (true, false, false, vec!["zebrad", "help", "warn"]), ]; From c8d411017dc0be9d56da833588577a08ec709c33 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 5 Jul 2023 20:51:56 +0000 Subject: [PATCH 186/265] build(deps): bump console-subscriber from 0.1.9 to 0.1.10 (#7140) Bumps [console-subscriber](https://github.com/tokio-rs/console) from 0.1.9 to 0.1.10. - [Release notes](https://github.com/tokio-rs/console/releases) - [Commits](https://github.com/tokio-rs/console/compare/console-subscriber-v0.1.9...console-subscriber-v0.1.10) --- updated-dependencies: - dependency-name: console-subscriber dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- zebrad/Cargo.toml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 036d59071be..8e7f37c16ca 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -884,9 +884,9 @@ dependencies = [ [[package]] name = "console-subscriber" -version = "0.1.9" +version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57ab2224a0311582eb03adba4caaf18644f7b1f10a760803a803b9b605187fc7" +checksum = "d4cf42660ac07fcebed809cfe561dd8730bcd35b075215e6479c516bcd0d11cb" dependencies = [ "console-api", "crossbeam-channel", diff --git a/zebrad/Cargo.toml b/zebrad/Cargo.toml index 10c90d6421a..d29bf034991 100644 --- a/zebrad/Cargo.toml +++ b/zebrad/Cargo.toml @@ -220,7 +220,7 @@ proptest = { version = "1.2.0", optional = true } proptest-derive = { version = "0.3.0", optional = true } # test feature tokio-console -console-subscriber = { version = "0.1.8", optional = true } +console-subscriber = { version = "0.1.10", optional = true } [build-dependencies] vergen = { version = "8.2.3", default-features = false, features = ["cargo", "git", "git2", "rustc"] } From 9cff85d1a426b04507739466ef5fcf8647495e41 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 5 Jul 2023 20:52:34 +0000 Subject: [PATCH 187/265] build(deps): bump tj-actions/changed-files from 37.0.4 to 37.0.5 (#7117) Bumps [tj-actions/changed-files](https://github.com/tj-actions/changed-files) from 37.0.4 to 37.0.5. 
- [Release notes](https://github.com/tj-actions/changed-files/releases) - [Changelog](https://github.com/tj-actions/changed-files/blob/main/HISTORY.md) - [Commits](https://github.com/tj-actions/changed-files/compare/v37.0.4...v37.0.5) --- updated-dependencies: - dependency-name: tj-actions/changed-files dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/lint.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 67386acc0f6..02c5e08b958 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -37,7 +37,7 @@ jobs: - name: Rust files id: changed-files-rust - uses: tj-actions/changed-files@v37.0.4 + uses: tj-actions/changed-files@v37.0.5 with: files: | **/*.rs @@ -49,7 +49,7 @@ jobs: - name: Workflow files id: changed-files-workflows - uses: tj-actions/changed-files@v37.0.4 + uses: tj-actions/changed-files@v37.0.5 with: files: | .github/workflows/*.yml From 730b10a27873b9be54a27ba496fd6e737855928b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 5 Jul 2023 20:53:05 +0000 Subject: [PATCH 188/265] build(deps): bump thiserror from 1.0.40 to 1.0.41 (#7153) Bumps [thiserror](https://github.com/dtolnay/thiserror) from 1.0.40 to 1.0.41. - [Release notes](https://github.com/dtolnay/thiserror/releases) - [Commits](https://github.com/dtolnay/thiserror/compare/1.0.40...1.0.41) --- updated-dependencies: - dependency-name: thiserror dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 96 +++++++++++++++++++------------------- zebra-chain/Cargo.toml | 2 +- zebra-consensus/Cargo.toml | 2 +- zebra-network/Cargo.toml | 2 +- zebra-rpc/Cargo.toml | 2 +- zebra-script/Cargo.toml | 2 +- zebra-state/Cargo.toml | 2 +- zebra-test/Cargo.toml | 2 +- zebra-utils/Cargo.toml | 2 +- zebrad/Cargo.toml | 2 +- 10 files changed, 57 insertions(+), 57 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 8e7f37c16ca..e4c1612c761 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -36,7 +36,7 @@ checksum = "55bfb86e57d13c06e482c570826ddcddcc8f07fab916760e8911141d4fda8b62" dependencies = [ "ident_case", "proc-macro2 1.0.63", - "quote 1.0.28", + "quote 1.0.29", "syn 1.0.109", "synstructure", ] @@ -258,7 +258,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" dependencies = [ "proc-macro2 1.0.63", - "quote 1.0.28", + "quote 1.0.29", "syn 2.0.23", ] @@ -269,7 +269,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b9ccdd8f2a161be9bd5c023df56f1b2a0bd1d83872ae53b71a84a12c9bf6e842" dependencies = [ "proc-macro2 1.0.63", - "quote 1.0.28", + "quote 1.0.29", "syn 2.0.23", ] @@ -425,7 +425,7 @@ dependencies = [ "peeking_take_while", "prettyplease 0.2.6", "proc-macro2 1.0.63", - "quote 1.0.28", + "quote 1.0.29", "regex", "rustc-hash", "shlex", @@ -803,7 +803,7 @@ checksum = "b8cd2b2a819ad6eec39e8f1d6b53001af1e5469f8c177579cdaeb313115b825f" dependencies = [ "heck 0.4.1", "proc-macro2 1.0.63", - "quote 1.0.28", + "quote 1.0.29", "syn 2.0.23", ] @@ -1077,7 +1077,7 @@ checksum = "b677bcf759c79656defee3b0374aeff759122d3fc80edb0b77eeb0fd06e8fd20" dependencies = [ "codespan-reporting", "proc-macro2 1.0.63", - "quote 1.0.28", + "quote 1.0.29", "syn 2.0.23", ] @@ -1094,7 +1094,7 @@ source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "2345488264226bf682893e25de0769f3360aac9957980ec49361b083ddaa5bc5" dependencies = [ "proc-macro2 1.0.63", - "quote 1.0.28", + "quote 1.0.29", "syn 2.0.23", ] @@ -1127,7 +1127,7 @@ dependencies = [ "fnv", "ident_case", "proc-macro2 1.0.63", - "quote 1.0.28", + "quote 1.0.29", "strsim 0.10.0", "syn 1.0.109", ] @@ -1141,7 +1141,7 @@ dependencies = [ "fnv", "ident_case", "proc-macro2 1.0.63", - "quote 1.0.28", + "quote 1.0.29", "strsim 0.10.0", "syn 2.0.23", ] @@ -1153,7 +1153,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c972679f83bdf9c42bd905396b6c3588a843a17f0f16dfcfa3e2c5d57441835" dependencies = [ "darling_core 0.13.4", - "quote 1.0.28", + "quote 1.0.29", "syn 1.0.109", ] @@ -1164,7 +1164,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "29a358ff9f12ec09c3e61fef9b5a9902623a695a46a917b07f269bff1445611a" dependencies = [ "darling_core 0.20.1", - "quote 1.0.28", + "quote 1.0.29", "syn 2.0.23", ] @@ -1235,7 +1235,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "487585f4d0c6655fe74905e2504d8ad6908e4db67f744eb140876906c2f3175d" dependencies = [ "proc-macro2 1.0.63", - "quote 1.0.28", + "quote 1.0.29", "syn 2.0.23", ] @@ -1556,7 +1556,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72" dependencies = [ "proc-macro2 1.0.63", - "quote 1.0.28", + "quote 1.0.29", "syn 2.0.23", ] @@ -2026,7 +2026,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "11d7a9f6330b71fea57921c9b61c47ee6e84f72d394754eff6163ae67e7395eb" dependencies = [ "proc-macro2 1.0.63", - "quote 1.0.28", + "quote 1.0.29", "syn 1.0.109", ] @@ -2226,7 +2226,7 @@ checksum = "5b939a78fa820cdfcb7ee7484466746a7377760970f6f9c6fe19f9edcc8a38d2" dependencies = [ "proc-macro-crate 0.1.5", "proc-macro2 1.0.63", - "quote 1.0.28", + 
"quote 1.0.29", "syn 1.0.109", ] @@ -2517,7 +2517,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "731f8ecebd9f3a4aa847dfe75455e4757a45da40a7793d2f0b1f9b6ed18b23f3" dependencies = [ "proc-macro2 1.0.63", - "quote 1.0.28", + "quote 1.0.29", "syn 1.0.109", ] @@ -2528,7 +2528,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ddece26afd34c31585c74a4db0630c376df271c285d682d1e55012197830b6df" dependencies = [ "proc-macro2 1.0.63", - "quote 1.0.28", + "quote 1.0.29", "syn 2.0.23", ] @@ -2792,7 +2792,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2 1.0.63", - "quote 1.0.28", + "quote 1.0.29", "syn 2.0.23", ] @@ -2922,7 +2922,7 @@ checksum = "86b26a931f824dd4eca30b3e43bb4f31cd5f0d3a403c5f5ff27106b805bfde7b" dependencies = [ "proc-macro-crate 1.3.1", "proc-macro2 1.0.63", - "quote 1.0.28", + "quote 1.0.29", "syn 1.0.109", ] @@ -3051,7 +3051,7 @@ dependencies = [ "pest", "pest_meta", "proc-macro2 1.0.63", - "quote 1.0.28", + "quote 1.0.29", "syn 2.0.23", ] @@ -3092,7 +3092,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec2e072ecce94ec471b13398d5402c188e76ac03cf74dd1a975161b23a3f6d9c" dependencies = [ "proc-macro2 1.0.63", - "quote 1.0.28", + "quote 1.0.29", "syn 2.0.23", ] @@ -3238,7 +3238,7 @@ checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" dependencies = [ "proc-macro-error-attr", "proc-macro2 1.0.63", - "quote 1.0.28", + "quote 1.0.29", "syn 1.0.109", "version_check", ] @@ -3250,7 +3250,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" dependencies = [ "proc-macro2 1.0.63", - "quote 1.0.28", + "quote 1.0.29", "version_check", ] @@ -3344,7 +3344,7 @@ dependencies = [ "anyhow", "itertools 0.10.5", "proc-macro2 1.0.63", - 
"quote 1.0.28", + "quote 1.0.29", "syn 1.0.109", ] @@ -3407,7 +3407,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "608c156fd8e97febc07dc9c2e2c80bf74cfc6ef26893eae3daf8bc2bc94a4b7f" dependencies = [ "proc-macro2 1.0.63", - "quote 1.0.28", + "quote 1.0.29", "syn 1.0.109", ] @@ -3422,9 +3422,9 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.28" +version = "1.0.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b9ab9c7eadfd8df19006f1cf1a4aed13540ed5cbc047010ece5826e10825488" +checksum = "573015e8ab27661678357f27dc26460738fd2b6c86e46f386fde94cb5d913105" dependencies = [ "proc-macro2 1.0.63", ] @@ -4083,7 +4083,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5dd83d6dde2b6b2d466e14d9d1acce8816dedee94f735eac6395808b3483c6d6" dependencies = [ "proc-macro2 1.0.63", - "quote 1.0.28", + "quote 1.0.29", "syn 2.0.23", ] @@ -4154,7 +4154,7 @@ checksum = "e182d6ec6f05393cc0e5ed1bf81ad6db3a8feedf8ee515ecdd369809bcce8082" dependencies = [ "darling 0.13.4", "proc-macro2 1.0.63", - "quote 1.0.28", + "quote 1.0.29", "syn 1.0.109", ] @@ -4166,7 +4166,7 @@ checksum = "edc7d5d3932fb12ce722ee5e64dd38c504efba37567f0c402f6ca728c3b8b070" dependencies = [ "darling 0.20.1", "proc-macro2 1.0.63", - "quote 1.0.28", + "quote 1.0.29", "syn 2.0.23", ] @@ -4279,7 +4279,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5bdfb59103e43a0f99a346b57860d50f2138a7008d08acd964e9ac0fef3ae9a5" dependencies = [ "proc-macro2 1.0.63", - "quote 1.0.28", + "quote 1.0.29", "syn 1.0.109", ] @@ -4342,7 +4342,7 @@ dependencies = [ "heck 0.3.3", "proc-macro-error", "proc-macro2 1.0.63", - "quote 1.0.28", + "quote 1.0.29", "syn 1.0.109", ] @@ -4370,7 +4370,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" dependencies = [ "proc-macro2 1.0.63", - "quote 1.0.28", + "quote 1.0.29", 
"unicode-ident", ] @@ -4381,7 +4381,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "59fb7d6d8281a51045d62b8eb3a7d1ce347b76f312af50cd3dc0af39c87c1737" dependencies = [ "proc-macro2 1.0.63", - "quote 1.0.28", + "quote 1.0.29", "unicode-ident", ] @@ -4398,7 +4398,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f36bdaa60a83aca3921b5259d5400cbf5e90fc51931376a9bd4a0eb79aa7210f" dependencies = [ "proc-macro2 1.0.63", - "quote 1.0.28", + "quote 1.0.29", "syn 1.0.109", "unicode-xid 0.2.4", ] @@ -4443,21 +4443,21 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.40" +version = "1.0.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "978c9a314bd8dc99be594bc3c175faaa9794be04a5a5e153caba6915336cebac" +checksum = "c16a64ba9387ef3fdae4f9c1a7f07a0997fce91985c0336f1ddc1822b3b37802" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.40" +version = "1.0.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9456a42c5b0d803c8cd86e73dd7cc9edd429499f37a3550d286d5e86720569f" +checksum = "d14928354b01c4d6a4f0e549069adef399a284e7995c7ccca94e8a07a5346c59" dependencies = [ "proc-macro2 1.0.63", - "quote 1.0.28", + "quote 1.0.29", "syn 2.0.23", ] @@ -4563,7 +4563,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" dependencies = [ "proc-macro2 1.0.63", - "quote 1.0.28", + "quote 1.0.29", "syn 2.0.23", ] @@ -4720,7 +4720,7 @@ dependencies = [ "prettyplease 0.1.25", "proc-macro2 1.0.63", "prost-build", - "quote 1.0.28", + "quote 1.0.29", "syn 1.0.109", ] @@ -4838,7 +4838,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0f57e3ca2a01450b1a921183a9c9cbfda207fd822cef4ccb00a65402cbba7a74" dependencies = [ "proc-macro2 1.0.63", - "quote 1.0.28", + "quote 1.0.29", "syn 2.0.23", ] @@ -4942,7 
+4942,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "258bc1c4f8e2e73a977812ab339d503e6feeb92700f6d07a6de4d321522d5c08" dependencies = [ "lazy_static", - "quote 1.0.28", + "quote 1.0.29", "syn 1.0.109", ] @@ -5208,7 +5208,7 @@ dependencies = [ "log", "once_cell", "proc-macro2 1.0.63", - "quote 1.0.28", + "quote 1.0.29", "syn 2.0.23", "wasm-bindgen-shared", ] @@ -5231,7 +5231,7 @@ version = "0.2.86" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "14d6b024f1a526bb0234f52840389927257beb670610081360e5a03c5df9c258" dependencies = [ - "quote 1.0.28", + "quote 1.0.29", "wasm-bindgen-macro-support", ] @@ -5242,7 +5242,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e128beba882dd1eb6200e1dc92ae6c5dbaa4311aa7bb211ca035779e5efc39f8" dependencies = [ "proc-macro2 1.0.63", - "quote 1.0.28", + "quote 1.0.29", "syn 2.0.23", "wasm-bindgen-backend", "wasm-bindgen-shared", @@ -6048,6 +6048,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2 1.0.63", - "quote 1.0.28", + "quote 1.0.29", "syn 2.0.23", ] diff --git a/zebra-chain/Cargo.toml b/zebra-chain/Cargo.toml index 40a86f0d385..4dda603ad95 100644 --- a/zebra-chain/Cargo.toml +++ b/zebra-chain/Cargo.toml @@ -85,7 +85,7 @@ humantime = "2.1.0" # Error Handling & Formatting displaydoc = "0.2.4" static_assertions = "1.1.0" -thiserror = "1.0.40" +thiserror = "1.0.41" tracing = "0.1.37" # Serialization diff --git a/zebra-consensus/Cargo.toml b/zebra-consensus/Cargo.toml index 4bce5294efb..365b99d6038 100644 --- a/zebra-consensus/Cargo.toml +++ b/zebra-consensus/Cargo.toml @@ -52,7 +52,7 @@ serde = { version = "1.0.166", features = ["serde_derive"] } futures = "0.3.28" futures-util = "0.3.28" metrics = "0.21.1" -thiserror = "1.0.40" +thiserror = "1.0.41" tokio = { version = "1.29.1", features = ["time", "sync", 
"tracing", "rt-multi-thread"] } tower = { version = "0.4.13", features = ["timeout", "util", "buffer"] } tracing = "0.1.37" diff --git a/zebra-network/Cargo.toml b/zebra-network/Cargo.toml index 643c3d49bae..3a1079d9720 100644 --- a/zebra-network/Cargo.toml +++ b/zebra-network/Cargo.toml @@ -58,7 +58,7 @@ rayon = "1.7.0" regex = "1.8.4" serde = { version = "1.0.166", features = ["serde_derive"] } tempfile = "3.5.0" -thiserror = "1.0.40" +thiserror = "1.0.41" futures = "0.3.28" tokio = { version = "1.29.1", features = ["fs", "io-util", "net", "time", "tracing", "macros", "rt-multi-thread"] } diff --git a/zebra-rpc/Cargo.toml b/zebra-rpc/Cargo.toml index 0efb0b57253..a839884c655 100644 --- a/zebra-rpc/Cargo.toml +++ b/zebra-rpc/Cargo.toml @@ -82,7 +82,7 @@ insta = { version = "1.30.0", features = ["redactions", "json", "ron"] } proptest = "1.2.0" -thiserror = "1.0.40" +thiserror = "1.0.41" tokio = { version = "1.29.1", features = ["full", "tracing", "test-util"] } zebra-chain = { path = "../zebra-chain", features = ["proptest-impl"] } diff --git a/zebra-script/Cargo.toml b/zebra-script/Cargo.toml index d1399b0d7bf..6efe74fd543 100644 --- a/zebra-script/Cargo.toml +++ b/zebra-script/Cargo.toml @@ -19,7 +19,7 @@ zcash_script = "0.1.12" zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.27" } -thiserror = "1.0.40" +thiserror = "1.0.41" displaydoc = "0.2.4" [dev-dependencies] diff --git a/zebra-state/Cargo.toml b/zebra-state/Cargo.toml index 73d68bbaf8c..55e1b15376e 100644 --- a/zebra-state/Cargo.toml +++ b/zebra-state/Cargo.toml @@ -59,7 +59,7 @@ rocksdb = { version = "0.21.0", default-features = false, features = ["lz4"] } semver = "1.0.17" serde = { version = "1.0.166", features = ["serde_derive"] } tempfile = "3.5.0" -thiserror = "1.0.40" +thiserror = "1.0.41" rayon = "1.7.0" tokio = { version = "1.29.1", features = ["rt-multi-thread", "sync", "tracing"] } diff --git a/zebra-test/Cargo.toml b/zebra-test/Cargo.toml index 3376c566c3b..41706bf7d40 100644 --- 
a/zebra-test/Cargo.toml +++ b/zebra-test/Cargo.toml @@ -36,7 +36,7 @@ tinyvec = { version = "1.6.0", features = ["rustc_1_55"] } humantime = "2.1.0" owo-colors = "3.5.0" spandoc = "0.2.2" -thiserror = "1.0.40" +thiserror = "1.0.41" tracing-subscriber = { version = "0.3.17", features = ["env-filter"] } tracing-error = "0.2.0" diff --git a/zebra-utils/Cargo.toml b/zebra-utils/Cargo.toml index 7ecbd146cc3..19dadb8088e 100644 --- a/zebra-utils/Cargo.toml +++ b/zebra-utils/Cargo.toml @@ -72,7 +72,7 @@ hex = "0.4.3" serde_json = "1.0.99" tracing-error = "0.2.0" tracing-subscriber = "0.3.17" -thiserror = "1.0.40" +thiserror = "1.0.41" zebra-node-services = { path = "../zebra-node-services", version = "1.0.0-beta.27" } zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.27" } diff --git a/zebrad/Cargo.toml b/zebrad/Cargo.toml index d29bf034991..eb081a3a15a 100644 --- a/zebrad/Cargo.toml +++ b/zebrad/Cargo.toml @@ -173,7 +173,7 @@ color-eyre = { version = "0.6.2", default-features = false, features = ["issue-u # Enable a feature that makes tinyvec compile much faster. tinyvec = { version = "1.6.0", features = ["rustc_1_55"] } -thiserror = "1.0.40" +thiserror = "1.0.41" tracing-subscriber = { version = "0.3.17", features = ["env-filter"] } tracing-appender = "0.2.2" From ea41dcbbac0908594e71c2b3e2c2f5f11de5c631 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 5 Jul 2023 20:53:41 +0000 Subject: [PATCH 189/265] build(deps): bump clap from 4.3.10 to 4.3.11 (#7150) Bumps [clap](https://github.com/clap-rs/clap) from 4.3.10 to 4.3.11. - [Release notes](https://github.com/clap-rs/clap/releases) - [Changelog](https://github.com/clap-rs/clap/blob/master/CHANGELOG.md) - [Commits](https://github.com/clap-rs/clap/compare/v4.3.10...v4.3.11) --- updated-dependencies: - dependency-name: clap dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 14 +++++++------- zebrad/Cargo.toml | 2 +- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index e4c1612c761..f2301475c50 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -12,7 +12,7 @@ dependencies = [ "arc-swap", "backtrace", "canonical-path", - "clap 4.3.10", + "clap 4.3.11", "color-eyre", "fs-err", "once_cell", @@ -773,9 +773,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.3.10" +version = "4.3.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "384e169cc618c613d5e3ca6404dda77a8685a63e08660dcc64abaf7da7cb0c7a" +checksum = "1640e5cc7fb47dbb8338fd471b105e7ed6c3cb2aeb00c2e067127ffd3764a05d" dependencies = [ "clap_builder", "clap_derive", @@ -784,9 +784,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.3.10" +version = "4.3.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef137bbe35aab78bdb468ccfba75a5f4d8321ae011d34063770780545176af2d" +checksum = "98c59138d527eeaf9b53f35a77fcc1fad9d883116070c63d5de1c7dc7b00c72b" dependencies = [ "anstream", "anstyle", @@ -955,7 +955,7 @@ dependencies = [ "anes", "cast", "ciborium", - "clap 4.3.10", + "clap 4.3.11", "criterion-plot", "is-terminal", "itertools 0.10.5", @@ -5974,7 +5974,7 @@ dependencies = [ "abscissa_core", "atty", "chrono", - "clap 4.3.10", + "clap 4.3.11", "color-eyre", "console-subscriber", "dirs", diff --git a/zebrad/Cargo.toml b/zebrad/Cargo.toml index eb081a3a15a..081b16827ab 100644 --- a/zebrad/Cargo.toml +++ b/zebrad/Cargo.toml @@ -153,7 +153,7 @@ zebra-state = { path = "../zebra-state", version = "1.0.0-beta.27" } zebra-utils = { path = "../zebra-utils", version = "1.0.0-beta.27", optional = true } abscissa_core = "0.7.0" -clap = { version = "4.3.10", features = ["cargo"] } +clap = { version = "4.3.11", features = ["cargo"] } chrono = { version = 
"0.4.26", default-features = false, features = ["clock", "std"] } humantime-serde = "1.1.1" indexmap = "2.0.0" From 00dd110265eca38a80bb7745f33946f650746cd2 Mon Sep 17 00:00:00 2001 From: Marek Date: Thu, 6 Jul 2023 01:01:22 +0200 Subject: [PATCH 190/265] Use standard syntax for Dockerfile ENV instruction (#7155) --- docker/Dockerfile | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/docker/Dockerfile b/docker/Dockerfile index 4f15b3bf51c..c2ec9b2af5f 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -64,20 +64,20 @@ RUN if [ "$(uname -m)" != "aarch64" ]; then \ # # We set defaults to all variables. ARG RUST_BACKTRACE -ENV RUST_BACKTRACE ${RUST_BACKTRACE:-0} +ENV RUST_BACKTRACE=${RUST_BACKTRACE:-0} ARG RUST_LIB_BACKTRACE -ENV RUST_LIB_BACKTRACE ${RUST_LIB_BACKTRACE:-0} +ENV RUST_LIB_BACKTRACE=${RUST_LIB_BACKTRACE:-0} ARG COLORBT_SHOW_HIDDEN -ENV COLORBT_SHOW_HIDDEN ${COLORBT_SHOW_HIDDEN:-0} +ENV COLORBT_SHOW_HIDDEN=${COLORBT_SHOW_HIDDEN:-0} ARG RUST_LOG -ENV RUST_LOG ${RUST_LOG:-info} +ENV RUST_LOG=${RUST_LOG:-info} # Skip IPv6 tests by default, as some CI environment don't have IPv6 available ARG ZEBRA_SKIP_IPV6_TESTS -ENV ZEBRA_SKIP_IPV6_TESTS ${ZEBRA_SKIP_IPV6_TESTS:-1} +ENV ZEBRA_SKIP_IPV6_TESTS=${ZEBRA_SKIP_IPV6_TESTS:-1} # Build zebrad with these features # Keep these in sync with: @@ -86,13 +86,13 @@ ARG FEATURES="default-release-binaries" ARG TEST_FEATURES="lightwalletd-grpc-tests zebra-checkpoints" # Use ENTRYPOINT_FEATURES to override the specific features used to run tests in entrypoint.sh, # separately from the test and production image builds. 
-ENV ENTRYPOINT_FEATURES "$TEST_FEATURES $FEATURES" +ENV ENTRYPOINT_FEATURES="$TEST_FEATURES $FEATURES" # Use default network value if none is provided ARG NETWORK -ENV NETWORK ${NETWORK:-Mainnet} +ENV NETWORK=${NETWORK:-Mainnet} -ENV CARGO_HOME /opt/zebrad/.cargo/ +ENV CARGO_HOME="/opt/zebrad/.cargo/" # In this stage we build tests (without running then) # @@ -178,7 +178,7 @@ RUN apt-get update && \ # Config settings ARG NETWORK -ENV NETWORK ${NETWORK:-Mainnet} +ENV NETWORK=${NETWORK:-Mainnet} # Expose configured ports EXPOSE 8233 18233 From 9df78ffdbaf54b458cba4cc3b8ffe4d20a1feea7 Mon Sep 17 00:00:00 2001 From: teor Date: Thu, 6 Jul 2023 11:04:28 +1000 Subject: [PATCH 191/265] change(tests): Do round-trip tests for note commitment tree data structure and RPC serialisation (#7147) * Add an assert_frontier_eq() method to note commitment trees for tests * Check round-trip serialization for note commitment trees * fix typos --------- Co-authored-by: Alfredo Garcia --- zebra-chain/src/orchard/tree.rs | 45 ++++++--- zebra-chain/src/sapling/tree.rs | 45 ++++++--- zebra-chain/src/sprout/tree.rs | 43 +++++--- .../service/finalized_state/tests/vectors.rs | 99 +++++++++++++++++-- 4 files changed, 187 insertions(+), 45 deletions(-) diff --git a/zebra-chain/src/orchard/tree.rs b/zebra-chain/src/orchard/tree.rs index 6af9680c5f2..0924b3d8445 100644 --- a/zebra-chain/src/orchard/tree.rs +++ b/zebra-chain/src/orchard/tree.rs @@ -14,7 +14,6 @@ use std::{ fmt, hash::{Hash, Hasher}, io, - ops::Deref, sync::Arc, }; @@ -330,14 +329,9 @@ impl NoteCommitmentTree { /// Returns the current root of the tree, used as an anchor in Orchard /// shielded transactions. pub fn root(&self) -> Root { - if let Some(root) = self - .cached_root - .read() - .expect("a thread that previously held exclusive lock access panicked") - .deref() - { + if let Some(root) = self.cached_root() { // Return cached root. - return *root; + return root; } // Get exclusive access, compute the root, and cache it. 
@@ -358,6 +352,15 @@ impl NoteCommitmentTree { } } + /// Returns the current root of the tree, if it has already been cached. + #[allow(clippy::unwrap_in_result)] + pub fn cached_root(&self) -> Option { + *self + .cached_root + .read() + .expect("a thread that previously held exclusive lock access panicked") + } + /// Get the Pallas-based Sinsemilla hash / root node of this merkle tree of /// note commitments. pub fn hash(&self) -> [u8; 32] { @@ -379,15 +382,33 @@ impl NoteCommitmentTree { pub fn count(&self) -> u64 { self.inner.position().map_or(0, |pos| u64::from(pos) + 1) } + + /// Checks if the tree roots and inner data structures of `self` and `other` are equal. + /// + /// # Panics + /// + /// If they aren't equal, with a message explaining the differences. + /// + /// Only for use in tests. + #[cfg(any(test, feature = "proptest-impl"))] + pub fn assert_frontier_eq(&self, other: &Self) { + // It's technically ok for the cached root not to be preserved, + // but it can result in expensive cryptographic operations, + // so we fail the tests if it happens. + assert_eq!(self.cached_root(), other.cached_root()); + + // Check the data in the internal data structure + assert_eq!(self.inner, other.inner); + + // Check the RPC serialization format (not the same as the Zebra database format) + assert_eq!(SerializedTree::from(self), SerializedTree::from(other)); + } } impl Clone for NoteCommitmentTree { /// Clones the inner tree, and creates a new `RwLock` with the cloned root data. 
fn clone(&self) -> Self { - let cached_root = *self - .cached_root - .read() - .expect("a thread that previously held exclusive lock access panicked"); + let cached_root = self.cached_root(); Self { inner: self.inner.clone(), diff --git a/zebra-chain/src/sapling/tree.rs b/zebra-chain/src/sapling/tree.rs index f5307a8e0a1..2b6feeb0d5e 100644 --- a/zebra-chain/src/sapling/tree.rs +++ b/zebra-chain/src/sapling/tree.rs @@ -14,7 +14,6 @@ use std::{ fmt, hash::{Hash, Hasher}, io, - ops::Deref, sync::Arc, }; @@ -333,14 +332,9 @@ impl NoteCommitmentTree { /// Returns the current root of the tree, used as an anchor in Sapling /// shielded transactions. pub fn root(&self) -> Root { - if let Some(root) = self - .cached_root - .read() - .expect("a thread that previously held exclusive lock access panicked") - .deref() - { + if let Some(root) = self.cached_root() { // Return cached root. - return *root; + return root; } // Get exclusive access, compute the root, and cache it. @@ -361,6 +355,15 @@ impl NoteCommitmentTree { } } + /// Returns the current root of the tree, if it has already been cached. + #[allow(clippy::unwrap_in_result)] + pub fn cached_root(&self) -> Option { + *self + .cached_root + .read() + .expect("a thread that previously held exclusive lock access panicked") + } + /// Gets the Jubjub-based Pedersen hash of root node of this merkle tree of /// note commitments. pub fn hash(&self) -> [u8; 32] { @@ -382,16 +385,34 @@ impl NoteCommitmentTree { pub fn count(&self) -> u64 { self.inner.position().map_or(0, |pos| u64::from(pos) + 1) } + + /// Checks if the tree roots and inner data structures of `self` and `other` are equal. + /// + /// # Panics + /// + /// If they aren't equal, with a message explaining the differences. + /// + /// Only for use in tests. 
+ #[cfg(any(test, feature = "proptest-impl"))] + pub fn assert_frontier_eq(&self, other: &Self) { + // It's technically ok for the cached root not to be preserved, + // but it can result in expensive cryptographic operations, + // so we fail the tests if it happens. + assert_eq!(self.cached_root(), other.cached_root()); + + // Check the data in the internal data structure + assert_eq!(self.inner, other.inner); + + // Check the RPC serialization format (not the same as the Zebra database format) + assert_eq!(SerializedTree::from(self), SerializedTree::from(other)); + } } impl Clone for NoteCommitmentTree { /// Clones the inner tree, and creates a new [`RwLock`](std::sync::RwLock) /// with the cloned root data. fn clone(&self) -> Self { - let cached_root = *self - .cached_root - .read() - .expect("a thread that previously held exclusive lock access panicked"); + let cached_root = self.cached_root(); Self { inner: self.inner.clone(), diff --git a/zebra-chain/src/sprout/tree.rs b/zebra-chain/src/sprout/tree.rs index ab597fc9869..d28738be7d8 100644 --- a/zebra-chain/src/sprout/tree.rs +++ b/zebra-chain/src/sprout/tree.rs @@ -10,7 +10,7 @@ //! //! A root of a note commitment tree is associated with each treestate. -use std::{fmt, ops::Deref}; +use std::fmt; use byteorder::{BigEndian, ByteOrder}; use incrementalmerkletree::{bridgetree, Frontier}; @@ -266,14 +266,9 @@ impl NoteCommitmentTree { /// Returns the current root of the tree; used as an anchor in Sprout /// shielded transactions. pub fn root(&self) -> Root { - if let Some(root) = self - .cached_root - .read() - .expect("a thread that previously held exclusive lock access panicked") - .deref() - { + if let Some(root) = self.cached_root() { // Return cached root. - return *root; + return root; } // Get exclusive access, compute the root, and cache it. @@ -294,6 +289,15 @@ impl NoteCommitmentTree { } } + /// Returns the current root of the tree, if it has already been cached. 
+ #[allow(clippy::unwrap_in_result)] + pub fn cached_root(&self) -> Option { + *self + .cached_root + .read() + .expect("a thread that previously held exclusive lock access panicked") + } + /// Returns a hash of the Sprout note commitment tree root. pub fn hash(&self) -> [u8; 32] { self.root().into() @@ -316,15 +320,30 @@ impl NoteCommitmentTree { pub fn count(&self) -> u64 { self.inner.position().map_or(0, |pos| u64::from(pos) + 1) } + + /// Checks if the tree roots and inner data structures of `self` and `other` are equal. + /// + /// # Panics + /// + /// If they aren't equal, with a message explaining the differences. + /// + /// Only for use in tests. + #[cfg(any(test, feature = "proptest-impl"))] + pub fn assert_frontier_eq(&self, other: &Self) { + // It's technically ok for the cached root not to be preserved, + // but it can result in expensive cryptographic operations, + // so we fail the tests if it happens. + assert_eq!(self.cached_root(), other.cached_root()); + + // Check the data in the internal data structure + assert_eq!(self.inner, other.inner); + } } impl Clone for NoteCommitmentTree { /// Clones the inner tree, and creates a new `RwLock` with the cloned root data. 
fn clone(&self) -> Self { - let cached_root = *self - .cached_root - .read() - .expect("a thread that previously held exclusive lock access panicked"); + let cached_root = self.cached_root(); Self { inner: self.inner.clone(), diff --git a/zebra-state/src/service/finalized_state/tests/vectors.rs b/zebra-state/src/service/finalized_state/tests/vectors.rs index 98975646263..078c0267596 100644 --- a/zebra-state/src/service/finalized_state/tests/vectors.rs +++ b/zebra-state/src/service/finalized_state/tests/vectors.rs @@ -48,9 +48,18 @@ fn sprout_note_commitment_tree_serialization() { let serialized_tree = incremental_tree.as_bytes(); assert_eq!(hex::encode(&serialized_tree), expected_serialized_tree_hex); - let deserialized_tree = sprout::tree::NoteCommitmentTree::from_bytes(serialized_tree); + let deserialized_tree = sprout::tree::NoteCommitmentTree::from_bytes(&serialized_tree); + // This check isn't enough to show that the entire struct is the same, because it just compares + // the cached serialized/deserialized roots. (NoteCommitmentTree::eq() also just compares + // roots.) assert_eq!(incremental_tree.root(), deserialized_tree.root()); + + incremental_tree.assert_frontier_eq(&deserialized_tree); + + // Double-check that the internal format is the same by re-serializing the tree. + let re_serialized_tree = deserialized_tree.as_bytes(); + assert_eq!(serialized_tree, re_serialized_tree); } /// Check that the sprout tree database serialization format has not changed for one commitment. 
@@ -85,9 +94,18 @@ fn sprout_note_commitment_tree_serialization_one() { let serialized_tree = incremental_tree.as_bytes(); assert_eq!(hex::encode(&serialized_tree), expected_serialized_tree_hex); - let deserialized_tree = sprout::tree::NoteCommitmentTree::from_bytes(serialized_tree); + let deserialized_tree = sprout::tree::NoteCommitmentTree::from_bytes(&serialized_tree); + // This check isn't enough to show that the entire struct is the same, because it just compares + // the cached serialized/deserialized roots. (NoteCommitmentTree::eq() also just compares + // roots.) assert_eq!(incremental_tree.root(), deserialized_tree.root()); + + incremental_tree.assert_frontier_eq(&deserialized_tree); + + // Double-check that the internal format is the same by re-serializing the tree. + let re_serialized_tree = deserialized_tree.as_bytes(); + assert_eq!(serialized_tree, re_serialized_tree); } /// Check that the sprout tree database serialization format has not changed when the number of @@ -131,9 +149,18 @@ fn sprout_note_commitment_tree_serialization_pow2() { let serialized_tree = incremental_tree.as_bytes(); assert_eq!(hex::encode(&serialized_tree), expected_serialized_tree_hex); - let deserialized_tree = sprout::tree::NoteCommitmentTree::from_bytes(serialized_tree); + let deserialized_tree = sprout::tree::NoteCommitmentTree::from_bytes(&serialized_tree); + // This check isn't enough to show that the entire struct is the same, because it just compares + // the cached serialized/deserialized roots. (NoteCommitmentTree::eq() also just compares + // roots.) assert_eq!(incremental_tree.root(), deserialized_tree.root()); + + incremental_tree.assert_frontier_eq(&deserialized_tree); + + // Double-check that the internal format is the same by re-serializing the tree. + let re_serialized_tree = deserialized_tree.as_bytes(); + assert_eq!(serialized_tree, re_serialized_tree); } /// Check that the sapling tree database serialization format has not changed. 
@@ -172,9 +199,18 @@ fn sapling_note_commitment_tree_serialization() { let serialized_tree = incremental_tree.as_bytes(); assert_eq!(hex::encode(&serialized_tree), expected_serialized_tree_hex); - let deserialized_tree = sapling::tree::NoteCommitmentTree::from_bytes(serialized_tree); + let deserialized_tree = sapling::tree::NoteCommitmentTree::from_bytes(&serialized_tree); + // This check isn't enough to show that the entire struct is the same, because it just compares + // the cached serialized/deserialized roots. (NoteCommitmentTree::eq() also just compares + // roots.) assert_eq!(incremental_tree.root(), deserialized_tree.root()); + + incremental_tree.assert_frontier_eq(&deserialized_tree); + + // Double-check that the internal format is the same by re-serializing the tree. + let re_serialized_tree = deserialized_tree.as_bytes(); + assert_eq!(serialized_tree, re_serialized_tree); } /// Check that the sapling tree database serialization format has not changed for one commitment. @@ -209,9 +245,18 @@ fn sapling_note_commitment_tree_serialization_one() { let serialized_tree = incremental_tree.as_bytes(); assert_eq!(hex::encode(&serialized_tree), expected_serialized_tree_hex); - let deserialized_tree = sapling::tree::NoteCommitmentTree::from_bytes(serialized_tree); + let deserialized_tree = sapling::tree::NoteCommitmentTree::from_bytes(&serialized_tree); + // This check isn't enough to show that the entire struct is the same, because it just compares + // the cached serialized/deserialized roots. (NoteCommitmentTree::eq() also just compares + // roots.) assert_eq!(incremental_tree.root(), deserialized_tree.root()); + + incremental_tree.assert_frontier_eq(&deserialized_tree); + + // Double-check that the internal format is the same by re-serializing the tree. 
+ let re_serialized_tree = deserialized_tree.as_bytes(); + assert_eq!(serialized_tree, re_serialized_tree); } /// Check that the sapling tree database serialization format has not changed when the number of @@ -259,9 +304,18 @@ fn sapling_note_commitment_tree_serialization_pow2() { let serialized_tree = incremental_tree.as_bytes(); assert_eq!(hex::encode(&serialized_tree), expected_serialized_tree_hex); - let deserialized_tree = sapling::tree::NoteCommitmentTree::from_bytes(serialized_tree); + let deserialized_tree = sapling::tree::NoteCommitmentTree::from_bytes(&serialized_tree); + // This check isn't enough to show that the entire struct is the same, because it just compares + // the cached serialized/deserialized roots. (NoteCommitmentTree::eq() also just compares + // roots.) assert_eq!(incremental_tree.root(), deserialized_tree.root()); + + incremental_tree.assert_frontier_eq(&deserialized_tree); + + // Double-check that the internal format is the same by re-serializing the tree. + let re_serialized_tree = deserialized_tree.as_bytes(); + assert_eq!(serialized_tree, re_serialized_tree); } /// Check that the orchard tree database serialization format has not changed. @@ -310,9 +364,18 @@ fn orchard_note_commitment_tree_serialization() { let serialized_tree = incremental_tree.as_bytes(); assert_eq!(hex::encode(&serialized_tree), expected_serialized_tree_hex); - let deserialized_tree = orchard::tree::NoteCommitmentTree::from_bytes(serialized_tree); + let deserialized_tree = orchard::tree::NoteCommitmentTree::from_bytes(&serialized_tree); + // This check isn't enough to show that the entire struct is the same, because it just compares + // the cached serialized/deserialized roots. (NoteCommitmentTree::eq() also just compares + // roots.) assert_eq!(incremental_tree.root(), deserialized_tree.root()); + + incremental_tree.assert_frontier_eq(&deserialized_tree); + + // Double-check that the internal format is the same by re-serializing the tree. 
+ let re_serialized_tree = deserialized_tree.as_bytes(); + assert_eq!(serialized_tree, re_serialized_tree); } /// Check that the orchard tree database serialization format has not changed for one commitment. @@ -349,9 +412,18 @@ fn orchard_note_commitment_tree_serialization_one() { let serialized_tree = incremental_tree.as_bytes(); assert_eq!(hex::encode(&serialized_tree), expected_serialized_tree_hex); - let deserialized_tree = orchard::tree::NoteCommitmentTree::from_bytes(serialized_tree); + let deserialized_tree = orchard::tree::NoteCommitmentTree::from_bytes(&serialized_tree); + // This check isn't enough to show that the entire struct is the same, because it just compares + // the cached serialized/deserialized roots. (NoteCommitmentTree::eq() also just compares + // roots.) assert_eq!(incremental_tree.root(), deserialized_tree.root()); + + incremental_tree.assert_frontier_eq(&deserialized_tree); + + // Double-check that the internal format is the same by re-serializing the tree. + let re_serialized_tree = deserialized_tree.as_bytes(); + assert_eq!(serialized_tree, re_serialized_tree); } /// Check that the orchard tree database serialization format has not changed when the number of @@ -399,7 +471,16 @@ fn orchard_note_commitment_tree_serialization_pow2() { let serialized_tree = incremental_tree.as_bytes(); assert_eq!(hex::encode(&serialized_tree), expected_serialized_tree_hex); - let deserialized_tree = orchard::tree::NoteCommitmentTree::from_bytes(serialized_tree); + let deserialized_tree = orchard::tree::NoteCommitmentTree::from_bytes(&serialized_tree); + // This check isn't enough to show that the entire struct is the same, because it just compares + // the cached serialized/deserialized roots. (NoteCommitmentTree::eq() also just compares + // roots.) assert_eq!(incremental_tree.root(), deserialized_tree.root()); + + incremental_tree.assert_frontier_eq(&deserialized_tree); + + // Double-check that the internal format is the same by re-serializing the tree. 
+ let re_serialized_tree = deserialized_tree.as_bytes(); + assert_eq!(serialized_tree, re_serialized_tree); } From 83c459d8abda1eceddfa217238e54be37d2adbd8 Mon Sep 17 00:00:00 2001 From: teor Date: Thu, 6 Jul 2023 11:04:45 +1000 Subject: [PATCH 192/265] Add extra patch job to continous-integration-os.patch.yml (#7086) --- .github/workflows/continous-integration-os.patch.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/continous-integration-os.patch.yml b/.github/workflows/continous-integration-os.patch.yml index b79ab949975..fe81951024a 100644 --- a/.github/workflows/continous-integration-os.patch.yml +++ b/.github/workflows/continous-integration-os.patch.yml @@ -60,7 +60,7 @@ jobs: checks: - bans - sources - features: ['', '--all-features'] + features: ['', '--features default-release-binaries', '--all-features'] steps: - run: 'echo "No build required"' From 77ad91ced47c44e02b34b205f6eaf1f47f14e171 Mon Sep 17 00:00:00 2001 From: Arya Date: Thu, 6 Jul 2023 01:54:10 -0400 Subject: [PATCH 193/265] fix(network): Avoid initiating outbound handshakes with IPs for which Zebra already has an active peer. (#7029) * Adds most_recent_by_ip field to address book * adds test * Apply suggestions from code review * fixes lint * Updates most_recent_by_ip in .take() Updates should_update_most_recent_by_ip() and has_active_peer_with_ip to check last_attempt and last_failure times Renames has_active_peer_with_ip * Documents that Zebra will not initiate more than 1 outbound connections per IP * Fixes is_ready_for_connection_attempt_with_ip Adds test coverage for AttemptPending and Failed Fixes new_outbound_peer_connections_are_rate_limited proptest * Applies suggestions from code review. 
* Applies suggestions from code review * Always return true from `is_ready_for_connection_attempt_with_ip` if max_connections_per_ip != 0 * Update max_connections_per_ip config docs * Warn about invalid config fields and use default values * Ignores last_attempt and last_failure in is_ready_for_connection_attempt_with_ip updates test * Only update most_recent_by_ip if update.last_conn_state is responded. * Apply suggestions from code review Co-authored-by: teor * fixes lint * Update zebra-network/src/address_book.rs Co-authored-by: teor * Apply suggestions from code review Co-authored-by: teor * Fix Rust syntax * Fix whitespace --------- Co-authored-by: teor --- zebra-network/src/address_book.rs | 119 +++++++++++++++++- zebra-network/src/address_book/tests/prop.rs | 6 +- .../src/address_book/tests/vectors.rs | 78 +++++++++++- zebra-network/src/address_book_updater.rs | 1 + zebra-network/src/config.rs | 42 ++++++- zebra-network/src/constants.rs | 13 +- zebra-network/src/meta_addr.rs | 15 ++- zebra-network/src/meta_addr/arbitrary.rs | 18 ++- zebra-network/src/meta_addr/tests/prop.rs | 8 +- .../src/peer_set/candidate_set/tests/prop.rs | 19 ++- .../peer_set/candidate_set/tests/vectors.rs | 4 +- zebra-network/src/peer_set/initialize.rs | 7 -- .../src/peer_set/initialize/tests/vectors.rs | 7 +- zebra-network/src/peer_set/set/tests.rs | 8 +- .../components/inbound/tests/fake_peer_set.rs | 5 +- 15 files changed, 313 insertions(+), 37 deletions(-) diff --git a/zebra-network/src/address_book.rs b/zebra-network/src/address_book.rs index 8e8b45114cc..de05b39bd3a 100644 --- a/zebra-network/src/address_book.rs +++ b/zebra-network/src/address_book.rs @@ -3,8 +3,9 @@ use std::{ cmp::Reverse, + collections::HashMap, iter::Extend, - net::SocketAddr, + net::{IpAddr, SocketAddr}, sync::{Arc, Mutex}, time::Instant, }; @@ -72,6 +73,14 @@ pub struct AddressBook { /// [`OrderedMap`] sorts in descending order. 
by_addr: OrderedMap>, + /// The address with a last_connection_state of [`PeerAddrState::Responded`] and + /// the most recent `last_response` time by IP. + /// + /// This is used to avoid initiating outbound connections past [`Config::max_connections_per_ip`](crate::config::Config), and + /// currently only supports a `max_connections_per_ip` of 1, and must be `None` when used with a greater `max_connections_per_ip`. + // TODO: Replace with `by_ip: HashMap>` to support configured `max_connections_per_ip` greater than 1 + most_recent_by_ip: Option>, + /// The local listener address. local_listener: SocketAddr, @@ -130,7 +139,12 @@ impl AddressBook { /// Construct an [`AddressBook`] with the given `local_listener` on `network`. /// /// Uses the supplied [`tracing::Span`] for address book operations. - pub fn new(local_listener: SocketAddr, network: Network, span: Span) -> AddressBook { + pub fn new( + local_listener: SocketAddr, + network: Network, + max_connections_per_ip: usize, + span: Span, + ) -> AddressBook { let constructor_span = span.clone(); let _guard = constructor_span.enter(); @@ -141,6 +155,8 @@ impl AddressBook { // and it gets replaced by `update_metrics` anyway. let (address_metrics_tx, _address_metrics_rx) = watch::channel(AddressMetrics::default()); + // Avoid initiating outbound handshakes when max_connections_per_ip is 1. 
+ let should_limit_outbound_conns_per_ip = max_connections_per_ip == 1; let mut new_book = AddressBook { by_addr: OrderedMap::new(|meta_addr| Reverse(*meta_addr)), local_listener: canonical_socket_addr(local_listener), @@ -149,6 +165,7 @@ impl AddressBook { span, address_metrics_tx, last_address_log: None, + most_recent_by_ip: should_limit_outbound_conns_per_ip.then(HashMap::new), }; new_book.update_metrics(instant_now, chrono_now); @@ -170,6 +187,7 @@ impl AddressBook { pub fn new_with_addrs( local_listener: SocketAddr, network: Network, + max_connections_per_ip: usize, addr_limit: usize, span: Span, addrs: impl IntoIterator, @@ -183,7 +201,7 @@ impl AddressBook { // The maximum number of addresses should be always greater than 0 assert!(addr_limit > 0); - let mut new_book = AddressBook::new(local_listener, network, span); + let mut new_book = AddressBook::new(local_listener, network, max_connections_per_ip, span); new_book.addr_limit = addr_limit; let addrs = addrs @@ -198,6 +216,14 @@ impl AddressBook { for (socket_addr, meta_addr) in addrs { // overwrite any duplicate addresses new_book.by_addr.insert(socket_addr, meta_addr); + // Add the address to `most_recent_by_ip` if it has responded + if new_book.should_update_most_recent_by_ip(meta_addr) { + new_book + .most_recent_by_ip + .as_mut() + .expect("should be some when should_update_most_recent_by_ip is true") + .insert(socket_addr.ip(), meta_addr); + } // exit as soon as we get enough addresses if new_book.by_addr.len() >= addr_limit { break; @@ -314,6 +340,45 @@ impl AddressBook { meta_addr } + /// Returns true if `updated` needs to be applied to the recent outbound peer connection IP cache. + /// + /// Checks if there are no existing entries in the address book with this IP, + /// or if `updated` has a more recent `last_response` requiring the outbound connector to wait + /// longer before initiating handshakes with peers at this IP. 
+ /// + /// This code only needs to check a single cache entry, rather than the entire address book, + /// because other code maintains these invariants: + /// - `last_response` times for an entry can only increase. + /// - this is the only field checked by `has_connection_recently_responded()` + /// + /// See [`AddressBook::is_ready_for_connection_attempt_with_ip`] for more details. + fn should_update_most_recent_by_ip(&self, updated: MetaAddr) -> bool { + let Some(most_recent_by_ip) = self.most_recent_by_ip.as_ref() else { + return false + }; + + if let Some(previous) = most_recent_by_ip.get(&updated.addr.ip()) { + updated.last_connection_state == PeerAddrState::Responded + && updated.last_response() > previous.last_response() + } else { + updated.last_connection_state == PeerAddrState::Responded + } + } + + /// Returns true if `addr` is the latest entry for its IP, which is stored in `most_recent_by_ip`. + /// The entry is checked for an exact match to the IP and port of `addr`. + fn should_remove_most_recent_by_ip(&self, addr: PeerSocketAddr) -> bool { + let Some(most_recent_by_ip) = self.most_recent_by_ip.as_ref() else { + return false + }; + + if let Some(previous) = most_recent_by_ip.get(&addr.ip()) { + previous.addr == addr + } else { + false + } + } + /// Apply `change` to the address book, returning the updated `MetaAddr`, /// if the change was valid. /// @@ -373,6 +438,15 @@ impl AddressBook { self.by_addr.insert(updated.addr, updated); + // Add the address to `most_recent_by_ip` if it sent the most recent + // response Zebra has received from this IP. 
+ if self.should_update_most_recent_by_ip(updated) { + self.most_recent_by_ip + .as_mut() + .expect("should be some when should_update_most_recent_by_ip is true") + .insert(updated.addr.ip(), updated); + } + debug!( ?change, ?updated, @@ -397,6 +471,15 @@ impl AddressBook { self.by_addr.remove(&surplus_peer.addr); + // Check if this surplus peer's addr matches that in `most_recent_by_ip` + // for this the surplus peer's ip to remove it there as well. + if self.should_remove_most_recent_by_ip(surplus_peer.addr) { + self.most_recent_by_ip + .as_mut() + .expect("should be some when should_remove_most_recent_by_ip is true") + .remove(&surplus_peer.addr.ip()); + } + debug!( surplus = ?surplus_peer, ?updated, @@ -435,6 +518,14 @@ impl AddressBook { ); if let Some(entry) = self.by_addr.remove(&removed_addr) { + // Check if this surplus peer's addr matches that in `most_recent_by_ip` + // for this the surplus peer's ip to remove it there as well. + if self.should_remove_most_recent_by_ip(entry.addr) { + if let Some(most_recent_by_ip) = self.most_recent_by_ip.as_mut() { + most_recent_by_ip.remove(&entry.addr.ip()); + } + } + std::mem::drop(_guard); self.update_metrics(instant_now, chrono_now); Some(entry) @@ -463,6 +554,26 @@ impl AddressBook { self.by_addr.descending_values().cloned() } + /// Is this IP ready for a new outbound connection attempt? + /// Checks if the outbound connection with the most recent response at this IP has recently responded. + /// + /// Note: last_response times may remain live for a long time if the local clock is changed to an earlier time. 
+ fn is_ready_for_connection_attempt_with_ip( + &self, + ip: &IpAddr, + chrono_now: chrono::DateTime, + ) -> bool { + let Some(most_recent_by_ip) = self.most_recent_by_ip.as_ref() else { + // if we're not checking IPs, any connection is allowed + return true; + }; + let Some(same_ip_peer) = most_recent_by_ip.get(ip) else { + // If there's no entry for this IP, any connection is allowed + return true; + }; + !same_ip_peer.has_connection_recently_responded(chrono_now) + } + /// Return an iterator over peers that are due for a reconnection attempt, /// in reconnection attempt order. pub fn reconnection_peers( @@ -478,6 +589,7 @@ impl AddressBook { .descending_values() .filter(move |peer| { peer.is_ready_for_connection_attempt(instant_now, chrono_now, self.network) + && self.is_ready_for_connection_attempt_with_ip(&peer.addr.ip(), chrono_now) }) .cloned() } @@ -699,6 +811,7 @@ impl Clone for AddressBook { span: self.span.clone(), address_metrics_tx, last_address_log: None, + most_recent_by_ip: self.most_recent_by_ip.clone(), } } } diff --git a/zebra-network/src/address_book/tests/prop.rs b/zebra-network/src/address_book/tests/prop.rs index 9c497ad3692..732d477379b 100644 --- a/zebra-network/src/address_book/tests/prop.rs +++ b/zebra-network/src/address_book/tests/prop.rs @@ -9,7 +9,7 @@ use tracing::Span; use zebra_chain::{parameters::Network::*, serialization::Duration32}; use crate::{ - constants::{MAX_ADDRS_IN_ADDRESS_BOOK, MAX_PEER_ACTIVE_FOR_GOSSIP}, + constants::{DEFAULT_MAX_CONNS_PER_IP, MAX_ADDRS_IN_ADDRESS_BOOK, MAX_PEER_ACTIVE_FOR_GOSSIP}, meta_addr::{arbitrary::MAX_META_ADDR, MetaAddr, MetaAddrChange}, AddressBook, }; @@ -30,6 +30,7 @@ proptest! { let address_book = AddressBook::new_with_addrs( local_listener, Mainnet, + DEFAULT_MAX_CONNS_PER_IP, MAX_ADDRS_IN_ADDRESS_BOOK, Span::none(), addresses @@ -59,6 +60,7 @@ proptest! 
{ let address_book = AddressBook::new_with_addrs( local_listener, Mainnet, + DEFAULT_MAX_CONNS_PER_IP, MAX_ADDRS_IN_ADDRESS_BOOK, Span::none(), addresses @@ -97,6 +99,7 @@ proptest! { let mut address_book = AddressBook::new_with_addrs( local_listener, Mainnet, + DEFAULT_MAX_CONNS_PER_IP, addr_limit, Span::none(), initial_addrs.clone(), @@ -119,6 +122,7 @@ proptest! { let mut address_book = AddressBook::new_with_addrs( local_listener, Mainnet, + DEFAULT_MAX_CONNS_PER_IP, addr_limit, Span::none(), initial_addrs, diff --git a/zebra-network/src/address_book/tests/vectors.rs b/zebra-network/src/address_book/tests/vectors.rs index 9896a358c85..e401e6a5de3 100644 --- a/zebra-network/src/address_book/tests/vectors.rs +++ b/zebra-network/src/address_book/tests/vectors.rs @@ -11,14 +11,21 @@ use zebra_chain::{ }; use crate::{ - constants::MAX_ADDRS_IN_ADDRESS_BOOK, meta_addr::MetaAddr, - protocol::external::types::PeerServices, AddressBook, + constants::{DEFAULT_MAX_CONNS_PER_IP, MAX_ADDRS_IN_ADDRESS_BOOK}, + meta_addr::MetaAddr, + protocol::external::types::PeerServices, + AddressBook, }; /// Make sure an empty address book is actually empty. 
#[test] fn address_book_empty() { - let address_book = AddressBook::new("0.0.0.0:0".parse().unwrap(), Mainnet, Span::current()); + let address_book = AddressBook::new( + "0.0.0.0:0".parse().unwrap(), + Mainnet, + DEFAULT_MAX_CONNS_PER_IP, + Span::current(), + ); assert_eq!( address_book @@ -48,6 +55,7 @@ fn address_book_peer_order() { let address_book = AddressBook::new_with_addrs( "0.0.0.0:0".parse().unwrap(), Mainnet, + DEFAULT_MAX_CONNS_PER_IP, MAX_ADDRS_IN_ADDRESS_BOOK, Span::current(), addrs, @@ -64,6 +72,7 @@ fn address_book_peer_order() { let address_book = AddressBook::new_with_addrs( "0.0.0.0:0".parse().unwrap(), Mainnet, + DEFAULT_MAX_CONNS_PER_IP, MAX_ADDRS_IN_ADDRESS_BOOK, Span::current(), addrs, @@ -83,6 +92,7 @@ fn address_book_peer_order() { let address_book = AddressBook::new_with_addrs( "0.0.0.0:0".parse().unwrap(), Mainnet, + DEFAULT_MAX_CONNS_PER_IP, MAX_ADDRS_IN_ADDRESS_BOOK, Span::current(), addrs, @@ -99,6 +109,7 @@ fn address_book_peer_order() { let address_book = AddressBook::new_with_addrs( "0.0.0.0:0".parse().unwrap(), Mainnet, + DEFAULT_MAX_CONNS_PER_IP, MAX_ADDRS_IN_ADDRESS_BOOK, Span::current(), addrs, @@ -110,3 +121,64 @@ fn address_book_peer_order() { Some(meta_addr2), ); } + +/// Check that `reconnection_peers` skips addresses with IPs for which +/// Zebra already has recently updated outbound peers. 
+#[test] +fn reconnection_peers_skips_recently_updated_ip() { + // tests that reconnection_peers() skips addresses where there's a connection at that IP with a recent: + // - `last_response` + test_reconnection_peers_skips_recently_updated_ip(true, |addr| { + MetaAddr::new_responded(addr, &PeerServices::NODE_NETWORK) + }); + + // tests that reconnection_peers() *does not* skip addresses where there's a connection at that IP with a recent: + // - `last_attempt` + test_reconnection_peers_skips_recently_updated_ip(false, MetaAddr::new_reconnect); + // - `last_failure` + test_reconnection_peers_skips_recently_updated_ip(false, |addr| { + MetaAddr::new_errored(addr, PeerServices::NODE_NETWORK) + }); +} + +fn test_reconnection_peers_skips_recently_updated_ip< + M: Fn(crate::PeerSocketAddr) -> crate::meta_addr::MetaAddrChange, +>( + should_skip_ip: bool, + make_meta_addr_change: M, +) { + let addr1 = "127.0.0.1:1".parse().unwrap(); + let addr2 = "127.0.0.1:2".parse().unwrap(); + + let meta_addr1 = make_meta_addr_change(addr1).into_new_meta_addr( + Instant::now(), + Utc::now().try_into().expect("will succeed until 2038"), + ); + let meta_addr2 = MetaAddr::new_gossiped_meta_addr( + addr2, + PeerServices::NODE_NETWORK, + DateTime32::MIN.saturating_add(Duration32::from_seconds(1)), + ); + + // The second address should be skipped because the first address has a + // recent `last_response` time and the two addresses have the same IP. 
+ let addrs = vec![meta_addr1, meta_addr2]; + let address_book = AddressBook::new_with_addrs( + "0.0.0.0:0".parse().unwrap(), + Mainnet, + DEFAULT_MAX_CONNS_PER_IP, + MAX_ADDRS_IN_ADDRESS_BOOK, + Span::current(), + addrs, + ); + + let next_reconnection_peer = address_book + .reconnection_peers(Instant::now(), Utc::now()) + .next(); + + if should_skip_ip { + assert_eq!(next_reconnection_peer, None,); + } else { + assert_ne!(next_reconnection_peer, None,); + } +} diff --git a/zebra-network/src/address_book_updater.rs b/zebra-network/src/address_book_updater.rs index 91aa4a6f144..ef503bc8d82 100644 --- a/zebra-network/src/address_book_updater.rs +++ b/zebra-network/src/address_book_updater.rs @@ -51,6 +51,7 @@ impl AddressBookUpdater { let address_book = AddressBook::new( local_listener, config.network, + config.max_connections_per_ip, span!(Level::TRACE, "address book"), ); let address_metrics = address_book.address_metrics_watcher(); diff --git a/zebra-network/src/config.rs b/zebra-network/src/config.rs index 15922f81486..402ee6fc4cc 100644 --- a/zebra-network/src/config.rs +++ b/zebra-network/src/config.rs @@ -19,8 +19,9 @@ use zebra_chain::parameters::Network; use crate::{ constants::{ - DEFAULT_CRAWL_NEW_PEER_INTERVAL, DEFAULT_MAX_CONNS_PER_IP, DNS_LOOKUP_TIMEOUT, - INBOUND_PEER_LIMIT_MULTIPLIER, MAX_PEER_DISK_CACHE_SIZE, OUTBOUND_PEER_LIMIT_MULTIPLIER, + DEFAULT_CRAWL_NEW_PEER_INTERVAL, DEFAULT_MAX_CONNS_PER_IP, + DEFAULT_PEERSET_INITIAL_TARGET_SIZE, DNS_LOOKUP_TIMEOUT, INBOUND_PEER_LIMIT_MULTIPLIER, + MAX_PEER_DISK_CACHE_SIZE, OUTBOUND_PEER_LIMIT_MULTIPLIER, }, protocol::external::{canonical_peer_addr, canonical_socket_addr}, BoxError, PeerSocketAddr, @@ -158,6 +159,20 @@ pub struct Config { /// before it drops any additional peer connections with that IP. /// /// The default and minimum value are 1. + /// + /// # Security + /// + /// Increasing this config above 1 reduces Zebra's network security. 
+ /// + /// If this config is greater than 1, Zebra can initiate multiple outbound handshakes to the same + /// IP address. + /// + /// This config does not currently limit the number of inbound connections that Zebra will accept + /// from the same IP address. + /// + /// If Zebra makes multiple inbound or outbound connections to the same IP, they will be dropped + /// after the handshake, but before adding them to the peer set. The total numbers of inbound and + /// outbound connections are also limited to a multiple of `peerset_initial_target_size`. pub max_connections_per_ip: usize, } @@ -593,7 +608,7 @@ impl Default for Config { // // But Zebra should only make a small number of initial outbound connections, // so that idle peers don't use too many connection slots. - peerset_initial_target_size: 25, + peerset_initial_target_size: DEFAULT_PEERSET_INITIAL_TARGET_SIZE, max_connections_per_ip: DEFAULT_MAX_CONNS_PER_IP, } } @@ -629,7 +644,7 @@ impl<'de> Deserialize<'de> for Config { cache_dir: config.cache_dir, peerset_initial_target_size: config.peerset_initial_target_size, crawl_new_peer_interval: config.crawl_new_peer_interval, - max_connections_per_ip: Some(DEFAULT_MAX_CONNS_PER_IP), + max_connections_per_ip: Some(config.max_connections_per_ip), } } } @@ -655,6 +670,23 @@ impl<'de> Deserialize<'de> for Config { }, }?; + let [max_connections_per_ip, peerset_initial_target_size] = [ + ("max_connections_per_ip", max_connections_per_ip, DEFAULT_MAX_CONNS_PER_IP), + // If we want Zebra to operate with no network, + // we should implement a `zebrad` command that doesn't use `zebra-network`. 
+ ("peerset_initial_target_size", Some(peerset_initial_target_size), DEFAULT_PEERSET_INITIAL_TARGET_SIZE) + ].map(|(field_name, non_zero_config_field, default_config_value)| { + if non_zero_config_field == Some(0) { + warn!( + ?field_name, + ?non_zero_config_field, + "{field_name} should be greater than 0, using default value of {default_config_value} instead" + ); + } + + non_zero_config_field.filter(|config_value| config_value > &0).unwrap_or(default_config_value) + }); + Ok(Config { listen_addr: canonical_socket_addr(listen_addr), network, @@ -663,7 +695,7 @@ impl<'de> Deserialize<'de> for Config { cache_dir, peerset_initial_target_size, crawl_new_peer_interval, - max_connections_per_ip: max_connections_per_ip.unwrap_or(DEFAULT_MAX_CONNS_PER_IP), + max_connections_per_ip, }) } } diff --git a/zebra-network/src/constants.rs b/zebra-network/src/constants.rs index b466d4fd42c..32c1c477599 100644 --- a/zebra-network/src/constants.rs +++ b/zebra-network/src/constants.rs @@ -70,9 +70,20 @@ pub const OUTBOUND_PEER_LIMIT_MULTIPLIER: usize = 3; /// The default maximum number of peer connections Zebra will keep for a given IP address /// before it drops any additional peer connections with that IP. /// -/// This will be used as Config.max_connections_per_ip if no value is provided. +/// This will be used as `Config.max_connections_per_ip` if no valid value is provided. +/// +/// Note: Zebra will currently avoid initiating outbound connections where it +/// has recently had a successful handshake with any address +/// on that IP. Zebra will not initiate more than 1 outbound connection +/// to an IP based on the default configuration, but it will accept more inbound +/// connections to an IP. pub const DEFAULT_MAX_CONNS_PER_IP: usize = 1; +/// The default peerset target size. +/// +/// This will be used as `Config.peerset_initial_target_size` if no valid value is provided. +pub const DEFAULT_PEERSET_INITIAL_TARGET_SIZE: usize = 25; + /// The buffer size for the peer set. 
/// /// This should be greater than 1 to avoid sender contention, but also reasonably diff --git a/zebra-network/src/meta_addr.rs b/zebra-network/src/meta_addr.rs index 6fbd4e7ebdf..1f8572fd53c 100644 --- a/zebra-network/src/meta_addr.rs +++ b/zebra-network/src/meta_addr.rs @@ -561,6 +561,17 @@ impl MetaAddr { } } + /// Returns true if any messages were recently sent to or received from this address. + pub fn was_recently_updated( + &self, + instant_now: Instant, + chrono_now: chrono::DateTime, + ) -> bool { + self.has_connection_recently_responded(chrono_now) + || self.was_connection_recently_attempted(instant_now) + || self.has_connection_recently_failed(instant_now) + } + /// Is this address ready for a new outbound connection attempt? pub fn is_ready_for_connection_attempt( &self, @@ -569,9 +580,7 @@ impl MetaAddr { network: Network, ) -> bool { self.last_known_info_is_valid_for_outbound(network) - && !self.has_connection_recently_responded(chrono_now) - && !self.was_connection_recently_attempted(instant_now) - && !self.has_connection_recently_failed(instant_now) + && !self.was_recently_updated(instant_now, chrono_now) && self.is_probably_reachable(chrono_now) } diff --git a/zebra-network/src/meta_addr/arbitrary.rs b/zebra-network/src/meta_addr/arbitrary.rs index 1b96440e968..d7d4dd840e8 100644 --- a/zebra-network/src/meta_addr/arbitrary.rs +++ b/zebra-network/src/meta_addr/arbitrary.rs @@ -1,6 +1,6 @@ //! Randomised test data generation for MetaAddr. -use std::time::Instant; +use std::{net::IpAddr, time::Instant}; use proptest::{arbitrary::any, collection::vec, prelude::*}; @@ -99,7 +99,17 @@ impl MetaAddrChange { .boxed() } - /// Create a strategy that generates [`MetaAddrChange`]s which are ready for + /// Create a strategy that generates [`IpAddr`]s for [`MetaAddrChange`]s which are ready for + /// outbound connections. 
+ pub fn ready_outbound_strategy_ip() -> BoxedStrategy { + any::() + .prop_filter("failed MetaAddr::is_valid_for_outbound", |ip| { + !ip.is_unspecified() + }) + .boxed() + } + + /// Create a strategy that generates port numbers for [`MetaAddrChange`]s which are ready for /// outbound connections. /// /// Currently, all generated changes are the [`NewAlternate`][1] variant. @@ -107,7 +117,7 @@ impl MetaAddrChange { /// fields. (After PR #2276 merges.) /// /// [1]: super::NewAlternate - pub fn ready_outbound_strategy() -> BoxedStrategy { + pub fn ready_outbound_strategy_port() -> BoxedStrategy { ( canonical_peer_addr_strategy(), any::(), @@ -125,7 +135,7 @@ impl MetaAddrChange { .into_new_meta_addr(instant_now, local_now) .last_known_info_is_valid_for_outbound(Mainnet) { - Some(change) + Some(addr.port()) } else { None } diff --git a/zebra-network/src/meta_addr/tests/prop.rs b/zebra-network/src/meta_addr/tests/prop.rs index 19f66718ccb..dfd497b3d22 100644 --- a/zebra-network/src/meta_addr/tests/prop.rs +++ b/zebra-network/src/meta_addr/tests/prop.rs @@ -10,7 +10,10 @@ use tracing::Span; use zebra_chain::{parameters::Network::*, serialization::DateTime32}; use crate::{ - constants::{MAX_ADDRS_IN_ADDRESS_BOOK, MAX_RECENT_PEER_AGE, MIN_PEER_RECONNECTION_DELAY}, + constants::{ + DEFAULT_MAX_CONNS_PER_IP, MAX_ADDRS_IN_ADDRESS_BOOK, MAX_RECENT_PEER_AGE, + MIN_PEER_RECONNECTION_DELAY, + }, meta_addr::{ arbitrary::{MAX_ADDR_CHANGE, MAX_META_ADDR}, MetaAddr, MetaAddrChange, @@ -156,6 +159,7 @@ proptest! { let address_book = AddressBook::new_with_addrs( local_listener, Mainnet, + DEFAULT_MAX_CONNS_PER_IP, MAX_ADDRS_IN_ADDRESS_BOOK, Span::none(), address_book_addrs @@ -214,6 +218,7 @@ proptest! { let mut address_book = AddressBook::new_with_addrs( local_listener, Mainnet, + DEFAULT_MAX_CONNS_PER_IP, 1, Span::none(), Vec::new(), @@ -327,6 +332,7 @@ proptest! 
{ let address_book = Arc::new(std::sync::Mutex::new(AddressBook::new_with_addrs( SocketAddr::from_str("0.0.0.0:0").unwrap(), Mainnet, + DEFAULT_MAX_CONNS_PER_IP, MAX_ADDRS_IN_ADDRESS_BOOK, Span::none(), addrs, diff --git a/zebra-network/src/peer_set/candidate_set/tests/prop.rs b/zebra-network/src/peer_set/candidate_set/tests/prop.rs index d77b190c67b..e5201e046ba 100644 --- a/zebra-network/src/peer_set/candidate_set/tests/prop.rs +++ b/zebra-network/src/peer_set/candidate_set/tests/prop.rs @@ -8,15 +8,20 @@ use std::{ time::{Duration, Instant}, }; -use proptest::{collection::vec, prelude::*}; +use proptest::{ + collection::{hash_map, vec}, + prelude::*, +}; use tokio::time::{sleep, timeout}; use tracing::Span; use zebra_chain::{parameters::Network::*, serialization::DateTime32}; use crate::{ - constants::MIN_OUTBOUND_PEER_CONNECTION_INTERVAL, + canonical_peer_addr, + constants::{DEFAULT_MAX_CONNS_PER_IP, MIN_OUTBOUND_PEER_CONNECTION_INTERVAL}, meta_addr::{MetaAddr, MetaAddrChange}, + protocol::types::PeerServices, AddressBook, BoxError, Request, Response, }; @@ -67,7 +72,7 @@ proptest! { }); // Since the address book is empty, there won't be any available peers - let address_book = AddressBook::new(SocketAddr::from_str("0.0.0.0:0").unwrap(), Mainnet, Span::none()); + let address_book = AddressBook::new(SocketAddr::from_str("0.0.0.0:0").unwrap(), Mainnet, DEFAULT_MAX_CONNS_PER_IP, Span::none()); let mut candidate_set = CandidateSet::new(Arc::new(std::sync::Mutex::new(address_book)), peer_service); @@ -94,18 +99,22 @@ proptest! { /// Test that new outbound peer connections are rate-limited. 
#[test] fn new_outbound_peer_connections_are_rate_limited( - peers in vec(MetaAddrChange::ready_outbound_strategy(), TEST_ADDRESSES), + peers in hash_map(MetaAddrChange::ready_outbound_strategy_ip(), MetaAddrChange::ready_outbound_strategy_port(), TEST_ADDRESSES), initial_candidates in 0..MAX_TEST_CANDIDATES, extra_candidates in 0..MAX_TEST_CANDIDATES, ) { let (runtime, _init_guard) = zebra_test::init_async(); let _guard = runtime.enter(); + let peers = peers.into_iter().map(|(ip, port)| { + MetaAddr::new_alternate(canonical_peer_addr(SocketAddr::new(ip, port)), &PeerServices::NODE_NETWORK) + }).collect::>(); + let peer_service = tower::service_fn(|_| async { unreachable!("Mock peer service is never used"); }); - let mut address_book = AddressBook::new(SocketAddr::from_str("0.0.0.0:0").unwrap(), Mainnet, Span::none()); + let mut address_book = AddressBook::new(SocketAddr::from_str("0.0.0.0:0").unwrap(), Mainnet, DEFAULT_MAX_CONNS_PER_IP, Span::none()); address_book.extend(peers); let mut candidate_set = CandidateSet::new(Arc::new(std::sync::Mutex::new(address_book)), peer_service); diff --git a/zebra-network/src/peer_set/candidate_set/tests/vectors.rs b/zebra-network/src/peer_set/candidate_set/tests/vectors.rs index 71811a75962..261fb2ff487 100644 --- a/zebra-network/src/peer_set/candidate_set/tests/vectors.rs +++ b/zebra-network/src/peer_set/candidate_set/tests/vectors.rs @@ -15,7 +15,7 @@ use zebra_chain::{parameters::Network::*, serialization::DateTime32}; use zebra_test::mock_service::{MockService, PanicAssertion}; use crate::{ - constants::{GET_ADDR_FANOUT, MIN_PEER_GET_ADDR_INTERVAL}, + constants::{DEFAULT_MAX_CONNS_PER_IP, GET_ADDR_FANOUT, MIN_PEER_GET_ADDR_INTERVAL}, types::{MetaAddr, PeerServices}, AddressBook, Request, Response, }; @@ -141,6 +141,7 @@ fn candidate_set_updates_are_rate_limited() { let address_book = AddressBook::new( SocketAddr::from_str("0.0.0.0:0").unwrap(), Mainnet, + DEFAULT_MAX_CONNS_PER_IP, Span::none(), ); let mut peer_service = 
MockService::build().for_unit_tests(); @@ -186,6 +187,7 @@ fn candidate_set_update_after_update_initial_is_rate_limited() { let address_book = AddressBook::new( SocketAddr::from_str("0.0.0.0:0").unwrap(), Mainnet, + DEFAULT_MAX_CONNS_PER_IP, Span::none(), ); let mut peer_service = MockService::build().for_unit_tests(); diff --git a/zebra-network/src/peer_set/initialize.rs b/zebra-network/src/peer_set/initialize.rs index 8b91320fef8..d3b99621f79 100644 --- a/zebra-network/src/peer_set/initialize.rs +++ b/zebra-network/src/peer_set/initialize.rs @@ -107,13 +107,6 @@ where S::Future: Send + 'static, C: ChainTip + Clone + Send + Sync + 'static, { - // If we want Zebra to operate with no network, - // we should implement a `zebrad` command that doesn't use `zebra-network`. - assert!( - config.peerset_initial_target_size > 0, - "Zebra must be allowed to connect to at least one peer" - ); - let (tcp_listener, listen_addr) = open_listener(&config.clone()).await; let (address_book, address_book_updater, address_metrics, address_book_updater_guard) = diff --git a/zebra-network/src/peer_set/initialize/tests/vectors.rs b/zebra-network/src/peer_set/initialize/tests/vectors.rs index a0abe128ce6..73c15077689 100644 --- a/zebra-network/src/peer_set/initialize/tests/vectors.rs +++ b/zebra-network/src/peer_set/initialize/tests/vectors.rs @@ -1509,7 +1509,12 @@ where } // Manually initialize an address book without a timestamp tracker. - let mut address_book = AddressBook::new(config.listen_addr, config.network, Span::current()); + let mut address_book = AddressBook::new( + config.listen_addr, + config.network, + config.max_connections_per_ip, + Span::current(), + ); // Add enough fake peers to go over the limit, even if the limit is zero. 
let over_limit_peers = config.peerset_outbound_connection_limit() * 2 + 1; diff --git a/zebra-network/src/peer_set/set/tests.rs b/zebra-network/src/peer_set/set/tests.rs index d111792e0e9..e044447e389 100644 --- a/zebra-network/src/peer_set/set/tests.rs +++ b/zebra-network/src/peer_set/set/tests.rs @@ -23,6 +23,7 @@ use zebra_chain::{ use crate::{ address_book::AddressMetrics, + constants::DEFAULT_MAX_CONNS_PER_IP, peer::{ClientTestHarness, LoadTrackedClient, MinimumPeerVersion}, peer_set::{set::MorePeers, InventoryChange, PeerSet}, protocol::external::types::Version, @@ -333,7 +334,12 @@ impl PeerSetGuard { let local_listener = "127.0.0.1:1000" .parse() .expect("Invalid local listener address"); - let address_book = AddressBook::new(local_listener, Network::Mainnet, Span::none()); + let address_book = AddressBook::new( + local_listener, + Network::Mainnet, + DEFAULT_MAX_CONNS_PER_IP, + Span::none(), + ); Arc::new(std::sync::Mutex::new(address_book)) } diff --git a/zebrad/src/components/inbound/tests/fake_peer_set.rs b/zebrad/src/components/inbound/tests/fake_peer_set.rs index fd30be4fdb1..1a383bb90b3 100644 --- a/zebrad/src/components/inbound/tests/fake_peer_set.rs +++ b/zebrad/src/components/inbound/tests/fake_peer_set.rs @@ -23,7 +23,9 @@ use zebra_chain::{ transaction::{UnminedTx, UnminedTxId, VerifiedUnminedTx}, }; use zebra_consensus::{error::TransactionError, transaction, Config as ConsensusConfig}; -use zebra_network::{AddressBook, InventoryResponse, Request, Response}; +use zebra_network::{ + constants::DEFAULT_MAX_CONNS_PER_IP, AddressBook, InventoryResponse, Request, Response, +}; use zebra_node_services::mempool; use zebra_state::{ChainTipChange, Config as StateConfig, CHAIN_TIP_UPDATE_WAIT_LIMIT}; use zebra_test::mock_service::{MockService, PanicAssertion}; @@ -771,6 +773,7 @@ async fn setup( let address_book = AddressBook::new( SocketAddr::from_str("0.0.0.0:0").unwrap(), Mainnet, + DEFAULT_MAX_CONNS_PER_IP, Span::none(), ); let address_book = 
Arc::new(std::sync::Mutex::new(address_book)); From 2492648b5f2fa6f62adc3cd604f8add7297f02fb Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 6 Jul 2023 18:01:29 +0000 Subject: [PATCH 194/265] build(deps): bump toml from 0.7.5 to 0.7.6 (#7164) Bumps [toml](https://github.com/toml-rs/toml) from 0.7.5 to 0.7.6. - [Commits](https://github.com/toml-rs/toml/compare/toml-v0.7.5...toml-v0.7.6) --- updated-dependencies: - dependency-name: toml dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 12 ++++++------ zebra-network/Cargo.toml | 2 +- zebrad/Cargo.toml | 2 +- 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f2301475c50..7f108d2445f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4651,9 +4651,9 @@ dependencies = [ [[package]] name = "toml" -version = "0.7.5" +version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ebafdf5ad1220cb59e7d17cf4d2c72015297b75b19a10472f99b89225089240" +checksum = "c17e963a819c331dcacd7ab957d80bc2b9a9c1e71c804826d2f283dd65306542" dependencies = [ "serde", "serde_spanned", @@ -4672,9 +4672,9 @@ dependencies = [ [[package]] name = "toml_edit" -version = "0.19.11" +version = "0.19.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "266f016b7f039eec8a1a80dfe6156b633d208b9fccca5e4db1d6775b0c4e34a7" +checksum = "c500344a19072298cd05a7224b3c0c629348b78692bf48466c5238656e315a78" dependencies = [ "indexmap 2.0.0", "serde", @@ -5811,7 +5811,7 @@ dependencies = [ "tokio", "tokio-stream", "tokio-util 0.7.8", - "toml 0.7.5", + "toml 0.7.6", "tower", "tracing", "tracing-error", @@ -6009,7 +6009,7 @@ dependencies = [ "tinyvec", "tokio", "tokio-stream", - "toml 0.7.5", + "toml 0.7.6", "tonic", "tonic-build", "tower", diff --git 
a/zebra-network/Cargo.toml b/zebra-network/Cargo.toml index 3a1079d9720..6db3e1c8daf 100644 --- a/zebra-network/Cargo.toml +++ b/zebra-network/Cargo.toml @@ -91,7 +91,7 @@ proptest-derive = "0.3.0" static_assertions = "1.1.0" tokio = { version = "1.29.1", features = ["full", "tracing", "test-util"] } -toml = "0.7.5" +toml = "0.7.6" zebra-chain = { path = "../zebra-chain", features = ["proptest-impl"] } zebra-test = { path = "../zebra-test/" } diff --git a/zebrad/Cargo.toml b/zebrad/Cargo.toml index 081b16827ab..b502688888e 100644 --- a/zebrad/Cargo.toml +++ b/zebrad/Cargo.toml @@ -160,7 +160,7 @@ indexmap = "2.0.0" lazy_static = "1.4.0" semver = "1.0.17" serde = { version = "1.0.166", features = ["serde_derive"] } -toml = "0.7.5" +toml = "0.7.6" futures = "0.3.28" rayon = "1.7.0" From d73941c91fbbb4546f6d1ec14e7470ca9dbfeab1 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 6 Jul 2023 21:00:00 +0000 Subject: [PATCH 195/265] build(deps): bump vergen from 8.2.3 to 8.2.4 (#7163) Bumps [vergen](https://github.com/rustyhorde/vergen) from 8.2.3 to 8.2.4. - [Release notes](https://github.com/rustyhorde/vergen/releases) - [Commits](https://github.com/rustyhorde/vergen/compare/8.2.3...8.2.4) --- updated-dependencies: - dependency-name: vergen dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 8 ++++---- zebrad/Cargo.toml | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 7f108d2445f..56b0cc57427 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3840,9 +3840,9 @@ dependencies = [ [[package]] name = "rustversion" -version = "1.0.12" +version = "1.0.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f3208ce4d8448b3f3e7d168a73f5e0c43a61e32930de3bceeccedb388b6bf06" +checksum = "dc31bd9b61a32c31f9650d18add92aa83a49ba979c143eefd27fe7177b05bd5f" [[package]] name = "rusty-fork" @@ -5124,9 +5124,9 @@ checksum = "f1bddf1187be692e79c5ffeab891132dfb0f236ed36a43c7ed39f1165ee20191" [[package]] name = "vergen" -version = "8.2.3" +version = "8.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce38fc503fa57441ac2539c3e723b5adf76601eb4f1ad24025c6660d27f355b7" +checksum = "bbc5ad0d9d26b2c49a5ab7da76c3e79d3ee37e7821799f8223fcb8f2f391a2e7" dependencies = [ "anyhow", "git2", diff --git a/zebrad/Cargo.toml b/zebrad/Cargo.toml index b502688888e..146450e568e 100644 --- a/zebrad/Cargo.toml +++ b/zebrad/Cargo.toml @@ -223,7 +223,7 @@ proptest-derive = { version = "0.3.0", optional = true } console-subscriber = { version = "0.1.10", optional = true } [build-dependencies] -vergen = { version = "8.2.3", default-features = false, features = ["cargo", "git", "git2", "rustc"] } +vergen = { version = "8.2.4", default-features = false, features = ["cargo", "git", "git2", "rustc"] } # test feature lightwalletd-grpc-tests tonic-build = { version = "0.9.2", optional = true } From c1695c35e2e61cb8828680c075800e2b85064d50 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 6 Jul 2023 23:23:24 +0000 Subject: [PATCH 196/265] build(deps): bump serde_json from 1.0.99 to 1.0.100 (#7149) Bumps 
[serde_json](https://github.com/serde-rs/json) from 1.0.99 to 1.0.100. - [Release notes](https://github.com/serde-rs/json/releases) - [Commits](https://github.com/serde-rs/json/compare/v1.0.99...v1.0.100) --- updated-dependencies: - dependency-name: serde_json dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- zebra-chain/Cargo.toml | 2 +- zebra-node-services/Cargo.toml | 4 ++-- zebra-rpc/Cargo.toml | 2 +- zebra-state/Cargo.toml | 2 +- zebra-utils/Cargo.toml | 2 +- zebrad/Cargo.toml | 2 +- 7 files changed, 9 insertions(+), 9 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 56b0cc57427..ef5001d66d4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4089,9 +4089,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.99" +version = "1.0.100" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46266871c240a00b8f503b877622fe33430b3c7d963bdc0f2adc511e54a1eae3" +checksum = "0f1e14e89be7aa4c4b78bdbdc9eb5bf8517829a600ae8eaa39a6e1d960b5185c" dependencies = [ "indexmap 2.0.0", "itoa", diff --git a/zebra-chain/Cargo.toml b/zebra-chain/Cargo.toml index 4dda603ad95..05acba6e6cc 100644 --- a/zebra-chain/Cargo.toml +++ b/zebra-chain/Cargo.toml @@ -105,7 +105,7 @@ redjubjub = "0.7.0" reddsa = "0.5.0" # Production feature json-conversion -serde_json = { version = "1.0.99", optional = true } +serde_json = { version = "1.0.100", optional = true } # Experimental feature getblocktemplate-rpcs zcash_address = { version = "0.2.1", optional = true } diff --git a/zebra-node-services/Cargo.toml b/zebra-node-services/Cargo.toml index af993cfc421..444710708c5 100644 --- a/zebra-node-services/Cargo.toml +++ b/zebra-node-services/Cargo.toml @@ -45,7 +45,7 @@ jsonrpc-core = { version = "18.0.0", optional = true } # Security: avoid default dependency on openssl reqwest = { version = 
"0.11.18", default-features = false, features = ["rustls-tls"], optional = true } serde = { version = "1.0.166", optional = true } -serde_json = { version = "1.0.99", optional = true } +serde_json = { version = "1.0.100", optional = true } [dev-dependencies] @@ -53,4 +53,4 @@ color-eyre = "0.6.2" jsonrpc-core = "18.0.0" reqwest = { version = "0.11.18", default-features = false, features = ["rustls-tls"] } serde = "1.0.166" -serde_json = "1.0.97" +serde_json = "1.0.100" diff --git a/zebra-rpc/Cargo.toml b/zebra-rpc/Cargo.toml index a839884c655..1caa8be6658 100644 --- a/zebra-rpc/Cargo.toml +++ b/zebra-rpc/Cargo.toml @@ -51,7 +51,7 @@ jsonrpc-http-server = "18.0.0" num_cpus = "1.16.0" # zebra-rpc needs the preserve_order feature in serde_json, which is a dependency of jsonrpc-core -serde_json = { version = "1.0.99", features = ["preserve_order"] } +serde_json = { version = "1.0.100", features = ["preserve_order"] } indexmap = { version = "2.0.0", features = ["serde"] } tokio = { version = "1.29.1", features = ["time", "rt-multi-thread", "macros", "tracing"] } diff --git a/zebra-state/Cargo.toml b/zebra-state/Cargo.toml index 55e1b15376e..6c4761ae464 100644 --- a/zebra-state/Cargo.toml +++ b/zebra-state/Cargo.toml @@ -69,7 +69,7 @@ tracing = "0.1.37" # elasticsearch specific dependencies. 
# Security: avoid default dependency on openssl elasticsearch = { version = "8.5.0-alpha.1", default-features = false, features = ["rustls-tls"], optional = true } -serde_json = { version = "1.0.99", package = "serde_json", optional = true } +serde_json = { version = "1.0.100", package = "serde_json", optional = true } zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.27" } diff --git a/zebra-utils/Cargo.toml b/zebra-utils/Cargo.toml index 19dadb8088e..a9598a94a04 100644 --- a/zebra-utils/Cargo.toml +++ b/zebra-utils/Cargo.toml @@ -69,7 +69,7 @@ tinyvec = { version = "1.6.0", features = ["rustc_1_55"] } structopt = "0.3.26" hex = "0.4.3" -serde_json = "1.0.99" +serde_json = "1.0.100" tracing-error = "0.2.0" tracing-subscriber = "0.3.17" thiserror = "1.0.41" diff --git a/zebrad/Cargo.toml b/zebrad/Cargo.toml index 146450e568e..a0cc68e82b1 100644 --- a/zebrad/Cargo.toml +++ b/zebrad/Cargo.toml @@ -236,7 +236,7 @@ once_cell = "1.18.0" regex = "1.8.4" # zebra-rpc needs the preserve_order feature, it also makes test results more stable -serde_json = { version = "1.0.99", features = ["preserve_order"] } +serde_json = { version = "1.0.100", features = ["preserve_order"] } tempfile = "3.5.0" hyper = { version = "0.14.27", features = ["http1", "http2", "server"]} From 1014e3c7855df1c53d0bf0ef571d2cecd9b795f8 Mon Sep 17 00:00:00 2001 From: Marek Date: Fri, 7 Jul 2023 01:23:47 +0200 Subject: [PATCH 197/265] Add default RPC port depending on Zcash network (#7162) The `runtime-entrypoint.sh` uses the `RPC_PORT` env var when the user specifies the `getblocktemplate-rpc` feature, but this env var is unset unless the user sets it. This commit sets the default values for `RPC_PORT` depending on `NETWORK`. 
--- docker/runtime-entrypoint.sh | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/docker/runtime-entrypoint.sh b/docker/runtime-entrypoint.sh index 5eaeb7a4cc7..596cef19ac3 100755 --- a/docker/runtime-entrypoint.sh +++ b/docker/runtime-entrypoint.sh @@ -31,6 +31,11 @@ fi : "${TRACING_ENDPOINT_PORT:=3000}" # [rpc] : "${RPC_LISTEN_ADDR:=0.0.0.0}" +if [[ "${NETWORK}" = "Mainnet" ]]; then +: "${RPC_PORT:=8232}" +elif [[ "${NETWORK}" = "Testnet" ]]; then +: "${RPC_PORT:=18232}" +fi # Populate `zebrad.toml` before starting zebrad, using the environmental From f6afec2be81d8cc7af76c59b453ac5add9f7dccb Mon Sep 17 00:00:00 2001 From: teor Date: Fri, 7 Jul 2023 13:35:41 +1000 Subject: [PATCH 198/265] fix(ci): Increase peer cache startup wait time and test time (#7169) * Increase peer cache test time * Wait longer for DNS responses before writing peers to disk --- zebra-network/src/peer_cache_updater.rs | 2 +- zebrad/tests/common/launch.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/zebra-network/src/peer_cache_updater.rs b/zebra-network/src/peer_cache_updater.rs index 64c160e815f..c213b87345d 100644 --- a/zebra-network/src/peer_cache_updater.rs +++ b/zebra-network/src/peer_cache_updater.rs @@ -25,7 +25,7 @@ pub async fn peer_cache_updater( // // TODO: turn the initial sleep time into a parameter of this function, // and allow it to be set in tests - sleep(DNS_LOOKUP_TIMEOUT * 2).await; + sleep(DNS_LOOKUP_TIMEOUT * 4).await; loop { // Ignore errors because updating the cache is optional. diff --git a/zebrad/tests/common/launch.rs b/zebrad/tests/common/launch.rs index 8b8738b41d4..0ed4899b4f4 100644 --- a/zebrad/tests/common/launch.rs +++ b/zebrad/tests/common/launch.rs @@ -39,7 +39,7 @@ pub const LAUNCH_DELAY: Duration = Duration::from_secs(15); /// After we launch `zebrad`, wait this long in extended tests. /// See [`LAUNCH_DELAY`] for details. 
-pub const EXTENDED_LAUNCH_DELAY: Duration = Duration::from_secs(25); +pub const EXTENDED_LAUNCH_DELAY: Duration = Duration::from_secs(45); /// After we launch `lightwalletd`, wait this long for the command to start up, /// take the actions expected by the quick tests, and log the expected logs. From e733d719730f49a307c6b8204066138a7833836d Mon Sep 17 00:00:00 2001 From: teor Date: Mon, 10 Jul 2023 11:27:14 +1000 Subject: [PATCH 199/265] change(devops): Reduce number of dependabot PRs (#7156) * Reduce number of dependabot PRs * Validate dependabot config before merging * Fix indentation * Fix spacing again * Split large group * Expand comments, fix typos Co-authored-by: Marek --------- Co-authored-by: Marek Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com> --- .github/dependabot.yml | 128 +++++++++++++++++++++++++++++++++++-- .github/workflows/lint.yml | 5 +- 2 files changed, 128 insertions(+), 5 deletions(-) diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 8f810e4b3c4..e5a061c8d86 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -2,21 +2,141 @@ version: 2 updates: - package-ecosystem: cargo directory: '/' + # serde, clap, and other dependencies sometimes have multiple updates in a week schedule: - interval: daily + interval: weekly timezone: America/New_York - open-pull-requests-limit: 10 + # Limit dependabot to 2 PRs per reviewer, but assume one reviewer is busy or away + open-pull-requests-limit: 8 labels: - 'C-trivial' - 'A-rust' - 'A-dependencies' - 'P-Low :snowflake:' + groups: + ecc: + patterns: + # deliberately include zcash_script (even though it is maintained by ZF) + - "zcash_*" + - "orchard" + - "halo2*" + - "incrementalmerkletree" + - "equihash" + # addresses + - "bs58" + - "ripemd" + # groups are limited to 10 items + crypto: + patterns: + - "bellman" + - "redjubjub" + - "reddsa" + - "jubjub" + - "group" + - "bls12_381" + - "blake*" + - "secp256k1" + - "sha2" + ed25519-zebra: + patterns: 
+ - "ed25519*" + - "curve25519*" + - "x25519*" + tokio: + patterns: + - "tokio*" + - "console-subscriber" + tower: + patterns: + - "tower*" + dirs: + patterns: + - "dirs*" + - "directories*" + - "tempfile" + grpc: + patterns: + - "prost*" + - "tonic*" + vergen: + patterns: + - "vergen" + - "git*" + - "libgit*" + http: + patterns: + - "hyper*" + - "h2" + - "reqwest" + tracing: + patterns: + - "tracing*" + - "log" + error: + patterns: + - "*eyre*" + - "thiserror" + - "displaydoc" + - "spandoc" + - "owo-colors" + once-cell: + patterns: + - "once_cell" + - "lazy_static" + progress-bar: + patterns: + - "indicatif" + - "howudoin" + time: + patterns: + - "chrono*" + - "time*" + - "humantime*" + cli: + patterns: + - "abscissa*" + - "structopt*" + - "clap*" + - "atty*" + flamegraph: + patterns: + - "tracing-flame" + - "inferno" + serde: + patterns: + - "serde*" + futures: + patterns: + - "futures*" + sentry: + patterns: + - "sentry*" + metrics: + patterns: + - "metrics*" + bitflags: + patterns: + - "bitflags*" + jsonrpc: + patterns: + - "jsonrpc*" + - "serde_json" + rand: + patterns: + - "rand*" + pin-project: + patterns: + - "pin-project*" + proptest: + patterns: + - "proptest*" - package-ecosystem: github-actions directory: '/' schedule: - interval: daily + # tj-actions/changed-files often updates daily, which is too much for us + interval: weekly timezone: America/New_York - open-pull-requests-limit: 10 + open-pull-requests-limit: 6 labels: - 'C-trivial' - 'A-devops' diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 02c5e08b958..c75280be503 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -178,10 +178,13 @@ jobs: if: ${{ needs.changed-files.outputs.workflows == 'true' }} steps: - uses: actions/checkout@v3.5.3 - - uses: reviewdog/action-actionlint@v1.37.1 + - name: actionlint + uses: reviewdog/action-actionlint@v1.37.1 with: level: warning fail_on_error: false + - name: validate-dependabot + uses: 
marocchino/validate-dependabot@v2.1.0 codespell: runs-on: ubuntu-latest From 8c5bcb7487613ed72f992138aee1f76ad0f911d1 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 10 Jul 2023 03:37:10 +0000 Subject: [PATCH 200/265] build(deps): bump thiserror from 1.0.41 to 1.0.43 (#7174) Bumps [thiserror](https://github.com/dtolnay/thiserror) from 1.0.41 to 1.0.43. - [Release notes](https://github.com/dtolnay/thiserror/releases) - [Commits](https://github.com/dtolnay/thiserror/compare/1.0.41...1.0.43) --- updated-dependencies: - dependency-name: thiserror dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 8 ++++---- zebra-chain/Cargo.toml | 2 +- zebra-consensus/Cargo.toml | 2 +- zebra-network/Cargo.toml | 2 +- zebra-rpc/Cargo.toml | 2 +- zebra-script/Cargo.toml | 2 +- zebra-state/Cargo.toml | 2 +- zebra-test/Cargo.toml | 2 +- zebra-utils/Cargo.toml | 2 +- zebrad/Cargo.toml | 2 +- 10 files changed, 13 insertions(+), 13 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ef5001d66d4..7336b53998e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4443,18 +4443,18 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.41" +version = "1.0.43" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c16a64ba9387ef3fdae4f9c1a7f07a0997fce91985c0336f1ddc1822b3b37802" +checksum = "a35fc5b8971143ca348fa6df4f024d4d55264f3468c71ad1c2f365b0a4d58c42" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.41" +version = "1.0.43" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d14928354b01c4d6a4f0e549069adef399a284e7995c7ccca94e8a07a5346c59" +checksum = "463fe12d7993d3b327787537ce8dd4dfa058de32fc2b195ef3cde03dc4771e8f" dependencies = [ "proc-macro2 1.0.63", "quote 1.0.29", diff 
--git a/zebra-chain/Cargo.toml b/zebra-chain/Cargo.toml index 05acba6e6cc..dd8c91dab13 100644 --- a/zebra-chain/Cargo.toml +++ b/zebra-chain/Cargo.toml @@ -85,7 +85,7 @@ humantime = "2.1.0" # Error Handling & Formatting displaydoc = "0.2.4" static_assertions = "1.1.0" -thiserror = "1.0.41" +thiserror = "1.0.43" tracing = "0.1.37" # Serialization diff --git a/zebra-consensus/Cargo.toml b/zebra-consensus/Cargo.toml index 365b99d6038..4e825ccc8de 100644 --- a/zebra-consensus/Cargo.toml +++ b/zebra-consensus/Cargo.toml @@ -52,7 +52,7 @@ serde = { version = "1.0.166", features = ["serde_derive"] } futures = "0.3.28" futures-util = "0.3.28" metrics = "0.21.1" -thiserror = "1.0.41" +thiserror = "1.0.43" tokio = { version = "1.29.1", features = ["time", "sync", "tracing", "rt-multi-thread"] } tower = { version = "0.4.13", features = ["timeout", "util", "buffer"] } tracing = "0.1.37" diff --git a/zebra-network/Cargo.toml b/zebra-network/Cargo.toml index 6db3e1c8daf..d0c6bc80f02 100644 --- a/zebra-network/Cargo.toml +++ b/zebra-network/Cargo.toml @@ -58,7 +58,7 @@ rayon = "1.7.0" regex = "1.8.4" serde = { version = "1.0.166", features = ["serde_derive"] } tempfile = "3.5.0" -thiserror = "1.0.41" +thiserror = "1.0.43" futures = "0.3.28" tokio = { version = "1.29.1", features = ["fs", "io-util", "net", "time", "tracing", "macros", "rt-multi-thread"] } diff --git a/zebra-rpc/Cargo.toml b/zebra-rpc/Cargo.toml index 1caa8be6658..742a764c2cf 100644 --- a/zebra-rpc/Cargo.toml +++ b/zebra-rpc/Cargo.toml @@ -82,7 +82,7 @@ insta = { version = "1.30.0", features = ["redactions", "json", "ron"] } proptest = "1.2.0" -thiserror = "1.0.41" +thiserror = "1.0.43" tokio = { version = "1.29.1", features = ["full", "tracing", "test-util"] } zebra-chain = { path = "../zebra-chain", features = ["proptest-impl"] } diff --git a/zebra-script/Cargo.toml b/zebra-script/Cargo.toml index 6efe74fd543..eb284f9f957 100644 --- a/zebra-script/Cargo.toml +++ b/zebra-script/Cargo.toml @@ -19,7 +19,7 @@ 
zcash_script = "0.1.12" zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.27" } -thiserror = "1.0.41" +thiserror = "1.0.43" displaydoc = "0.2.4" [dev-dependencies] diff --git a/zebra-state/Cargo.toml b/zebra-state/Cargo.toml index 6c4761ae464..f91dc582d04 100644 --- a/zebra-state/Cargo.toml +++ b/zebra-state/Cargo.toml @@ -59,7 +59,7 @@ rocksdb = { version = "0.21.0", default-features = false, features = ["lz4"] } semver = "1.0.17" serde = { version = "1.0.166", features = ["serde_derive"] } tempfile = "3.5.0" -thiserror = "1.0.41" +thiserror = "1.0.43" rayon = "1.7.0" tokio = { version = "1.29.1", features = ["rt-multi-thread", "sync", "tracing"] } diff --git a/zebra-test/Cargo.toml b/zebra-test/Cargo.toml index 41706bf7d40..d32993376ca 100644 --- a/zebra-test/Cargo.toml +++ b/zebra-test/Cargo.toml @@ -36,7 +36,7 @@ tinyvec = { version = "1.6.0", features = ["rustc_1_55"] } humantime = "2.1.0" owo-colors = "3.5.0" spandoc = "0.2.2" -thiserror = "1.0.41" +thiserror = "1.0.43" tracing-subscriber = { version = "0.3.17", features = ["env-filter"] } tracing-error = "0.2.0" diff --git a/zebra-utils/Cargo.toml b/zebra-utils/Cargo.toml index a9598a94a04..0eb5ddf6ab9 100644 --- a/zebra-utils/Cargo.toml +++ b/zebra-utils/Cargo.toml @@ -72,7 +72,7 @@ hex = "0.4.3" serde_json = "1.0.100" tracing-error = "0.2.0" tracing-subscriber = "0.3.17" -thiserror = "1.0.41" +thiserror = "1.0.43" zebra-node-services = { path = "../zebra-node-services", version = "1.0.0-beta.27" } zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.27" } diff --git a/zebrad/Cargo.toml b/zebrad/Cargo.toml index a0cc68e82b1..78f63c1770f 100644 --- a/zebrad/Cargo.toml +++ b/zebrad/Cargo.toml @@ -173,7 +173,7 @@ color-eyre = { version = "0.6.2", default-features = false, features = ["issue-u # Enable a feature that makes tinyvec compile much faster. 
tinyvec = { version = "1.6.0", features = ["rustc_1_55"] } -thiserror = "1.0.41" +thiserror = "1.0.43" tracing-subscriber = { version = "0.3.17", features = ["env-filter"] } tracing-appender = "0.2.2" From 776c7c7cd2b18b96027c23a3bc73f203b2b288b9 Mon Sep 17 00:00:00 2001 From: Marek Date: Mon, 10 Jul 2023 08:07:32 +0200 Subject: [PATCH 201/265] Enable RPC port in general scenarios (#7177) --- docker/runtime-entrypoint.sh | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/docker/runtime-entrypoint.sh b/docker/runtime-entrypoint.sh index 596cef19ac3..b5c58f2c6a4 100755 --- a/docker/runtime-entrypoint.sh +++ b/docker/runtime-entrypoint.sh @@ -31,12 +31,15 @@ fi : "${TRACING_ENDPOINT_PORT:=3000}" # [rpc] : "${RPC_LISTEN_ADDR:=0.0.0.0}" +if [[ -z "${RPC_PORT}" ]]; then +if [[ " ${FEATURES} " =~ " getblocktemplate-rpcs " ]]; then if [[ "${NETWORK}" = "Mainnet" ]]; then : "${RPC_PORT:=8232}" elif [[ "${NETWORK}" = "Testnet" ]]; then : "${RPC_PORT:=18232}" fi - +fi +fi # Populate `zebrad.toml` before starting zebrad, using the environmental # variables set by the Dockerfile or the user. If the user has already created a config, don't replace it. @@ -65,9 +68,8 @@ endpoint_addr = "${METRICS_ENDPOINT_ADDR}:${METRICS_ENDPOINT_PORT}" EOF fi -# Set this to enable the RPC port -if [[ " $FEATURES " =~ " getblocktemplate-rpcs " ]]; then # spaces are important here to avoid partial matches -cat <> "$ZEBRA_CONF_PATH" +if [[ "${RPC_PORT}" ]]; then +cat <> "${ZEBRA_CONF_PATH}" [rpc] listen_addr = "${RPC_LISTEN_ADDR}:${RPC_PORT}" EOF From 0d332646bfb86abc212ac7e7cce9c474c76d936f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 10 Jul 2023 08:27:21 +0000 Subject: [PATCH 202/265] build(deps): bump the serde group with 1 update (#7184) Bumps the serde group with 1 update: [serde](https://github.com/serde-rs/serde). 
- [Release notes](https://github.com/serde-rs/serde/releases) - [Commits](https://github.com/serde-rs/serde/compare/v1.0.166...v1.0.168) --- updated-dependencies: - dependency-name: serde dependency-type: direct:production update-type: version-update:semver-patch dependency-group: serde ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 8 ++++---- zebra-chain/Cargo.toml | 2 +- zebra-consensus/Cargo.toml | 2 +- zebra-network/Cargo.toml | 2 +- zebra-node-services/Cargo.toml | 4 ++-- zebra-rpc/Cargo.toml | 2 +- zebra-state/Cargo.toml | 2 +- zebrad/Cargo.toml | 2 +- 8 files changed, 12 insertions(+), 12 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 7336b53998e..471aa6c0e41 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4060,9 +4060,9 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.166" +version = "1.0.168" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d01b7404f9d441d3ad40e6a636a7782c377d2abdbe4fa2440e2edcc2f4f10db8" +checksum = "d614f89548720367ded108b3c843be93f3a341e22d5674ca0dd5cd57f34926af" dependencies = [ "serde_derive", ] @@ -4078,9 +4078,9 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.166" +version = "1.0.168" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5dd83d6dde2b6b2d466e14d9d1acce8816dedee94f735eac6395808b3483c6d6" +checksum = "d4fe589678c688e44177da4f27152ee2d190757271dc7f1d5b6b9f68d869d641" dependencies = [ "proc-macro2 1.0.63", "quote 1.0.29", diff --git a/zebra-chain/Cargo.toml b/zebra-chain/Cargo.toml index dd8c91dab13..95acadbaf5f 100644 --- a/zebra-chain/Cargo.toml +++ b/zebra-chain/Cargo.toml @@ -90,7 +90,7 @@ tracing = "0.1.37" # Serialization hex = { version = "0.4.3", features = ["serde"] } -serde = { version = "1.0.166", features = ["serde_derive", "rc"] } +serde = { version = "1.0.168", features = ["serde_derive", "rc"] } serde_with = "3.0.0" 
serde-big-array = "0.5.1" diff --git a/zebra-consensus/Cargo.toml b/zebra-consensus/Cargo.toml index 4e825ccc8de..639a27de3c2 100644 --- a/zebra-consensus/Cargo.toml +++ b/zebra-consensus/Cargo.toml @@ -47,7 +47,7 @@ chrono = { version = "0.4.26", default-features = false, features = ["clock", "s displaydoc = "0.2.4" lazy_static = "1.4.0" once_cell = "1.18.0" -serde = { version = "1.0.166", features = ["serde_derive"] } +serde = { version = "1.0.168", features = ["serde_derive"] } futures = "0.3.28" futures-util = "0.3.28" diff --git a/zebra-network/Cargo.toml b/zebra-network/Cargo.toml index d0c6bc80f02..a1cfb0af29e 100644 --- a/zebra-network/Cargo.toml +++ b/zebra-network/Cargo.toml @@ -56,7 +56,7 @@ pin-project = "1.1.2" rand = "0.8.5" rayon = "1.7.0" regex = "1.8.4" -serde = { version = "1.0.166", features = ["serde_derive"] } +serde = { version = "1.0.168", features = ["serde_derive"] } tempfile = "3.5.0" thiserror = "1.0.43" diff --git a/zebra-node-services/Cargo.toml b/zebra-node-services/Cargo.toml index 444710708c5..694636b959c 100644 --- a/zebra-node-services/Cargo.toml +++ b/zebra-node-services/Cargo.toml @@ -44,7 +44,7 @@ color-eyre = { version = "0.6.2", optional = true } jsonrpc-core = { version = "18.0.0", optional = true } # Security: avoid default dependency on openssl reqwest = { version = "0.11.18", default-features = false, features = ["rustls-tls"], optional = true } -serde = { version = "1.0.166", optional = true } +serde = { version = "1.0.168", optional = true } serde_json = { version = "1.0.100", optional = true } [dev-dependencies] @@ -52,5 +52,5 @@ serde_json = { version = "1.0.100", optional = true } color-eyre = "0.6.2" jsonrpc-core = "18.0.0" reqwest = { version = "0.11.18", default-features = false, features = ["rustls-tls"] } -serde = "1.0.166" +serde = "1.0.168" serde_json = "1.0.100" diff --git a/zebra-rpc/Cargo.toml b/zebra-rpc/Cargo.toml index 742a764c2cf..c387f31cf39 100644 --- a/zebra-rpc/Cargo.toml +++ b/zebra-rpc/Cargo.toml 
@@ -60,7 +60,7 @@ tower = "0.4.13" tracing = "0.1.37" hex = { version = "0.4.3", features = ["serde"] } -serde = { version = "1.0.166", features = ["serde_derive"] } +serde = { version = "1.0.168", features = ["serde_derive"] } # Experimental feature getblocktemplate-rpcs rand = { version = "0.8.5", optional = true } diff --git a/zebra-state/Cargo.toml b/zebra-state/Cargo.toml index f91dc582d04..19a11161654 100644 --- a/zebra-state/Cargo.toml +++ b/zebra-state/Cargo.toml @@ -57,7 +57,7 @@ regex = "1.8.4" rlimit = "0.10.0" rocksdb = { version = "0.21.0", default-features = false, features = ["lz4"] } semver = "1.0.17" -serde = { version = "1.0.166", features = ["serde_derive"] } +serde = { version = "1.0.168", features = ["serde_derive"] } tempfile = "3.5.0" thiserror = "1.0.43" diff --git a/zebrad/Cargo.toml b/zebrad/Cargo.toml index 78f63c1770f..908a4583ada 100644 --- a/zebrad/Cargo.toml +++ b/zebrad/Cargo.toml @@ -159,7 +159,7 @@ humantime-serde = "1.1.1" indexmap = "2.0.0" lazy_static = "1.4.0" semver = "1.0.17" -serde = { version = "1.0.166", features = ["serde_derive"] } +serde = { version = "1.0.168", features = ["serde_derive"] } toml = "0.7.6" futures = "0.3.28" From 32dd6180de7464c50432faac973001579eacae73 Mon Sep 17 00:00:00 2001 From: Marek Date: Mon, 10 Jul 2023 12:46:52 +0200 Subject: [PATCH 203/265] add(Docker): Enable miners to set their address for mining rewards. 
(#7178) * Enable RPC port in general scenarios * Add `mining.miner_address` to runtime entrypoint --- docker/runtime-entrypoint.sh | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/docker/runtime-entrypoint.sh b/docker/runtime-entrypoint.sh index b5c58f2c6a4..c111579aac7 100755 --- a/docker/runtime-entrypoint.sh +++ b/docker/runtime-entrypoint.sh @@ -105,6 +105,13 @@ use_color = false EOF fi fi + +if [[ -n "$MINER_ADDRESS" ]]; then +cat <> "$ZEBRA_CONF_PATH" +[mining] +miner_address = "${MINER_ADDRESS}" +EOF +fi fi echo "Using zebrad.toml:" From c4321083e77fd9eae8281675546e6530a8fd84f7 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 10 Jul 2023 10:47:10 +0000 Subject: [PATCH 204/265] build(deps): bump tj-actions/changed-files from 37.0.5 to 37.1.0 (#7183) Bumps [tj-actions/changed-files](https://github.com/tj-actions/changed-files) from 37.0.5 to 37.1.0. - [Release notes](https://github.com/tj-actions/changed-files/releases) - [Changelog](https://github.com/tj-actions/changed-files/blob/main/HISTORY.md) - [Commits](https://github.com/tj-actions/changed-files/compare/v37.0.5...v37.1.0) --- updated-dependencies: - dependency-name: tj-actions/changed-files dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/lint.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index c75280be503..434d29b32ff 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -37,7 +37,7 @@ jobs: - name: Rust files id: changed-files-rust - uses: tj-actions/changed-files@v37.0.5 + uses: tj-actions/changed-files@v37.1.0 with: files: | **/*.rs @@ -49,7 +49,7 @@ jobs: - name: Workflow files id: changed-files-workflows - uses: tj-actions/changed-files@v37.0.5 + uses: tj-actions/changed-files@v37.1.0 with: files: | .github/workflows/*.yml From da07b11f1b31a1115c735229ad5636671f4b4e3c Mon Sep 17 00:00:00 2001 From: teor Date: Tue, 11 Jul 2023 04:20:50 +1000 Subject: [PATCH 205/265] fix(ci): Pass the network input to all cached state tests (#7186) * Pass the network input to all cached state tests * Check the network is correct before running tests --- .../continous-integration-docker.yml | 18 +++- .github/workflows/deploy-gcp-tests.yml | 84 +++++++++++++++++-- 2 files changed, 95 insertions(+), 7 deletions(-) diff --git a/.github/workflows/continous-integration-docker.yml b/.github/workflows/continous-integration-docker.yml index 902218ef266..aa08a79f874 100644 --- a/.github/workflows/continous-integration-docker.yml +++ b/.github/workflows/continous-integration-docker.yml @@ -135,6 +135,9 @@ jobs: # zebrad tests without cached state + # TODO: make the non-cached-state tests use: + # network: ${{ inputs.network || vars.ZCASH_NETWORK }} + # Run all the zebra tests, including tests that are ignored by default. # Skips tests that need a cached state disk or a lightwalletd binary. 
# @@ -309,6 +312,9 @@ jobs: env: ZEBRA_CONF_PATH: 'zebrad/tests/common/configs/v1.0.0-rc.2.toml' + # END TODO: make the non-cached-state tests use: + # network: ${{ inputs.network || vars.ZCASH_NETWORK }} + # zebrad cached checkpoint state tests # Regenerate mandatory checkpoint Zebra cached state disks. @@ -328,6 +334,7 @@ jobs: test_id: sync-to-checkpoint test_description: Test sync up to mandatory checkpoint test_variables: '-e TEST_DISK_REBUILD=1 -e ZEBRA_FORCE_USE_COLOR=1' + network: ${{ inputs.network || vars.ZCASH_NETWORK }} needs_zebra_state: false saves_to_disk: true disk_suffix: checkpoint @@ -356,6 +363,7 @@ jobs: test_id: sync-past-checkpoint test_description: Test full validation sync from a cached state test_variables: '-e TEST_CHECKPOINT_SYNC=1 -e ZEBRA_FORCE_USE_COLOR=1' + network: ${{ inputs.network || vars.ZCASH_NETWORK }} needs_zebra_state: true saves_to_disk: false disk_suffix: checkpoint @@ -427,6 +435,7 @@ jobs: test_id: update-to-tip test_description: Test syncing to tip with a Zebra tip state test_variables: '-e TEST_UPDATE_SYNC=1 -e ZEBRA_FORCE_USE_COLOR=1 -e ZEBRA_CACHED_STATE_DIR=/var/cache/zebrad-cache' + network: ${{ inputs.network || vars.ZCASH_NETWORK }} needs_zebra_state: true # update the disk on every PR, to increase CI speed saves_to_disk: true @@ -572,7 +581,8 @@ jobs: test_id: lwd-full-sync test_description: Test lightwalletd full sync test_variables: '-e TEST_LWD_FULL_SYNC=1 -e ZEBRA_TEST_LIGHTWALLETD=1 -e ZEBRA_FORCE_USE_COLOR=1 -e ZEBRA_CACHED_STATE_DIR=/var/cache/zebrad-cache -e LIGHTWALLETD_DATA_DIR=/var/cache/lwd-cache' - # This test runs for (just) longer than 6 hours, so it needs multiple jobs + network: ${{ inputs.network || vars.ZCASH_NETWORK }} + # This test runs for longer than 6 hours, so it needs multiple jobs is_long_test: true needs_zebra_state: true needs_lwd_state: false @@ -610,6 +620,7 @@ jobs: test_id: lwd-update-sync test_description: Test lightwalletd update sync with both states test_variables: '-e 
TEST_LWD_UPDATE_SYNC=1 -e ZEBRA_TEST_LIGHTWALLETD=1 -e ZEBRA_FORCE_USE_COLOR=1 -e ZEBRA_CACHED_STATE_DIR=/var/cache/zebrad-cache -e LIGHTWALLETD_DATA_DIR=/var/cache/lwd-cache' + network: ${{ inputs.network || vars.ZCASH_NETWORK }} needs_zebra_state: true needs_lwd_state: true # since we do a full sync in every PR, the new cached state will only be a few minutes newer than the original one @@ -642,6 +653,7 @@ jobs: test_id: fully-synced-rpc test_description: Test lightwalletd RPC with a Zebra tip state test_variables: '-e TEST_LWD_RPC_CALL=1 -e ZEBRA_TEST_LIGHTWALLETD=1 -e ZEBRA_FORCE_USE_COLOR=1 -e ZEBRA_CACHED_STATE_DIR=/var/cache/zebrad-cache' + network: ${{ inputs.network || vars.ZCASH_NETWORK }} needs_zebra_state: true saves_to_disk: false disk_suffix: tip @@ -667,6 +679,7 @@ jobs: test_id: lwd-send-transactions test_description: Test sending transactions via lightwalletd test_variables: '-e TEST_LWD_TRANSACTIONS=1 -e ZEBRA_TEST_LIGHTWALLETD=1 -e ZEBRA_FORCE_USE_COLOR=1 -e ZEBRA_CACHED_STATE_DIR=/var/cache/zebrad-cache -e LIGHTWALLETD_DATA_DIR=/var/cache/lwd-cache' + network: ${{ inputs.network || vars.ZCASH_NETWORK }} needs_zebra_state: true needs_lwd_state: true saves_to_disk: false @@ -694,6 +707,7 @@ jobs: test_id: lwd-grpc-wallet test_description: Test gRPC calls via lightwalletd test_variables: '-e TEST_LWD_GRPC=1 -e ZEBRA_TEST_LIGHTWALLETD=1 -e ZEBRA_FORCE_USE_COLOR=1 -e ZEBRA_CACHED_STATE_DIR=/var/cache/zebrad-cache -e LIGHTWALLETD_DATA_DIR=/var/cache/lwd-cache' + network: ${{ inputs.network || vars.ZCASH_NETWORK }} needs_zebra_state: true needs_lwd_state: true saves_to_disk: false @@ -725,6 +739,7 @@ jobs: test_id: get-block-template test_description: Test getblocktemplate RPC method via Zebra's rpc server test_variables: '-e TEST_GET_BLOCK_TEMPLATE=1 -e ZEBRA_FORCE_USE_COLOR=1 -e ZEBRA_CACHED_STATE_DIR=/var/cache/zebrad-cache' + network: ${{ inputs.network || vars.ZCASH_NETWORK }} needs_zebra_state: true needs_lwd_state: false saves_to_disk: false @@ 
-751,6 +766,7 @@ jobs: test_id: submit-block test_description: Test submitting blocks via Zebra's rpc server test_variables: '-e TEST_SUBMIT_BLOCK=1 -e ZEBRA_FORCE_USE_COLOR=1 -e ZEBRA_CACHED_STATE_DIR=/var/cache/zebrad-cache' + network: ${{ inputs.network || vars.ZCASH_NETWORK }} needs_zebra_state: true needs_lwd_state: false saves_to_disk: false diff --git a/.github/workflows/deploy-gcp-tests.yml b/.github/workflows/deploy-gcp-tests.yml index 263f7e1e85b..3f92e30ba6f 100644 --- a/.github/workflows/deploy-gcp-tests.yml +++ b/.github/workflows/deploy-gcp-tests.yml @@ -570,15 +570,16 @@ jobs: ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} \ " - # follow the logs of the test we just launched, up to Canopy activation (or the test finishing) + + # check the logs of the test we just launched for zebrad startup messages # - # If `inputs.is_long_test` is `false`, this job is skipped. - logs-heartwood: - name: Log ${{ inputs.test_id }} test (heartwood) + # this step makes sure `zebrad` is running, and configured for `inputs.network`. + logs-startup: + name: Check startup for ${{ inputs.test_id }} # We run exactly one of without-cached-state or with-cached-state, and we always skip the other one. needs: [ launch-with-cached-state, launch-without-cached-state ] # If the previous job fails, we still want to show the logs. - if: ${{ !cancelled() && inputs.is_long_test }} + if: ${{ !cancelled() }} runs-on: ubuntu-latest permissions: contents: 'read' @@ -618,13 +619,84 @@ jobs: uses: google-github-actions/setup-gcloud@v1.1.1 # Show all the logs since the container launched, - # following until Canopy activation (or the test finishes) + # following until we see zebrad startup messages. + # + # This check limits the number of log lines, so tests running on the wrong network don't + # run until the job timeout. If Zebra does a complete recompile, there are a few hundred log + # lines before the startup logs. So that's what we use here. 
# # The log pipeline ignores the exit status of `docker logs`. # It also ignores the expected 'broken pipe' error from `tee`, # which happens when `grep` finds a matching output and moves on to the next job. # # Errors in the tests are caught by the final test status job. + - name: Check startup logs for ${{ inputs.test_id }} + run: | + gcloud compute ssh ${{ inputs.test_id }}-${{ env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }} \ + --zone ${{ vars.GCP_ZONE }} \ + --ssh-flag="-o ServerAliveInterval=5" \ + --ssh-flag="-o ConnectionAttempts=20" \ + --ssh-flag="-o ConnectTimeout=5" \ + --command \ + "\ + sudo docker logs \ + --tail all \ + --follow \ + ${{ inputs.test_id }} | \ + head -700 | \ + tee --output-error=exit /dev/stderr | \ + grep --max-count=1 --extended-regexp --color=always \ + -e 'Zcash network: ${{ inputs.network }}' \ + " + + # follow the logs of the test we just launched, up to Canopy activation (or the test finishing) + # + # If `inputs.is_long_test` is `false`, this job is skipped. + logs-heartwood: + name: Log ${{ inputs.test_id }} test (heartwood) + needs: [ logs-startup ] + # If the previous job fails, we still want to show the logs. 
+ if: ${{ !cancelled() && inputs.is_long_test }} + runs-on: ubuntu-latest + permissions: + contents: 'read' + id-token: 'write' + steps: + - uses: actions/checkout@v3.5.3 + with: + persist-credentials: false + fetch-depth: '2' + + - name: Inject slug/short variables + uses: rlespinasse/github-slug-action@v4 + with: + short-length: 7 + + # Install our SSH secret + - name: Install private SSH key + uses: shimataro/ssh-key-action@v2.5.1 + with: + key: ${{ secrets.GCP_SSH_PRIVATE_KEY }} + name: google_compute_engine + known_hosts: unnecessary + + - name: Generate public SSH key + run: ssh-keygen -y -f ~/.ssh/google_compute_engine > ~/.ssh/google_compute_engine.pub + + # Setup gcloud CLI + - name: Authenticate to Google Cloud + id: auth + uses: google-github-actions/auth@v1.1.1 + with: + retries: '3' + workload_identity_provider: '${{ vars.GCP_WIF }}' + service_account: '${{ vars.GCP_DEPLOYMENTS_SA }}' + + - name: Set up Cloud SDK + uses: google-github-actions/setup-gcloud@v1.1.1 + + # Show all the logs since the container launched, + # following until Canopy activation (or the test finishes) - name: Show logs for ${{ inputs.test_id }} test (heartwood) run: | gcloud compute ssh ${{ inputs.test_id }}-${{ env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }} \ From d3cc91e59481212d4f21c153a62a523ed14059c8 Mon Sep 17 00:00:00 2001 From: teor Date: Tue, 11 Jul 2023 09:34:15 +1000 Subject: [PATCH 206/265] change(merge): Require 2 reviews for PRs with an extra-reviews label (#7158) Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com> --- .github/mergify.yml | 65 ++++++++++++++++++++++++++++--------- .github/release-drafter.yml | 43 +++++++++++++----------- 2 files changed, 73 insertions(+), 35 deletions(-) diff --git a/.github/mergify.yml b/.github/mergify.yml index 11cf9aeecf2..a0a5b7e282d 100644 --- a/.github/mergify.yml +++ b/.github/mergify.yml @@ -29,34 +29,67 @@ queue_rules: conditions: - base=main +# These rules are checked in order, the first 
one to be satisfied applies pull_request_rules: - - name: move to urgent queue when CI passes with 1 review and not WIP targeting main + - name: move to urgent queue when CI passes with multiple reviews conditions: - # This queue handles a PR if: - # - it targets main - # - is not in draft - # - does not include the do-not-merge label - # - is labeled with Critical priority + # This queue handles a PR if it: + # has multiple approving reviewers + - "#approved-reviews-by>=2" + # is labeled with Critical priority + - 'label~=^P-Critical' + # and satisfies the standard merge conditions: + # targets main - base=main + # is not in draft - -draft + # does not include the do-not-merge label - label!=do-not-merge + actions: + queue: + name: urgent + method: squash + + - name: move to urgent queue when CI passes with 1 review + conditions: + # This queue handles a PR if it: + # has at least one approving reviewer (branch protection rule) + # does not need extra reviews + - 'label!=extra-reviews' + # is labeled with Critical priority - 'label~=^P-Critical' + # and satisfies the standard merge conditions: + - base=main + - -draft + - label!=do-not-merge actions: queue: name: urgent method: squash - - name: move to batched queue when CI passes with 1 review and not WIP targeting main + - name: move to batched queue when CI passes with multiple reviews + conditions: + # This queue handles a PR if it: + # has multiple approving reviewers + - "#approved-reviews-by>=2" + # is labeled with any other priority (rules are checked in order) + # and satisfies the standard merge conditions: + - base=main + - -draft + - label!=do-not-merge + actions: + queue: + name: batched + method: squash + + - name: move to batched queue when CI passes with 1 review conditions: - # This queue handles a PR if: - # - it targets main - # - is not in draft - # - does not include the do-not-merge label - # - is labeled with any other priority except Critical, or does not have a priority label, - # 
including automated dependabot PRs. - # - # We don't need to check priority labels here, because the rules are evaluated in order: - # https://docs.mergify.com/configuration/#pull-request-rules + # This queue handles a PR if it: + # has at least one approving reviewer (branch protection rule) + # does not need extra reviews + - 'label!=extra-reviews' + # is labeled with any other priority (rules are checked in order) + # and satisfies the standard merge conditions: - base=main - -draft - label!=do-not-merge diff --git a/.github/release-drafter.yml b/.github/release-drafter.yml index 65e55e76cf3..f291980b693 100644 --- a/.github/release-drafter.yml +++ b/.github/release-drafter.yml @@ -11,34 +11,33 @@ autolabeler: - '/secur/i' title: - '/secur/i' - - label: 'C-removed' + - '/crash/i' + - '/destr/i' + - '/unsafe/i' + - label: 'C-deprecated' branch: - - '/remov/i' + - '/deprecat/i' title: - - '/remov/i' - - label: 'C-deprecated' + - '/deprecat/i' + - label: 'extra-reviews' branch: + - '/remov/i' - '/deprecat/i' title: + - '/remov/i' - '/deprecat/i' + - '/crash/i' + - '/destr/i' + - '/unsafe/i' - label: 'C-feature' branch: - - '/add/i' - '/feat/i' title: - - '/add/i' - '/feat/i' - - label: 'C-enhancement' - branch: - - '/chang/i' - title: - - '/chang/i' - label: 'C-bug' branch: - - '/fix/i' - '/bug/i' title: - - '/fix/i' - '/bug/i' # Changes that are almost always trivial for users - label: 'C-trivial' @@ -46,16 +45,24 @@ autolabeler: - '/clean/i' - '/chore/i' - '/clippy/i' + - '/test/i' title: - '/clean/i' - '/chore/i' - '/clippy/i' + - '/test/i' + - '/(ci)/i' + - '/(cd)/i' + - '/job/i' + - '/patch/i' + - '/actions/i' files: # Regular changes that don't need to go in the CHANGELOG - 'CHANGELOG.md' - 'zebra-consensus/src/checkpoint/*-checkpoints.txt' # Developer-only changes - '.gitignore' + - '.dockerignore' # Test-only changes - 'zebra-test' - '.cargo/config.toml' @@ -80,8 +87,7 @@ categories: labels: - 'C-security' # Other labels that are usually security issues - - 
'I-bad-code' - - 'I-bad-data' + - 'I-invalid-data' - 'I-consensus' - 'I-crash' - 'I-destructive' @@ -90,11 +96,10 @@ categories: - 'I-privacy' - 'I-remote-node-overload' - 'I-unbounded-growth' - - 'I-unsound' + - 'I-memory-safety' - title: 'Removed' labels: - 'C-removal' - - 'C-breaking' - title: 'Deprecated' labels: - 'C-deprecation' @@ -164,9 +169,9 @@ template: | ### Breaking Changes This release has the following breaking changes: - - *TODO*: Check the `Removed` section for any breaking changes + - *TODO*: Check the `Removed` and `Deprecated` sections for any breaking changes - *TODO*: Add a short description of the user impact of each breaking change, and any actions users need to take - + $CHANGES ### Contributors From cf1e07248ec8367f8efcfa41c100dae4f6c6395a Mon Sep 17 00:00:00 2001 From: teor Date: Wed, 12 Jul 2023 07:04:53 +1000 Subject: [PATCH 207/265] change(release): Document the release level for RPC and command-line changes (#7196) * Document the release level for RPC and command-line changes * Add RPCs and command-line arguments --- .github/PULL_REQUEST_TEMPLATE/release-checklist.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/PULL_REQUEST_TEMPLATE/release-checklist.md b/.github/PULL_REQUEST_TEMPLATE/release-checklist.md index b60584b88ae..d7cf1ffebb6 100644 --- a/.github/PULL_REQUEST_TEMPLATE/release-checklist.md +++ b/.github/PULL_REQUEST_TEMPLATE/release-checklist.md @@ -68,7 +68,7 @@ Zebra follows [semantic versioning](https://semver.org). Semantic versions look Choose a release level for `zebrad`. 
Release levels are based on user-visible changes from the changelog: - Mainnet Network Upgrades are `major` releases -- significant new features, large changes, deprecations, and removals are `minor` releases +- significant new features or behaviour changes; changes to RPCs, command-line, or configs; and deprecations or removals are `minor` releases - otherwise, it is a `patch` release Zebra's Rust API doesn't have any support or stability guarantees, so we keep all the `zebra-*` and `tower-*` crates on a beta `pre-release` version. From 80b1b227f30fc3478cafba87a9265afc402a7593 Mon Sep 17 00:00:00 2001 From: teor Date: Wed, 12 Jul 2023 19:33:51 +1000 Subject: [PATCH 208/265] Stop requiring testnet job for CI to pass (#7208) --- .github/workflows/continous-delivery.yml | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/.github/workflows/continous-delivery.yml b/.github/workflows/continous-delivery.yml index ba4d5272e9b..8c846874977 100644 --- a/.github/workflows/continous-delivery.yml +++ b/.github/workflows/continous-delivery.yml @@ -120,8 +120,7 @@ jobs: zebra_skip_ipv6_tests: '1' rust_log: info - # Test that Zebra works using the default config with the latest Zebra version, - # and test reconfiguring the docker image for testnet. + # Test that Zebra works using the default config with the latest Zebra version. test-configuration-file: name: Test Zebra CD Docker config file timeout-minutes: 15 @@ -161,6 +160,20 @@ jobs: fi exit "$EXIT_STATUS" + # Test reconfiguring the docker image for testnet. 
+ test-configuration-file-testnet: + name: Test testnet Zebra CD Docker config file + timeout-minutes: 15 + runs-on: ubuntu-latest + needs: build + steps: + - uses: r7kamura/rust-problem-matchers@v1.3.0 + + - name: Inject slug/short variables + uses: rlespinasse/github-slug-action@v4 + with: + short-length: 7 + # Make sure Zebra can sync the genesis block on testnet - name: Run tests using a testnet config run: | From f9a48266adc789baa0d18d7ce78bee91e9014fa0 Mon Sep 17 00:00:00 2001 From: teor Date: Thu, 13 Jul 2023 03:07:10 +1000 Subject: [PATCH 209/265] change(test): Add a recalculate_root() method to trees for tests (#7204) * Add a recalculate_root() method to trees for tests * Rename test tree types to make them easier to change * Add TODOs for tests for old and new serialization formats * fix doc typos --------- Co-authored-by: Alfredo Garcia --- zebra-chain/src/orchard/tree.rs | 7 +- zebra-chain/src/sapling/tree.rs | 7 +- zebra-chain/src/sprout/tree.rs | 7 +- .../service/finalized_state/tests/vectors.rs | 74 +++++++++++++------ 4 files changed, 69 insertions(+), 26 deletions(-) diff --git a/zebra-chain/src/orchard/tree.rs b/zebra-chain/src/orchard/tree.rs index 0924b3d8445..c212033ac12 100644 --- a/zebra-chain/src/orchard/tree.rs +++ b/zebra-chain/src/orchard/tree.rs @@ -345,7 +345,7 @@ impl NoteCommitmentTree { Some(root) => root, None => { // Compute root and cache it. - let root = Root(self.inner.root().0); + let root = self.recalculate_root(); *write_root = Some(root); root } @@ -361,6 +361,11 @@ impl NoteCommitmentTree { .expect("a thread that previously held exclusive lock access panicked") } + /// Calculates and returns the current root of the tree, ignoring any caching. + pub fn recalculate_root(&self) -> Root { + Root(self.inner.root().0) + } + /// Get the Pallas-based Sinsemilla hash / root node of this merkle tree of /// note commitments. 
pub fn hash(&self) -> [u8; 32] { diff --git a/zebra-chain/src/sapling/tree.rs b/zebra-chain/src/sapling/tree.rs index 2b6feeb0d5e..06029731108 100644 --- a/zebra-chain/src/sapling/tree.rs +++ b/zebra-chain/src/sapling/tree.rs @@ -348,7 +348,7 @@ impl NoteCommitmentTree { Some(root) => root, None => { // Compute root and cache it. - let root = Root::try_from(self.inner.root().0).unwrap(); + let root = self.recalculate_root(); *write_root = Some(root); root } @@ -364,6 +364,11 @@ impl NoteCommitmentTree { .expect("a thread that previously held exclusive lock access panicked") } + /// Calculates and returns the current root of the tree, ignoring any caching. + pub fn recalculate_root(&self) -> Root { + Root::try_from(self.inner.root().0).unwrap() + } + /// Gets the Jubjub-based Pedersen hash of root node of this merkle tree of /// note commitments. pub fn hash(&self) -> [u8; 32] { diff --git a/zebra-chain/src/sprout/tree.rs b/zebra-chain/src/sprout/tree.rs index d28738be7d8..af1d964d123 100644 --- a/zebra-chain/src/sprout/tree.rs +++ b/zebra-chain/src/sprout/tree.rs @@ -282,7 +282,7 @@ impl NoteCommitmentTree { Some(root) => root, None => { // Compute root and cache it. - let root = Root(self.inner.root().0); + let root = self.recalculate_root(); *write_root = Some(root); root } @@ -298,6 +298,11 @@ impl NoteCommitmentTree { .expect("a thread that previously held exclusive lock access panicked") } + /// Calculates and returns the current root of the tree, ignoring any caching. + pub fn recalculate_root(&self) -> Root { + Root(self.inner.root().0) + } + /// Returns a hash of the Sprout note commitment tree root. 
pub fn hash(&self) -> [u8; 32] { self.root().into() diff --git a/zebra-state/src/service/finalized_state/tests/vectors.rs b/zebra-state/src/service/finalized_state/tests/vectors.rs index 078c0267596..8df81b66a89 100644 --- a/zebra-state/src/service/finalized_state/tests/vectors.rs +++ b/zebra-state/src/service/finalized_state/tests/vectors.rs @@ -4,20 +4,48 @@ //! We don't need to check empty trees, because the database format snapshot tests //! use empty trees. -use halo2::pasta::{group::ff::PrimeField, pallas}; use hex::FromHex; use rand::random; -use zebra_chain::{orchard, sapling, sprout}; +use halo2::pasta::{group::ff::PrimeField, pallas}; + +use zebra_chain::{ + orchard::tree::NoteCommitmentTree as OrchardNoteCommitmentTree, + sapling::tree::NoteCommitmentTree as SaplingNoteCommitmentTree, + sprout::{ + tree::NoteCommitmentTree as SproutNoteCommitmentTree, + NoteCommitment as SproutNoteCommitment, + }, +}; use crate::service::finalized_state::disk_format::{FromDisk, IntoDisk}; +// Currently, these tests check these structs are equal: +// * commitments -> tree struct +// * commitments -> tree struct -> serialize -> deserialize -> tree struct +// And these serialized formats are equal: +// * fixed serialized test vector +// * commitments -> tree struct -> serialize +// * commitments -> tree struct -> serialize -> deserialize -> tree struct -> serialize +// +// TODO: apply these tests to the new tree structs, and update the serialization format +// (keeping the tests for the old format is optional, because the tests below cover it) +// +// TODO: test that old and new serializations produce the same format: +// Tree roots built from the same commitments should match: +// * commitments -> old tree struct -> new tree struct -> un-cached root +// * commitments -> new tree struct -> un-cached root +// Even when serialized and deserialized: +// * commitments -> old tree struct -> old serialize -> old deserialize -> old tree struct -> new tree struct -> un-cached root 
+// * commitments -> new tree struct -> new serialize -> new deserialize -> new tree struct -> un-cached root +// * commitments -> new tree struct -> un-cached root + /// Check that the sprout tree database serialization format has not changed. #[test] fn sprout_note_commitment_tree_serialization() { let _init_guard = zebra_test::init(); - let mut incremental_tree = sprout::tree::NoteCommitmentTree::default(); + let mut incremental_tree = SproutNoteCommitmentTree::default(); // Some commitments from zebra-chain/src/sprout/tests/test_vectors.rs let hex_commitments = [ @@ -29,7 +57,7 @@ fn sprout_note_commitment_tree_serialization() { for (idx, cm_hex) in hex_commitments.iter().enumerate() { let bytes = <[u8; 32]>::from_hex(cm_hex).unwrap(); - let cm = sprout::NoteCommitment::from(bytes); + let cm = SproutNoteCommitment::from(bytes); incremental_tree.append(cm).unwrap(); if random() { info!(?idx, "randomly caching root for note commitment tree index"); @@ -48,7 +76,7 @@ fn sprout_note_commitment_tree_serialization() { let serialized_tree = incremental_tree.as_bytes(); assert_eq!(hex::encode(&serialized_tree), expected_serialized_tree_hex); - let deserialized_tree = sprout::tree::NoteCommitmentTree::from_bytes(&serialized_tree); + let deserialized_tree = SproutNoteCommitmentTree::from_bytes(&serialized_tree); // This check isn't enough to show that the entire struct is the same, because it just compares // the cached serialized/deserialized roots. 
(NoteCommitmentTree::eq() also just compares @@ -67,7 +95,7 @@ fn sprout_note_commitment_tree_serialization() { fn sprout_note_commitment_tree_serialization_one() { let _init_guard = zebra_test::init(); - let mut incremental_tree = sprout::tree::NoteCommitmentTree::default(); + let mut incremental_tree = SproutNoteCommitmentTree::default(); // Some commitments from zebra-chain/src/sprout/tests/test_vectors.rs let hex_commitments = ["836045484077cf6390184ea7cd48b460e2d0f22b2293b69633bb152314a692fb"]; @@ -75,7 +103,7 @@ fn sprout_note_commitment_tree_serialization_one() { for (idx, cm_hex) in hex_commitments.iter().enumerate() { let bytes = <[u8; 32]>::from_hex(cm_hex).unwrap(); - let cm = sprout::NoteCommitment::from(bytes); + let cm = SproutNoteCommitment::from(bytes); incremental_tree.append(cm).unwrap(); if random() { info!(?idx, "randomly caching root for note commitment tree index"); @@ -94,7 +122,7 @@ fn sprout_note_commitment_tree_serialization_one() { let serialized_tree = incremental_tree.as_bytes(); assert_eq!(hex::encode(&serialized_tree), expected_serialized_tree_hex); - let deserialized_tree = sprout::tree::NoteCommitmentTree::from_bytes(&serialized_tree); + let deserialized_tree = SproutNoteCommitmentTree::from_bytes(&serialized_tree); // This check isn't enough to show that the entire struct is the same, because it just compares // the cached serialized/deserialized roots. 
(NoteCommitmentTree::eq() also just compares @@ -117,7 +145,7 @@ fn sprout_note_commitment_tree_serialization_one() { fn sprout_note_commitment_tree_serialization_pow2() { let _init_guard = zebra_test::init(); - let mut incremental_tree = sprout::tree::NoteCommitmentTree::default(); + let mut incremental_tree = SproutNoteCommitmentTree::default(); // Some commitments from zebra-chain/src/sprout/tests/test_vectors.rs let hex_commitments = [ @@ -130,7 +158,7 @@ fn sprout_note_commitment_tree_serialization_pow2() { for (idx, cm_hex) in hex_commitments.iter().enumerate() { let bytes = <[u8; 32]>::from_hex(cm_hex).unwrap(); - let cm = sprout::NoteCommitment::from(bytes); + let cm = SproutNoteCommitment::from(bytes); incremental_tree.append(cm).unwrap(); if random() { info!(?idx, "randomly caching root for note commitment tree index"); @@ -149,7 +177,7 @@ fn sprout_note_commitment_tree_serialization_pow2() { let serialized_tree = incremental_tree.as_bytes(); assert_eq!(hex::encode(&serialized_tree), expected_serialized_tree_hex); - let deserialized_tree = sprout::tree::NoteCommitmentTree::from_bytes(&serialized_tree); + let deserialized_tree = SproutNoteCommitmentTree::from_bytes(&serialized_tree); // This check isn't enough to show that the entire struct is the same, because it just compares // the cached serialized/deserialized roots. 
(NoteCommitmentTree::eq() also just compares @@ -168,7 +196,7 @@ fn sprout_note_commitment_tree_serialization_pow2() { fn sapling_note_commitment_tree_serialization() { let _init_guard = zebra_test::init(); - let mut incremental_tree = sapling::tree::NoteCommitmentTree::default(); + let mut incremental_tree = SaplingNoteCommitmentTree::default(); // Some commitments from zebra-chain/src/sapling/tests/test_vectors.rs let hex_commitments = [ @@ -199,7 +227,7 @@ fn sapling_note_commitment_tree_serialization() { let serialized_tree = incremental_tree.as_bytes(); assert_eq!(hex::encode(&serialized_tree), expected_serialized_tree_hex); - let deserialized_tree = sapling::tree::NoteCommitmentTree::from_bytes(&serialized_tree); + let deserialized_tree = SaplingNoteCommitmentTree::from_bytes(&serialized_tree); // This check isn't enough to show that the entire struct is the same, because it just compares // the cached serialized/deserialized roots. (NoteCommitmentTree::eq() also just compares @@ -218,7 +246,7 @@ fn sapling_note_commitment_tree_serialization() { fn sapling_note_commitment_tree_serialization_one() { let _init_guard = zebra_test::init(); - let mut incremental_tree = sapling::tree::NoteCommitmentTree::default(); + let mut incremental_tree = SaplingNoteCommitmentTree::default(); // Some commitments from zebra-chain/src/sapling/tests/test_vectors.rs let hex_commitments = ["225747f3b5d5dab4e5a424f81f85c904ff43286e0f3fd07ef0b8c6a627b11458"]; @@ -245,7 +273,7 @@ fn sapling_note_commitment_tree_serialization_one() { let serialized_tree = incremental_tree.as_bytes(); assert_eq!(hex::encode(&serialized_tree), expected_serialized_tree_hex); - let deserialized_tree = sapling::tree::NoteCommitmentTree::from_bytes(&serialized_tree); + let deserialized_tree = SaplingNoteCommitmentTree::from_bytes(&serialized_tree); // This check isn't enough to show that the entire struct is the same, because it just compares // the cached serialized/deserialized roots. 
(NoteCommitmentTree::eq() also just compares @@ -268,7 +296,7 @@ fn sapling_note_commitment_tree_serialization_one() { fn sapling_note_commitment_tree_serialization_pow2() { let _init_guard = zebra_test::init(); - let mut incremental_tree = sapling::tree::NoteCommitmentTree::default(); + let mut incremental_tree = SaplingNoteCommitmentTree::default(); // Some commitments from zebra-chain/src/sapling/tests/test_vectors.rs let hex_commitments = [ @@ -304,7 +332,7 @@ fn sapling_note_commitment_tree_serialization_pow2() { let serialized_tree = incremental_tree.as_bytes(); assert_eq!(hex::encode(&serialized_tree), expected_serialized_tree_hex); - let deserialized_tree = sapling::tree::NoteCommitmentTree::from_bytes(&serialized_tree); + let deserialized_tree = SaplingNoteCommitmentTree::from_bytes(&serialized_tree); // This check isn't enough to show that the entire struct is the same, because it just compares // the cached serialized/deserialized roots. (NoteCommitmentTree::eq() also just compares @@ -323,7 +351,7 @@ fn sapling_note_commitment_tree_serialization_pow2() { fn orchard_note_commitment_tree_serialization() { let _init_guard = zebra_test::init(); - let mut incremental_tree = orchard::tree::NoteCommitmentTree::default(); + let mut incremental_tree = OrchardNoteCommitmentTree::default(); // Some commitments from zebra-chain/src/orchard/tests/tree.rs let commitments = [ @@ -364,7 +392,7 @@ fn orchard_note_commitment_tree_serialization() { let serialized_tree = incremental_tree.as_bytes(); assert_eq!(hex::encode(&serialized_tree), expected_serialized_tree_hex); - let deserialized_tree = orchard::tree::NoteCommitmentTree::from_bytes(&serialized_tree); + let deserialized_tree = OrchardNoteCommitmentTree::from_bytes(&serialized_tree); // This check isn't enough to show that the entire struct is the same, because it just compares // the cached serialized/deserialized roots. 
(NoteCommitmentTree::eq() also just compares @@ -383,7 +411,7 @@ fn orchard_note_commitment_tree_serialization() { fn orchard_note_commitment_tree_serialization_one() { let _init_guard = zebra_test::init(); - let mut incremental_tree = orchard::tree::NoteCommitmentTree::default(); + let mut incremental_tree = OrchardNoteCommitmentTree::default(); // Some commitments from zebra-chain/src/orchard/tests/tree.rs let commitments = [[ @@ -412,7 +440,7 @@ fn orchard_note_commitment_tree_serialization_one() { let serialized_tree = incremental_tree.as_bytes(); assert_eq!(hex::encode(&serialized_tree), expected_serialized_tree_hex); - let deserialized_tree = orchard::tree::NoteCommitmentTree::from_bytes(&serialized_tree); + let deserialized_tree = OrchardNoteCommitmentTree::from_bytes(&serialized_tree); // This check isn't enough to show that the entire struct is the same, because it just compares // the cached serialized/deserialized roots. (NoteCommitmentTree::eq() also just compares @@ -435,7 +463,7 @@ fn orchard_note_commitment_tree_serialization_one() { fn orchard_note_commitment_tree_serialization_pow2() { let _init_guard = zebra_test::init(); - let mut incremental_tree = orchard::tree::NoteCommitmentTree::default(); + let mut incremental_tree = OrchardNoteCommitmentTree::default(); // Some commitments from zebra-chain/src/orchard/tests/tree.rs let commitments = [ @@ -471,7 +499,7 @@ fn orchard_note_commitment_tree_serialization_pow2() { let serialized_tree = incremental_tree.as_bytes(); assert_eq!(hex::encode(&serialized_tree), expected_serialized_tree_hex); - let deserialized_tree = orchard::tree::NoteCommitmentTree::from_bytes(&serialized_tree); + let deserialized_tree = OrchardNoteCommitmentTree::from_bytes(&serialized_tree); // This check isn't enough to show that the entire struct is the same, because it just compares // the cached serialized/deserialized roots. 
(NoteCommitmentTree::eq() also just compares From 797df674cbd68ba230c92307c0420e0f6ebda289 Mon Sep 17 00:00:00 2001 From: Arya Date: Wed, 12 Jul 2023 17:30:07 -0400 Subject: [PATCH 210/265] cleanup(consensus): Avoid blocking threads by awaiting proof verification results from rayon in async context (#6887) * Replaces rayon::iter::once with spawn_fifo * Removes spawn_blocking in flush_spawning methods * Logs warning and returns error for RecvErrors * Uses BoxError in proof verifiers * Adds async spawn_fifo fns * Updates verify_single_spawning and flush_spawning methods to use new async spawn_fifo fns * Removes outdated TODOs and docs. * removes outdated TODO --------- Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com> --- zebra-consensus/src/primitives.rs | 38 +++++++++++++ zebra-consensus/src/primitives/ed25519.rs | 48 ++++++---------- zebra-consensus/src/primitives/groth16.rs | 62 +++++++++------------ zebra-consensus/src/primitives/halo2.rs | 57 +++++++------------ zebra-consensus/src/primitives/redjubjub.rs | 48 ++++++---------- zebra-consensus/src/primitives/redpallas.rs | 48 ++++++---------- 6 files changed, 135 insertions(+), 166 deletions(-) diff --git a/zebra-consensus/src/primitives.rs b/zebra-consensus/src/primitives.rs index 333ff1156f9..e3ab3a4f865 100644 --- a/zebra-consensus/src/primitives.rs +++ b/zebra-consensus/src/primitives.rs @@ -1,5 +1,9 @@ //! Asynchronous verification of cryptographic primitives. +use tokio::sync::oneshot::error::RecvError; + +use crate::BoxError; + pub mod ed25519; pub mod groth16; pub mod halo2; @@ -11,3 +15,37 @@ const MAX_BATCH_SIZE: usize = 64; /// The maximum latency bound for any of the batch verifiers. const MAX_BATCH_LATENCY: std::time::Duration = std::time::Duration::from_millis(100); + +/// Fires off a task into the Rayon threadpool, awaits the result through a oneshot channel, +/// then converts the error to a [`BoxError`]. 
+pub async fn spawn_fifo_and_convert< + E: 'static + std::error::Error + Into + Sync + Send, + F: 'static + FnOnce() -> Result<(), E> + Send, +>( + f: F, +) -> Result<(), BoxError> { + spawn_fifo(f) + .await + .map_err(|_| { + "threadpool unexpectedly dropped response channel sender. Is Zebra shutting down?" + })? + .map_err(BoxError::from) +} + +/// Fires off a task into the Rayon threadpool and awaits the result through a oneshot channel. +pub async fn spawn_fifo< + E: 'static + std::error::Error + Sync + Send, + F: 'static + FnOnce() -> Result<(), E> + Send, +>( + f: F, +) -> Result, RecvError> { + // Rayon doesn't have a spawn function that returns a value, + // so we use a oneshot channel instead. + let (rsp_tx, rsp_rx) = tokio::sync::oneshot::channel(); + + rayon::spawn_fifo(move || { + let _ = rsp_tx.send(f()); + }); + + rsp_rx.await +} diff --git a/zebra-consensus/src/primitives/ed25519.rs b/zebra-consensus/src/primitives/ed25519.rs index 49bb6c4ac1d..7a17ac9e14a 100644 --- a/zebra-consensus/src/primitives/ed25519.rs +++ b/zebra-consensus/src/primitives/ed25519.rs @@ -11,13 +11,16 @@ use futures::{future::BoxFuture, FutureExt}; use once_cell::sync::Lazy; use rand::thread_rng; -use rayon::prelude::*; use tokio::sync::watch; use tower::{util::ServiceFn, Service}; use tower_batch_control::{Batch, BatchControl}; use tower_fallback::Fallback; use zebra_chain::primitives::ed25519::{batch, *}; +use crate::BoxError; + +use super::{spawn_fifo, spawn_fifo_and_convert}; + #[cfg(test)] mod tests; @@ -43,7 +46,10 @@ pub type Item = batch::Item; /// you should call `.clone()` on the global handle to create a local, mutable /// handle. pub static VERIFIER: Lazy< - Fallback, ServiceFn BoxFuture<'static, VerifyResult>>>, + Fallback< + Batch, + ServiceFn BoxFuture<'static, Result<(), BoxError>>>, + >, > = Lazy::new(|| { Fallback::new( Batch::new( @@ -120,43 +126,22 @@ impl Verifier { /// Flush the batch using a thread pool, and return the result via the channel. 
/// This function returns a future that becomes ready when the batch is completed. - fn flush_spawning(batch: BatchVerifier, tx: Sender) -> impl Future { + async fn flush_spawning(batch: BatchVerifier, tx: Sender) { // Correctness: Do CPU-intensive work on a dedicated thread, to avoid blocking other futures. - tokio::task::spawn_blocking(|| { - // TODO: - // - spawn batches so rayon executes them in FIFO order - // possible implementation: return a closure in a Future, - // then run it using scope_fifo() in the worker task, - // limiting the number of concurrent batches to the number of rayon threads - rayon::scope_fifo(|s| s.spawn_fifo(|_s| Self::verify(batch, tx))) - }) - .map(|join_result| join_result.expect("panic in ed25519 batch verifier")) + let _ = tx.send(spawn_fifo(move || batch.verify(thread_rng())).await.ok()); } /// Verify a single item using a thread pool, and return the result. - /// This function returns a future that becomes ready when the item is completed. - fn verify_single_spawning(item: Item) -> impl Future { + async fn verify_single_spawning(item: Item) -> Result<(), BoxError> { // Correctness: Do CPU-intensive work on a dedicated thread, to avoid blocking other futures. - tokio::task::spawn_blocking(|| { - // Rayon doesn't have a spawn function that returns a value, - // so we use a parallel iterator instead. 
- // - // TODO: - // - when a batch fails, spawn all its individual items into rayon using Vec::par_iter() - // - spawn fallback individual verifications so rayon executes them in FIFO order, - // if possible - rayon::iter::once(item) - .map(|item| item.verify_single()) - .collect() - }) - .map(|join_result| join_result.expect("panic in ed25519 fallback verifier")) + spawn_fifo_and_convert(move || item.verify_single()).await } } impl Service> for Verifier { type Response = (); - type Error = Error; - type Future = Pin + Send + 'static>>; + type Error = BoxError; + type Future = Pin> + Send + 'static>>; fn poll_ready(&mut self, _cx: &mut Context<'_>) -> Poll> { Poll::Ready(Ok(())) @@ -174,7 +159,8 @@ impl Service> for Verifier { Ok(()) => { // We use a new channel for each batch, // so we always get the correct batch result here. - let result = rx.borrow().expect("completed batch must send a value"); + let result = rx.borrow() + .ok_or("threadpool unexpectedly dropped response channel sender. 
Is Zebra shutting down?")?; if result.is_ok() { tracing::trace!(?result, "validated ed25519 signature"); @@ -183,7 +169,7 @@ impl Service> for Verifier { tracing::trace!(?result, "invalid ed25519 signature"); metrics::counter!("signatures.ed25519.invalid", 1); } - result + result.map_err(BoxError::from) } Err(_recv_error) => panic!("ed25519 verifier was dropped without flushing"), } diff --git a/zebra-consensus/src/primitives/groth16.rs b/zebra-consensus/src/primitives/groth16.rs index 0013c048b15..e6d7ad17a35 100644 --- a/zebra-consensus/src/primitives/groth16.rs +++ b/zebra-consensus/src/primitives/groth16.rs @@ -18,7 +18,6 @@ use futures::{future::BoxFuture, FutureExt}; use once_cell::sync::Lazy; use rand::thread_rng; -use rayon::prelude::*; use tokio::sync::watch; use tower::{util::ServiceFn, Service}; @@ -34,6 +33,10 @@ use zebra_chain::{ sprout::{JoinSplit, Nullifier, RandomSeed}, }; +use crate::BoxError; + +use super::{spawn_fifo, spawn_fifo_and_convert}; + mod params; #[cfg(test)] mod tests; @@ -74,7 +77,10 @@ pub type ItemVerifyingKey = PreparedVerifyingKey; /// you should call `.clone()` on the global handle to create a local, mutable /// handle. pub static SPEND_VERIFIER: Lazy< - Fallback, ServiceFn BoxFuture<'static, VerifyResult>>>, + Fallback< + Batch, + ServiceFn BoxFuture<'static, Result<(), BoxError>>>, + >, > = Lazy::new(|| { Fallback::new( Batch::new( @@ -113,7 +119,10 @@ pub static SPEND_VERIFIER: Lazy< /// you should call `.clone()` on the global handle to create a local, mutable /// handle. pub static OUTPUT_VERIFIER: Lazy< - Fallback, ServiceFn BoxFuture<'static, VerifyResult>>>, + Fallback< + Batch, + ServiceFn BoxFuture<'static, Result<(), BoxError>>>, + >, > = Lazy::new(|| { Fallback::new( Batch::new( @@ -417,43 +426,22 @@ impl Verifier { /// Flush the batch using a thread pool, and return the result via the channel. /// This function returns a future that becomes ready when the batch is completed. 
- fn flush_spawning( - batch: BatchVerifier, - vk: &'static BatchVerifyingKey, - tx: Sender, - ) -> impl Future { + async fn flush_spawning(batch: BatchVerifier, vk: &'static BatchVerifyingKey, tx: Sender) { // Correctness: Do CPU-intensive work on a dedicated thread, to avoid blocking other futures. - tokio::task::spawn_blocking(move || { - // TODO: - // - spawn batches so rayon executes them in FIFO order - // possible implementation: return a closure in a Future, - // then run it using scope_fifo() in the worker task, - // limiting the number of concurrent batches to the number of rayon threads - rayon::scope_fifo(move |s| s.spawn_fifo(move |_s| Self::verify(batch, vk, tx))) - }) - .map(|join_result| join_result.expect("panic in groth16 batch verifier")) + let _ = tx.send( + spawn_fifo(move || batch.verify(thread_rng(), vk)) + .await + .ok(), + ); } /// Verify a single item using a thread pool, and return the result. - /// This function returns a future that becomes ready when the item is completed. - fn verify_single_spawning( + async fn verify_single_spawning( item: Item, pvk: &'static ItemVerifyingKey, - ) -> impl Future { + ) -> Result<(), BoxError> { // Correctness: Do CPU-intensive work on a dedicated thread, to avoid blocking other futures. - tokio::task::spawn_blocking(move || { - // Rayon doesn't have a spawn function that returns a value, - // so we use a parallel iterator instead. 
- // - // TODO: - // - when a batch fails, spawn all its individual items into rayon using Vec::par_iter() - // - spawn fallback individual verifications so rayon executes them in FIFO order, - // if possible - rayon::iter::once(item) - .map(move |item| item.verify_single(pvk)) - .collect() - }) - .map(|join_result| join_result.expect("panic in groth16 fallback verifier")) + spawn_fifo_and_convert(move || item.verify_single(pvk)).await } } @@ -470,8 +458,8 @@ impl fmt::Debug for Verifier { impl Service> for Verifier { type Response = (); - type Error = VerificationError; - type Future = Pin + Send + 'static>>; + type Error = BoxError; + type Future = Pin> + Send + 'static>>; fn poll_ready(&mut self, _cx: &mut Context<'_>) -> Poll> { Poll::Ready(Ok(())) @@ -492,7 +480,7 @@ impl Service> for Verifier { let result = rx .borrow() .as_ref() - .expect("completed batch must send a value") + .ok_or("threadpool unexpectedly dropped response channel sender. Is Zebra shutting down?")? .clone(); if result.is_ok() { @@ -503,7 +491,7 @@ impl Service> for Verifier { metrics::counter!("proofs.groth16.invalid", 1); } - result + result.map_err(BoxError::from) } Err(_recv_error) => panic!("verifier was dropped without flushing"), } diff --git a/zebra-consensus/src/primitives/halo2.rs b/zebra-consensus/src/primitives/halo2.rs index b747b4b0cf0..e9cbc4262e6 100644 --- a/zebra-consensus/src/primitives/halo2.rs +++ b/zebra-consensus/src/primitives/halo2.rs @@ -13,13 +13,16 @@ use once_cell::sync::Lazy; use orchard::circuit::VerifyingKey; use rand::{thread_rng, CryptoRng, RngCore}; -use rayon::prelude::*; use thiserror::Error; use tokio::sync::watch; use tower::{util::ServiceFn, Service}; use tower_batch_control::{Batch, BatchControl}; use tower_fallback::Fallback; +use crate::BoxError; + +use super::{spawn_fifo, spawn_fifo_and_convert}; + #[cfg(test)] mod tests; @@ -199,7 +202,10 @@ impl From for Halo2Error { /// you should call `.clone()` on the global handle to create a local, mutable 
/// handle. pub static VERIFIER: Lazy< - Fallback, ServiceFn BoxFuture<'static, VerifyResult>>>, + Fallback< + Batch, + ServiceFn BoxFuture<'static, Result<(), BoxError>>>, + >, > = Lazy::new(|| { Fallback::new( Batch::new( @@ -284,43 +290,22 @@ impl Verifier { /// Flush the batch using a thread pool, and return the result via the channel. /// This function returns a future that becomes ready when the batch is completed. - fn flush_spawning( - batch: BatchVerifier, - vk: &'static BatchVerifyingKey, - tx: Sender, - ) -> impl Future { + async fn flush_spawning(batch: BatchVerifier, vk: &'static BatchVerifyingKey, tx: Sender) { // Correctness: Do CPU-intensive work on a dedicated thread, to avoid blocking other futures. - tokio::task::spawn_blocking(move || { - // TODO: - // - spawn batches so rayon executes them in FIFO order - // possible implementation: return a closure in a Future, - // then run it using scope_fifo() in the worker task, - // limiting the number of concurrent batches to the number of rayon threads - rayon::scope_fifo(move |s| s.spawn_fifo(move |_s| Self::verify(batch, vk, tx))) - }) - .map(|join_result| join_result.expect("panic in halo2 batch verifier")) + let _ = tx.send( + spawn_fifo(move || batch.verify(thread_rng(), vk).map_err(Halo2Error::from)) + .await + .ok(), + ); } /// Verify a single item using a thread pool, and return the result. - /// This function returns a future that becomes ready when the item is completed. - fn verify_single_spawning( + async fn verify_single_spawning( item: Item, pvk: &'static ItemVerifyingKey, - ) -> impl Future { + ) -> Result<(), BoxError> { // Correctness: Do CPU-intensive work on a dedicated thread, to avoid blocking other futures. - tokio::task::spawn_blocking(move || { - // Rayon doesn't have a spawn function that returns a value, - // so we use a parallel iterator instead. 
- // - // TODO: - // - when a batch fails, spawn all its individual items into rayon using Vec::par_iter() - // - spawn fallback individual verifications so rayon executes them in FIFO order, - // if possible - rayon::iter::once(item) - .map(move |item| item.verify_single(pvk).map_err(Halo2Error::from)) - .collect() - }) - .map(|join_result| join_result.expect("panic in halo2 fallback verifier")) + spawn_fifo_and_convert(move || item.verify_single(pvk).map_err(Halo2Error::from)).await } } @@ -337,8 +322,8 @@ impl fmt::Debug for Verifier { impl Service> for Verifier { type Response = (); - type Error = Halo2Error; - type Future = Pin + Send + 'static>>; + type Error = BoxError; + type Future = Pin> + Send + 'static>>; fn poll_ready(&mut self, _cx: &mut Context<'_>) -> Poll> { Poll::Ready(Ok(())) @@ -358,7 +343,7 @@ impl Service> for Verifier { let result = rx .borrow() .as_ref() - .expect("completed batch must send a value") + .ok_or("threadpool unexpectedly dropped response channel sender. Is Zebra shutting down?")? 
.clone(); if result.is_ok() { @@ -369,7 +354,7 @@ impl Service> for Verifier { metrics::counter!("proofs.halo2.invalid", 1); } - result + result.map_err(BoxError::from) } Err(_recv_error) => panic!("verifier was dropped without flushing"), } diff --git a/zebra-consensus/src/primitives/redjubjub.rs b/zebra-consensus/src/primitives/redjubjub.rs index b7f65a2c176..94be0cdb5f8 100644 --- a/zebra-consensus/src/primitives/redjubjub.rs +++ b/zebra-consensus/src/primitives/redjubjub.rs @@ -11,7 +11,6 @@ use futures::{future::BoxFuture, FutureExt}; use once_cell::sync::Lazy; use rand::thread_rng; -use rayon::prelude::*; use tokio::sync::watch; use tower::{util::ServiceFn, Service}; use tower_batch_control::{Batch, BatchControl}; @@ -19,6 +18,10 @@ use tower_fallback::Fallback; use zebra_chain::primitives::redjubjub::{batch, *}; +use crate::BoxError; + +use super::{spawn_fifo, spawn_fifo_and_convert}; + #[cfg(test)] mod tests; @@ -44,7 +47,10 @@ pub type Item = batch::Item; /// you should call `.clone()` on the global handle to create a local, mutable /// handle. pub static VERIFIER: Lazy< - Fallback, ServiceFn BoxFuture<'static, VerifyResult>>>, + Fallback< + Batch, + ServiceFn BoxFuture<'static, Result<(), BoxError>>>, + >, > = Lazy::new(|| { Fallback::new( Batch::new( @@ -121,43 +127,22 @@ impl Verifier { /// Flush the batch using a thread pool, and return the result via the channel. /// This function returns a future that becomes ready when the batch is completed. - fn flush_spawning(batch: BatchVerifier, tx: Sender) -> impl Future { + async fn flush_spawning(batch: BatchVerifier, tx: Sender) { // Correctness: Do CPU-intensive work on a dedicated thread, to avoid blocking other futures. 
- tokio::task::spawn_blocking(|| { - // TODO: - // - spawn batches so rayon executes them in FIFO order - // possible implementation: return a closure in a Future, - // then run it using scope_fifo() in the worker task, - // limiting the number of concurrent batches to the number of rayon threads - rayon::scope_fifo(|s| s.spawn_fifo(|_s| Self::verify(batch, tx))) - }) - .map(|join_result| join_result.expect("panic in redjubjub batch verifier")) + let _ = tx.send(spawn_fifo(move || batch.verify(thread_rng())).await.ok()); } /// Verify a single item using a thread pool, and return the result. - /// This function returns a future that becomes ready when the item is completed. - fn verify_single_spawning(item: Item) -> impl Future { + async fn verify_single_spawning(item: Item) -> Result<(), BoxError> { // Correctness: Do CPU-intensive work on a dedicated thread, to avoid blocking other futures. - tokio::task::spawn_blocking(|| { - // Rayon doesn't have a spawn function that returns a value, - // so we use a parallel iterator instead. - // - // TODO: - // - when a batch fails, spawn all its individual items into rayon using Vec::par_iter() - // - spawn fallback individual verifications so rayon executes them in FIFO order, - // if possible - rayon::iter::once(item) - .map(|item| item.verify_single()) - .collect() - }) - .map(|join_result| join_result.expect("panic in redjubjub fallback verifier")) + spawn_fifo_and_convert(move || item.verify_single()).await } } impl Service> for Verifier { type Response = (); - type Error = Error; - type Future = Pin + Send + 'static>>; + type Error = BoxError; + type Future = Pin> + Send + 'static>>; fn poll_ready(&mut self, _cx: &mut Context<'_>) -> Poll> { Poll::Ready(Ok(())) @@ -175,7 +160,8 @@ impl Service> for Verifier { Ok(()) => { // We use a new channel for each batch, // so we always get the correct batch result here. 
- let result = rx.borrow().expect("completed batch must send a value"); + let result = rx.borrow() + .ok_or("threadpool unexpectedly dropped response channel sender. Is Zebra shutting down?")?; if result.is_ok() { tracing::trace!(?result, "validated redjubjub signature"); @@ -185,7 +171,7 @@ impl Service> for Verifier { metrics::counter!("signatures.redjubjub.invalid", 1); } - result + result.map_err(BoxError::from) } Err(_recv_error) => panic!("verifier was dropped without flushing"), } diff --git a/zebra-consensus/src/primitives/redpallas.rs b/zebra-consensus/src/primitives/redpallas.rs index 77b6b08bc9d..5064fa817fb 100644 --- a/zebra-consensus/src/primitives/redpallas.rs +++ b/zebra-consensus/src/primitives/redpallas.rs @@ -11,7 +11,6 @@ use futures::{future::BoxFuture, FutureExt}; use once_cell::sync::Lazy; use rand::thread_rng; -use rayon::prelude::*; use tokio::sync::watch; use tower::{util::ServiceFn, Service}; use tower_batch_control::{Batch, BatchControl}; @@ -19,6 +18,10 @@ use tower_fallback::Fallback; use zebra_chain::primitives::reddsa::{batch, orchard, Error}; +use crate::BoxError; + +use super::{spawn_fifo, spawn_fifo_and_convert}; + #[cfg(test)] mod tests; @@ -44,7 +47,10 @@ pub type Item = batch::Item; /// you should call `.clone()` on the global handle to create a local, mutable /// handle. pub static VERIFIER: Lazy< - Fallback, ServiceFn BoxFuture<'static, VerifyResult>>>, + Fallback< + Batch, + ServiceFn BoxFuture<'static, Result<(), BoxError>>>, + >, > = Lazy::new(|| { Fallback::new( Batch::new( @@ -121,43 +127,22 @@ impl Verifier { /// Flush the batch using a thread pool, and return the result via the channel. /// This function returns a future that becomes ready when the batch is completed. - fn flush_spawning(batch: BatchVerifier, tx: Sender) -> impl Future { + async fn flush_spawning(batch: BatchVerifier, tx: Sender) { // Correctness: Do CPU-intensive work on a dedicated thread, to avoid blocking other futures. 
- tokio::task::spawn_blocking(|| { - // TODO: - // - spawn batches so rayon executes them in FIFO order - // possible implementation: return a closure in a Future, - // then run it using scope_fifo() in the worker task, - // limiting the number of concurrent batches to the number of rayon threads - rayon::scope_fifo(|s| s.spawn_fifo(|_s| Self::verify(batch, tx))) - }) - .map(|join_result| join_result.expect("panic in ed25519 batch verifier")) + let _ = tx.send(spawn_fifo(move || batch.verify(thread_rng())).await.ok()); } /// Verify a single item using a thread pool, and return the result. - /// This function returns a future that becomes ready when the item is completed. - fn verify_single_spawning(item: Item) -> impl Future { + async fn verify_single_spawning(item: Item) -> Result<(), BoxError> { // Correctness: Do CPU-intensive work on a dedicated thread, to avoid blocking other futures. - tokio::task::spawn_blocking(|| { - // Rayon doesn't have a spawn function that returns a value, - // so we use a parallel iterator instead. - // - // TODO: - // - when a batch fails, spawn all its individual items into rayon using Vec::par_iter() - // - spawn fallback individual verifications so rayon executes them in FIFO order, - // if possible - rayon::iter::once(item) - .map(|item| item.verify_single()) - .collect() - }) - .map(|join_result| join_result.expect("panic in redpallas fallback verifier")) + spawn_fifo_and_convert(move || item.verify_single()).await } } impl Service> for Verifier { type Response = (); - type Error = Error; - type Future = Pin + Send + 'static>>; + type Error = BoxError; + type Future = Pin> + Send + 'static>>; fn poll_ready(&mut self, _cx: &mut Context<'_>) -> Poll> { Poll::Ready(Ok(())) @@ -174,7 +159,8 @@ impl Service> for Verifier { Ok(()) => { // We use a new channel for each batch, // so we always get the correct batch result here. 
- let result = rx.borrow().expect("completed batch must send a value"); + let result = rx.borrow() + .ok_or("threadpool unexpectedly dropped response channel sender. Is Zebra shutting down?")?; if result.is_ok() { tracing::trace!(?result, "validated redpallas signature"); @@ -184,7 +170,7 @@ impl Service> for Verifier { metrics::counter!("signatures.redpallas.invalid", 1); } - result + result.map_err(BoxError::from) } Err(_recv_error) => panic!("verifier was dropped without flushing"), } From 7f3a4e97b7999e10155e98f14e098fc7be8396af Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 12 Jul 2023 21:30:36 +0000 Subject: [PATCH 211/265] build(deps): bump regex from 1.8.4 to 1.9.1 (#7189) Bumps [regex](https://github.com/rust-lang/regex) from 1.8.4 to 1.9.1. - [Release notes](https://github.com/rust-lang/regex/releases) - [Changelog](https://github.com/rust-lang/regex/blob/master/CHANGELOG.md) - [Commits](https://github.com/rust-lang/regex/compare/1.8.4...1.9.1) --- updated-dependencies: - dependency-name: regex dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 24 ++++++++++++++++++------ zebra-network/Cargo.toml | 2 +- zebra-state/Cargo.toml | 2 +- zebra-test/Cargo.toml | 2 +- zebra-utils/Cargo.toml | 2 +- zebrad/Cargo.toml | 2 +- 6 files changed, 23 insertions(+), 11 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 471aa6c0e41..11bdc8d9081 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2428,7 +2428,7 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8263075bb86c5a1b1427b5ae862e8889656f126e9f77c484496e8b47cf5c5558" dependencies = [ - "regex-automata", + "regex-automata 0.1.10", ] [[package]] @@ -3608,13 +3608,14 @@ dependencies = [ [[package]] name = "regex" -version = "1.8.4" +version = "1.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d0ab3ca65655bb1e41f2a8c8cd662eb4fb035e67c3f78da1d61dffe89d07300f" +checksum = "b2eae68fc220f7cf2532e4494aded17545fce192d59cd996e0fe7887f4ceb575" dependencies = [ "aho-corasick 1.0.2", "memchr", - "regex-syntax 0.7.2", + "regex-automata 0.3.2", + "regex-syntax 0.7.3", ] [[package]] @@ -3626,6 +3627,17 @@ dependencies = [ "regex-syntax 0.6.29", ] +[[package]] +name = "regex-automata" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "83d3daa6976cffb758ec878f108ba0e062a45b2d6ca3a2cca965338855476caf" +dependencies = [ + "aho-corasick 1.0.2", + "memchr", + "regex-syntax 0.7.3", +] + [[package]] name = "regex-syntax" version = "0.6.29" @@ -3634,9 +3646,9 @@ checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" [[package]] name = "regex-syntax" -version = "0.7.2" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "436b050e76ed2903236f032a59761c1eb99e1b0aead2c257922771dab1fc8c78" +checksum = "2ab07dc67230e4a4718e70fd5c20055a4334b121f1f9db8fe63ef39ce9b8c846" 
[[package]] name = "reqwest" diff --git a/zebra-network/Cargo.toml b/zebra-network/Cargo.toml index a1cfb0af29e..e017f1aa71f 100644 --- a/zebra-network/Cargo.toml +++ b/zebra-network/Cargo.toml @@ -55,7 +55,7 @@ ordered-map = "0.4.2" pin-project = "1.1.2" rand = "0.8.5" rayon = "1.7.0" -regex = "1.8.4" +regex = "1.9.1" serde = { version = "1.0.168", features = ["serde_derive"] } tempfile = "3.5.0" thiserror = "1.0.43" diff --git a/zebra-state/Cargo.toml b/zebra-state/Cargo.toml index 19a11161654..a459609f99f 100644 --- a/zebra-state/Cargo.toml +++ b/zebra-state/Cargo.toml @@ -53,7 +53,7 @@ itertools = "0.11.0" lazy_static = "1.4.0" metrics = "0.21.1" mset = "0.1.1" -regex = "1.8.4" +regex = "1.9.1" rlimit = "0.10.0" rocksdb = { version = "0.21.0", default-features = false, features = ["lz4"] } semver = "1.0.17" diff --git a/zebra-test/Cargo.toml b/zebra-test/Cargo.toml index d32993376ca..817f6d531d4 100644 --- a/zebra-test/Cargo.toml +++ b/zebra-test/Cargo.toml @@ -22,7 +22,7 @@ insta = "1.30.0" proptest = "1.2.0" once_cell = "1.18.0" rand = "0.8.5" -regex = "1.8.4" +regex = "1.9.1" tokio = { version = "1.29.1", features = ["full", "tracing", "test-util"] } tower = { version = "0.4.13", features = ["util"] } diff --git a/zebra-utils/Cargo.toml b/zebra-utils/Cargo.toml index 0eb5ddf6ab9..194e543d151 100644 --- a/zebra-utils/Cargo.toml +++ b/zebra-utils/Cargo.toml @@ -84,7 +84,7 @@ zebra-rpc = { path = "../zebra-rpc", version = "1.0.0-beta.27", optional = true itertools = { version = "0.11.0", optional = true } # These crates are needed for the search-issue-refs binary -regex = { version = "1.8.4", optional = true } +regex = { version = "1.9.1", optional = true } reqwest = { version = "0.11.18", optional = true } # These crates are needed for the zebra-checkpoints and search-issue-refs binaries diff --git a/zebrad/Cargo.toml b/zebrad/Cargo.toml index 908a4583ada..dd9d33f24c7 100644 --- a/zebrad/Cargo.toml +++ b/zebrad/Cargo.toml @@ -233,7 +233,7 @@ abscissa_core = { 
version = "0.7.0", features = ["testing"] } hex = "0.4.3" jsonrpc-core = "18.0.0" once_cell = "1.18.0" -regex = "1.8.4" +regex = "1.9.1" # zebra-rpc needs the preserve_order feature, it also makes test results more stable serde_json = { version = "1.0.100", features = ["preserve_order"] } From 3b34e48bf2f43b18318bf051ed8400137aa0010f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 12 Jul 2023 23:59:49 +0000 Subject: [PATCH 212/265] build(deps): bump the ed25519-zebra group with 1 update (#7188) * cargo upgrade --workspace ed25519-zebra x25519-dalek; cargo check * Remove unused libm dependency exception --------- Co-authored-by: teor --- Cargo.lock | 47 ++++++++++++++++------------------ deny.toml | 3 --- tower-batch-control/Cargo.toml | 2 +- zebra-chain/Cargo.toml | 4 +-- 4 files changed, 25 insertions(+), 31 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 11bdc8d9081..8f52415c561 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1043,20 +1043,33 @@ dependencies = [ [[package]] name = "curve25519-dalek" -version = "4.0.0-rc.2" +version = "4.0.0-rc.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03d928d978dbec61a1167414f5ec534f24bea0d7a0d24dd9b6233d3d8223e585" +checksum = "436ace70fc06e06f7f689d2624dc4e2f0ea666efb5aa704215f7249ae6e047a7" dependencies = [ "cfg-if 1.0.0", + "cpufeatures", + "curve25519-dalek-derive", "digest 0.10.7", "fiat-crypto", - "packed_simd_2", "platforms", + "rustc_version 0.4.0", "serde", "subtle", "zeroize", ] +[[package]] +name = "curve25519-dalek-derive" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "83fdaf97f4804dcebfa5862639bc9ce4121e82140bec2a987ac5140294865b5b" +dependencies = [ + "proc-macro2 1.0.63", + "quote 1.0.29", + "syn 2.0.23", +] + [[package]] name = "cxx" version = "1.0.94" @@ -1257,9 +1270,9 @@ dependencies = [ [[package]] name = "ed25519-zebra" -version = "4.0.0" +version = 
"4.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6af5e1fb700a3c779c7a7ed25c8c0b7f193db101de3773ac46e704bcb882d772" +checksum = "ffb0d653b2c06ec7ec1b4c570bb4eac748035d6f44dd14e5fd9e7e2549938488" dependencies = [ "curve25519-dalek", "ed25519", @@ -1483,7 +1496,7 @@ checksum = "26c4b37de5ae15812a764c958297cfc50f5c010438f60c6ce75d11b802abd404" dependencies = [ "cbc", "cipher", - "libm 0.2.7", + "libm", "num-bigint", "num-integer", "num-traits", @@ -2321,12 +2334,6 @@ dependencies = [ "winapi", ] -[[package]] -name = "libm" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fc7aa29613bd6a620df431842069224d8bc9011086b1db4c0e0cd47fa03ec9a" - [[package]] name = "libm" version = "0.2.7" @@ -2715,7 +2722,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "578ede34cf02f8924ab9447f50c28075b4d3e5b269972345e7e0372b38c6cdcd" dependencies = [ "autocfg", - "libm 0.2.7", + "libm", ] [[package]] @@ -2881,16 +2888,6 @@ version = "3.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c1b04fb49957986fdce4d6ee7a65027d55d4b6d2265e5848bbb507b58ccfdb6f" -[[package]] -name = "packed_simd_2" -version = "0.3.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1914cd452d8fccd6f9db48147b29fd4ae05bea9dc5d9ad578509f72415de282" -dependencies = [ - "cfg-if 1.0.0", - "libm 0.1.4", -] - [[package]] name = "pairing" version = "0.23.0" @@ -5531,9 +5528,9 @@ dependencies = [ [[package]] name = "x25519-dalek" -version = "2.0.0-rc.2" +version = "2.0.0-rc.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fabd6e16dd08033932fc3265ad4510cc2eab24656058a6dcb107ffe274abcc95" +checksum = "ec7fae07da688e17059d5886712c933bb0520f15eff2e09cfa18e30968f4e63a" dependencies = [ "curve25519-dalek", "rand_core 0.6.4", diff --git a/deny.toml b/deny.toml index fa51dc2e439..2e8e9738c7d 100644 --- a/deny.toml +++ b/deny.toml 
@@ -119,9 +119,6 @@ skip-tree = [ # upgrade abscissa (required dependency) and arti (optional dependency) { name = "semver", version = "=0.9.0" }, - # wait for packed_simd_2 to upgrade - { name = "libm", version = "=0.1.4" }, - # Elasticsearch dependencies # wait for elasticsearch to update base64, darling, rustc_version, serde_with diff --git a/tower-batch-control/Cargo.toml b/tower-batch-control/Cargo.toml index cbb89fed857..1f9e89df2c9 100644 --- a/tower-batch-control/Cargo.toml +++ b/tower-batch-control/Cargo.toml @@ -38,7 +38,7 @@ color-eyre = "0.6.2" # Enable a feature that makes tinyvec compile much faster. tinyvec = { version = "1.6.0", features = ["rustc_1_55"] } -ed25519-zebra = "4.0.0" +ed25519-zebra = "4.0.1" rand = "0.8.5" tokio = { version = "1.29.1", features = ["full", "tracing", "test-util"] } diff --git a/zebra-chain/Cargo.toml b/zebra-chain/Cargo.toml index 95acadbaf5f..6c200b32f80 100644 --- a/zebra-chain/Cargo.toml +++ b/zebra-chain/Cargo.toml @@ -68,7 +68,7 @@ ripemd = "0.1.3" secp256k1 = { version = "0.21.3", features = ["serde"] } sha2 = { version = "0.9.9", features = ["compress"] } uint = "0.9.5" -x25519-dalek = { version = "2.0.0-pre.1", features = ["serde"] } +x25519-dalek = { version = "2.0.0-rc.3", features = ["serde"] } # ECC deps halo2 = { package = "halo2_proofs", version = "0.3.0" } @@ -100,7 +100,7 @@ itertools = "0.11.0" rayon = "1.7.0" # ZF deps -ed25519-zebra = "4.0.0" +ed25519-zebra = "4.0.1" redjubjub = "0.7.0" reddsa = "0.5.0" From be5cfad07f0e401de84f19ec9b418684a93afac2 Mon Sep 17 00:00:00 2001 From: teor Date: Fri, 14 Jul 2023 07:36:15 +1000 Subject: [PATCH 213/265] change(state): Prepare for in-place database format upgrades, but don't make any format changes yet (#7031) * Move format upgrades to their own module and enum * Launch a format change thread if needed, and shut it down during shutdown * Add some TODOs and remove a redundant timer * Regularly check for panics in the state upgrade task * Only run example 
upgrade once, change version field names * Increment database format to 25.0.2: add format change task * Log the running and initial disk database format versions on startup * Add initial disk and running state versions to cached state images in CI * Fix missing imports * Fix typo in logs workflow command * Add a force_save_to_disk argument to the CI workflow * Move use_internet_connection into zebrad_config() * fastmod can_spawn_zebrad_for_rpc can_spawn_zebrad_for_test_type zebra* * Add a spawn_zebrad_without_rpc() function * Remove unused copy_state() test code * Assert that upgrades and downgrades happen with the correct versions * Add a kill_and_return_output() method for tests * Add a test for new_state_format() versions (no upgrades or downgrades) * Add use_internet_connection to can_spawn_zebrad_for_test_type() * Fix workflow parameter passing * Check that reopening a new database doesn't upgrade (or downgrade) the format * Allow ephemeral to be set to false even if we don't have a cached state * Add a test type that will accept any kind of state * When re-using a directory, configure the state test config with that path * Actually mark newly created databases with their format versions * Wait for the state to be opened before testing the format * Run state format tests on mainnet and testnet configs (no network access) * run multiple reopens in tests * Test upgrades run correctly * Test that version downgrades work as expected (best effort) * Add a TODO for testing partial updates * Fix missing test arguments * clippy if chain * Fix typo * another typo * Pass a database instance to the format upgrade task * Fix a timing issue in the tests * Fix version matching in CI * Use correct env var reference * Use correct github env file * Wait for the database to be written before killing Zebra * Use correct workflow syntax * Version changes aren't always upgrades --------- Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com> --- 
.../continous-integration-docker.yml | 11 + .github/workflows/deploy-gcp-tests.yml | 113 +++- zebra-state/src/config.rs | 24 +- zebra-state/src/constants.rs | 16 +- zebra-state/src/lib.rs | 8 +- zebra-state/src/service.rs | 8 +- .../src/service/finalized_state/disk_db.rs | 73 +-- .../service/finalized_state/disk_format.rs | 1 + .../finalized_state/disk_format/upgrade.rs | 533 ++++++++++++++++++ .../src/service/finalized_state/zebra_db.rs | 91 ++- zebra-test/src/command.rs | 92 ++- zebrad/src/application.rs | 29 +- zebrad/tests/acceptance.rs | 207 ++++++- zebrad/tests/common/cached_state.rs | 80 --- .../get_block_template.rs | 4 +- .../get_block_template_rpcs/get_peer_info.rs | 6 +- .../get_block_template_rpcs/submit_block.rs | 6 +- zebrad/tests/common/launch.rs | 103 +++- .../lightwalletd/send_transaction_test.rs | 4 +- zebrad/tests/common/sync.rs | 30 +- zebrad/tests/common/test_type.rs | 71 ++- 21 files changed, 1260 insertions(+), 250 deletions(-) create mode 100644 zebra-state/src/service/finalized_state/disk_format/upgrade.rs diff --git a/.github/workflows/continous-integration-docker.yml b/.github/workflows/continous-integration-docker.yml index aa08a79f874..1a5fbb27423 100644 --- a/.github/workflows/continous-integration-docker.yml +++ b/.github/workflows/continous-integration-docker.yml @@ -35,6 +35,11 @@ on: default: false description: 'Just run a lightwalletd full sync and update tip disks' required: true + force_save_to_disk: + required: false + type: boolean + default: false + description: 'Force tests to always create a cached state disk, if they already create disks' no_cache: description: 'Disable the Docker cache for this build' required: false @@ -337,6 +342,7 @@ jobs: network: ${{ inputs.network || vars.ZCASH_NETWORK }} needs_zebra_state: false saves_to_disk: true + force_save_to_disk: ${{ inputs.force_save_to_disk || false }} disk_suffix: checkpoint height_grep_text: 'flushing database to disk .*height.*=.*Height.*\(' secrets: inherit @@ -399,6 
+405,7 @@ jobs: is_long_test: true needs_zebra_state: false saves_to_disk: true + force_save_to_disk: ${{ inputs.force_save_to_disk || false }} disk_suffix: tip height_grep_text: 'current_height.*=.*Height.*\(' secrets: inherit @@ -439,6 +446,7 @@ jobs: needs_zebra_state: true # update the disk on every PR, to increase CI speed saves_to_disk: true + force_save_to_disk: ${{ inputs.force_save_to_disk || false }} disk_suffix: tip root_state_path: '/var/cache' zebra_state_dir: 'zebrad-cache' @@ -511,6 +519,7 @@ jobs: is_long_test: true needs_zebra_state: false saves_to_disk: true + force_save_to_disk: ${{ inputs.force_save_to_disk || false }} disk_suffix: tip height_grep_text: 'current_height.*=.*Height.*\(' secrets: inherit @@ -554,6 +563,7 @@ jobs: # update the disk on every PR, to increase CI speed # we don't have a test-update-sync-testnet job, so we need to update the disk here saves_to_disk: true + force_save_to_disk: ${{ inputs.force_save_to_disk || false }} disk_suffix: tip root_state_path: '/var/cache' zebra_state_dir: 'zebrad-cache' @@ -587,6 +597,7 @@ jobs: needs_zebra_state: true needs_lwd_state: false saves_to_disk: true + force_save_to_disk: ${{ inputs.force_save_to_disk || false }} disk_prefix: lwd-cache disk_suffix: tip root_state_path: '/var/cache' diff --git a/.github/workflows/deploy-gcp-tests.yml b/.github/workflows/deploy-gcp-tests.yml index 3f92e30ba6f..da8442fa9ad 100644 --- a/.github/workflows/deploy-gcp-tests.yml +++ b/.github/workflows/deploy-gcp-tests.yml @@ -79,7 +79,12 @@ on: saves_to_disk: required: true type: boolean - description: 'Does the test create a new cached state disk?' + description: 'Can this test create new or updated cached state disks?' 
+ force_save_to_disk: + required: false + type: boolean + default: false + description: 'Force this test to create a new or updated cached state disk' app_name: required: false type: string @@ -1702,7 +1707,7 @@ jobs: # We run exactly one of without-cached-state or with-cached-state, and we always skip the other one. # Normally, if a job is skipped, all the jobs that depend on it are also skipped. # So we need to override the default success() check to make this job run. - if: ${{ !cancelled() && !failure() && inputs.saves_to_disk }} + if: ${{ !cancelled() && !failure() && (inputs.saves_to_disk || inputs.force_save_to_disk) }} permissions: contents: 'read' id-token: 'write' @@ -1791,6 +1796,96 @@ jobs: echo "UPDATE_SUFFIX=$UPDATE_SUFFIX" >> "$GITHUB_ENV" echo "TIME_SUFFIX=$TIME_SUFFIX" >> "$GITHUB_ENV" + # Get the full initial and running database versions from the test logs. + # These versions are used as part of the disk description and labels. + # + # If these versions are missing from the logs, the job fails. + # + # Typically, the database versions are around line 20 in the logs.. + # But we check the first 1000 log lines, just in case the test harness recompiles all the + # dependencies before running the test. (This can happen if the cache is invalid.) + # + # Passes the versions to subsequent steps using the $INITIAL_DISK_DB_VERSION, + # $RUNNING_DB_VERSION, and $DB_VERSION_SUMMARY env variables. 
+ - name: Get database versions from logs + run: | + INITIAL_DISK_DB_VERSION="" + RUNNING_DB_VERSION="" + DB_VERSION_SUMMARY="" + + DOCKER_LOGS=$( \ + gcloud compute ssh ${{ inputs.test_id }}-${{ env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }} \ + --zone ${{ vars.GCP_ZONE }} \ + --ssh-flag="-o ServerAliveInterval=5" \ + --ssh-flag="-o ConnectionAttempts=20" \ + --ssh-flag="-o ConnectTimeout=5" \ + --command=" \ + sudo docker logs ${{ inputs.test_id }} | head -1000 \ + ") + + # either a semantic version or "creating new database" + INITIAL_DISK_DB_VERSION=$( \ + echo "$DOCKER_LOGS" | \ + grep --extended-regexp --only-matching 'initial disk state version: [0-9a-z\.]+' | \ + grep --extended-regexp --only-matching '[0-9a-z\.]+' | \ + tail -1 || \ + [[ $? == 1 ]] \ + ) + + if [[ -z "$INITIAL_DISK_DB_VERSION" ]]; then + echo "Checked logs:" + echo "" + echo "$DOCKER_LOGS" + echo "" + echo "Missing initial disk database version in logs: $INITIAL_DISK_DB_VERSION" + # Fail the tests, because Zebra didn't log the initial disk database version, + # or the regex in this step is wrong. + false + fi + + if [[ "$INITIAL_DISK_DB_VERSION" = "creating.new.database" ]]; then + INITIAL_DISK_DB_VERSION="new" + else + INITIAL_DISK_DB_VERSION="v${INITIAL_DISK_DB_VERSION//./-}" + fi + + echo "Found initial disk database version in logs: $INITIAL_DISK_DB_VERSION" + echo "INITIAL_DISK_DB_VERSION=$INITIAL_DISK_DB_VERSION" >> "$GITHUB_ENV" + + RUNNING_DB_VERSION=$( \ + echo "$DOCKER_LOGS" | \ + grep --extended-regexp --only-matching 'running state version: [0-9\.]+' | \ + grep --extended-regexp --only-matching '[0-9\.]+' | \ + tail -1 || \ + [[ $? == 1 ]] \ + ) + + if [[ -z "$RUNNING_DB_VERSION" ]]; then + echo "Checked logs:" + echo "" + echo "$DOCKER_LOGS" + echo "" + echo "Missing running database version in logs: $RUNNING_DB_VERSION" + # Fail the tests, because Zebra didn't log the running database version, + # or the regex in this step is wrong. 
+ false + fi + + RUNNING_DB_VERSION="v${RUNNING_DB_VERSION//./-}" + echo "Found running database version in logs: $RUNNING_DB_VERSION" + echo "RUNNING_DB_VERSION=$RUNNING_DB_VERSION" >> "$GITHUB_ENV" + + if [[ "$INITIAL_DISK_DB_VERSION" = "$RUNNING_DB_VERSION" ]]; then + DB_VERSION_SUMMARY="$RUNNING_DB_VERSION" + elif [[ "$INITIAL_DISK_DB_VERSION" = "new" ]]; then + DB_VERSION_SUMMARY="$RUNNING_DB_VERSION in new database" + else + DB_VERSION_SUMMARY="$INITIAL_DISK_DB_VERSION changing to $RUNNING_DB_VERSION" + fi + + echo "Summarised database versions from logs: $DB_VERSION_SUMMARY" + echo "DB_VERSION_SUMMARY=$DB_VERSION_SUMMARY" >> "$GITHUB_ENV" + # Get the sync height from the test logs, which is later used as part of the # disk description and labels. # @@ -1800,7 +1895,7 @@ jobs: # # If the sync height is missing from the logs, the job fails. # - # Passes the sync height to subsequent steps using $SYNC_HEIGHT env variable. + # Passes the sync height to subsequent steps using the $SYNC_HEIGHT env variable. - name: Get sync height from logs run: | SYNC_HEIGHT="" @@ -1818,12 +1913,16 @@ jobs: SYNC_HEIGHT=$( \ echo "$DOCKER_LOGS" | \ grep --extended-regexp --only-matching '${{ inputs.height_grep_text }}[0-9]+' | \ - grep --extended-regexp --only-matching '[0-9]+' | \ + grep --extended-regexp --only-matching '[0-9]+' | \ tail -1 || \ [[ $? == 1 ]] \ ) if [[ -z "$SYNC_HEIGHT" ]]; then + echo "Checked logs:" + echo "" + echo "$DOCKER_LOGS" + echo "" echo "Missing sync height in logs: $SYNC_HEIGHT" # Fail the tests, because Zebra and lightwalletd didn't log their sync heights, # or the CI workflow sync height regex is wrong. 
@@ -1885,15 +1984,15 @@ jobs: - name: Create image from state disk run: | MINIMUM_UPDATE_HEIGHT=$((ORIGINAL_HEIGHT+CACHED_STATE_UPDATE_LIMIT)) - if [[ -z "$UPDATE_SUFFIX" ]] || [[ "$SYNC_HEIGHT" -gt "$MINIMUM_UPDATE_HEIGHT" ]]; then + if [[ -z "$UPDATE_SUFFIX" ]] || [[ "$SYNC_HEIGHT" -gt "$MINIMUM_UPDATE_HEIGHT" ]] || [[ "${{ inputs.force_save_to_disk }}" == "true" ]]; then gcloud compute images create \ "${{ inputs.disk_prefix }}-${SHORT_GITHUB_REF}-${{ env.GITHUB_SHA_SHORT }}-v${{ env.STATE_VERSION }}-${NETWORK}-${{ inputs.disk_suffix }}${UPDATE_SUFFIX}-${TIME_SUFFIX}" \ --force \ --source-disk=${{ inputs.test_id }}-${{ env.GITHUB_SHA_SHORT }} \ --source-disk-zone=${{ vars.GCP_ZONE }} \ --storage-location=us \ - --description="Created from commit ${{ env.GITHUB_SHA_SHORT }} with height ${{ env.SYNC_HEIGHT }}" \ - --labels="height=${{ env.SYNC_HEIGHT }},purpose=${{ inputs.disk_prefix }},commit=${{ env.GITHUB_SHA_SHORT }},state-version=${{ env.STATE_VERSION }},network=${NETWORK},target-height-kind=${{ inputs.disk_suffix }},update-flag=${UPDATE_SUFFIX},updated-from-height=${ORIGINAL_HEIGHT},test-id=${{ inputs.test_id }},app-name=${{ inputs.app_name }}" + --description="Created from commit ${{ env.GITHUB_SHA_SHORT }} with height ${{ env.SYNC_HEIGHT }} and database format ${{ env.DB_VERSION_SUMMARY }}" \ + --labels="height=${{ env.SYNC_HEIGHT }},purpose=${{ inputs.disk_prefix }},commit=${{ env.GITHUB_SHA_SHORT }},state-version=${{ env.STATE_VERSION }},state-running-version=${RUNNING_DB_VERSION},initial-state-disk-version=${INITIAL_DISK_DB_VERSION},network=${NETWORK},target-height-kind=${{ inputs.disk_suffix }},update-flag=${UPDATE_SUFFIX},force-save=${{ inputs.force_save_to_disk }},updated-from-height=${ORIGINAL_HEIGHT},test-id=${{ inputs.test_id }},app-name=${{ inputs.app_name }}" else echo "Skipped cached state update because the new sync height $SYNC_HEIGHT was less than $CACHED_STATE_UPDATE_LIMIT blocks above the original height $ORIGINAL_HEIGHT" fi diff --git 
a/zebra-state/src/config.rs b/zebra-state/src/config.rs index 3d09aecdc8c..69020061ff3 100644 --- a/zebra-state/src/config.rs +++ b/zebra-state/src/config.rs @@ -175,6 +175,7 @@ impl Default for Config { } // Cleaning up old database versions +// TODO: put this in a different module? /// Spawns a task that checks if there are old database folders, /// and deletes them from the filesystem. @@ -292,6 +293,8 @@ fn parse_version_number(dir_name: &str) -> Option { .and_then(|version| version.parse().ok()) } +// TODO: move these to the format upgrade module + /// Returns the full semantic version of the currently running database format code. /// /// This is the version implemented by the Zebra code that's currently running, @@ -336,29 +339,40 @@ pub fn database_format_version_on_disk( ))) } -/// Writes the currently running semantic database version to the on-disk database. +/// Writes `changed_version` to the on-disk database after the format is changed. +/// (Or a new database is created.) /// /// # Correctness /// -/// This should only be called after all running format upgrades are complete. +/// This should only be called: +/// - after each format upgrade is complete, +/// - when creating a new database, or +/// - when an older Zebra version opens a newer database. /// /// # Concurrency /// /// This must only be called while RocksDB has an open database for `config`. /// Otherwise, multiple Zebra processes could write the version at the same time, /// corrupting the file. +/// +/// # Panics +/// +/// If the major versions do not match. (The format is incompatible.) pub fn write_database_format_version_to_disk( + changed_version: &Version, config: &Config, network: Network, ) -> Result<(), BoxError> { let version_path = config.version_file_path(network); // The major version is already in the directory path. 
- let version = format!( - "{}.{}", - DATABASE_FORMAT_MINOR_VERSION, DATABASE_FORMAT_PATCH_VERSION + assert_eq!( + changed_version.major, DATABASE_FORMAT_VERSION, + "tried to do in-place database format change to an incompatible version" ); + let version = format!("{}.{}", changed_version.minor, changed_version.patch); + // # Concurrency // // The caller handles locking for this file write. diff --git a/zebra-state/src/constants.rs b/zebra-state/src/constants.rs index 011b4115eda..bd60f3d6198 100644 --- a/zebra-state/src/constants.rs +++ b/zebra-state/src/constants.rs @@ -5,7 +5,7 @@ use regex::Regex; // For doc comment links #[allow(unused_imports)] -use crate::config::{database_format_version_in_code, database_format_version_on_disk}; +use crate::config::{self, Config}; pub use zebra_chain::transparent::MIN_TRANSPARENT_COINBASE_MATURITY; @@ -37,9 +37,9 @@ pub const MAX_BLOCK_REORG_HEIGHT: u32 = MIN_TRANSPARENT_COINBASE_MATURITY - 1; /// - we previously added compatibility code, and /// - it's available in all supported Zebra versions. /// -/// Use [`database_format_version_in_code()`] or [`database_format_version_on_disk()`] -/// to get the full semantic format version. -pub const DATABASE_FORMAT_VERSION: u64 = 25; +/// Use [`config::database_format_version_in_code()`] or +/// [`config::database_format_version_on_disk()`] to get the full semantic format version. +pub(crate) const DATABASE_FORMAT_VERSION: u64 = 25; /// The database format minor version, incremented each time the on-disk database format has a /// significant data format change. @@ -48,14 +48,16 @@ pub const DATABASE_FORMAT_VERSION: u64 = 25; /// - adding new column families, /// - changing the format of a column family in a compatible way, or /// - breaking changes with compatibility code in all supported Zebra versions. 
-pub const DATABASE_FORMAT_MINOR_VERSION: u64 = 0; +pub(crate) const DATABASE_FORMAT_MINOR_VERSION: u64 = 0; /// The database format patch version, incremented each time the on-disk database format has a /// significant format compatibility fix. -pub const DATABASE_FORMAT_PATCH_VERSION: u64 = 1; +pub(crate) const DATABASE_FORMAT_PATCH_VERSION: u64 = 2; /// The name of the file containing the minor and patch database versions. -pub const DATABASE_FORMAT_VERSION_FILE_NAME: &str = "version"; +/// +/// Use [`Config::version_file_path()`] to get the path to this file. +pub(crate) const DATABASE_FORMAT_VERSION_FILE_NAME: &str = "version"; /// The maximum number of blocks to check for NU5 transactions, /// before we assume we are on a pre-NU5 legacy chain. diff --git a/zebra-state/src/lib.rs b/zebra-state/src/lib.rs index f75721c5d57..eedb90d1328 100644 --- a/zebra-state/src/lib.rs +++ b/zebra-state/src/lib.rs @@ -29,7 +29,10 @@ mod service; #[cfg(test)] mod tests; -pub use config::{check_and_delete_old_databases, Config}; +pub use config::{ + check_and_delete_old_databases, database_format_version_in_code, + database_format_version_on_disk, Config, +}; pub use constants::MAX_BLOCK_REORG_HEIGHT; pub use error::{ BoxError, CloneError, CommitSemanticallyVerifiedError, DuplicateNullifierError, @@ -57,4 +60,7 @@ pub use service::{ init_test, init_test_services, ReadStateService, }; +#[cfg(any(test, feature = "proptest-impl"))] +pub use config::write_database_format_version_to_disk; + pub(crate) use request::ContextuallyVerifiedBlock; diff --git a/zebra-state/src/service.rs b/zebra-state/src/service.rs index 897903ed4ab..e3955b63e86 100644 --- a/zebra-state/src/service.rs +++ b/zebra-state/src/service.rs @@ -263,6 +263,7 @@ impl Drop for ReadStateService { // The read state service shares the state, // so dropping it should check if we can shut down. 
+ // TODO: move this into a try_shutdown() method if let Some(block_write_task) = self.block_write_task.take() { if let Some(block_write_task_handle) = Arc::into_inner(block_write_task) { // We're the last database user, so we can tell it to shut down (blocking): @@ -280,6 +281,7 @@ impl Drop for ReadStateService { #[cfg(test)] debug!("waiting for the block write task to finish"); + // TODO: move this into a check_for_panics() method if let Err(thread_panic) = block_write_task_handle.join() { std::panic::resume_unwind(thread_panic); } else { @@ -343,9 +345,7 @@ impl StateService { .tip_block() .map(CheckpointVerifiedBlock::from) .map(ChainTipBlock::from); - timer.finish(module_path!(), line!(), "fetching database tip"); - let timer = CodeTimer::start(); let (chain_tip_sender, latest_chain_tip, chain_tip_change) = ChainTipSender::new(initial_tip, network); @@ -1161,6 +1161,8 @@ impl Service for ReadStateService { fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll> { // Check for panics in the block write task + // + // TODO: move into a check_for_panics() method let block_write_task = self.block_write_task.take(); if let Some(block_write_task) = block_write_task { @@ -1177,6 +1179,8 @@ impl Service for ReadStateService { } } + self.db.check_for_panics(); + Poll::Ready(Ok(())) } diff --git a/zebra-state/src/service/finalized_state/disk_db.rs b/zebra-state/src/service/finalized_state/disk_db.rs index 7e8ebe44662..b61d6abdf5f 100644 --- a/zebra-state/src/service/finalized_state/disk_db.rs +++ b/zebra-state/src/service/finalized_state/disk_db.rs @@ -7,11 +7,10 @@ //! //! # Correctness //! -//! The [`crate::constants::DATABASE_FORMAT_VERSION`] constant must +//! The [`crate::constants::DATABASE_FORMAT_VERSION`] constants must //! be incremented each time the database format (column, serialization, etc) changes. 
use std::{ - cmp::Ordering, collections::{BTreeMap, HashMap}, fmt::Debug, ops::RangeBounds, @@ -25,10 +24,6 @@ use rlimit::increase_nofile_limit; use zebra_chain::parameters::Network; use crate::{ - config::{ - database_format_version_in_code, database_format_version_on_disk, - write_database_format_version_to_disk, - }, service::finalized_state::disk_format::{FromDisk, IntoDisk}, Config, }; @@ -528,35 +523,6 @@ impl DiskDb { pub fn new(config: &Config, network: Network) -> DiskDb { let path = config.db_path(network); - let running_version = database_format_version_in_code(); - let disk_version = database_format_version_on_disk(config, network) - .expect("unable to read database format version file"); - - match disk_version.as_ref().map(|disk| disk.cmp(&running_version)) { - // TODO: if the on-disk format is older, actually run the upgrade task after the - // database has been opened (#6642) - Some(Ordering::Less) => info!( - ?running_version, - ?disk_version, - "trying to open older database format: launching upgrade task" - ), - // TODO: if the on-disk format is newer, downgrade the version after the - // database has been opened (#6642) - Some(Ordering::Greater) => info!( - ?running_version, - ?disk_version, - "trying to open newer database format: data should be compatible" - ), - Some(Ordering::Equal) => info!( - ?running_version, - "trying to open compatible database format" - ), - None => info!( - ?running_version, - "creating new database with the current format" - ), - } - let db_options = DiskDb::options(); // When opening the database in read/write mode, all column families must be opened. @@ -590,27 +556,6 @@ impl DiskDb { db.assert_default_cf_is_empty(); - // Now we've checked that the database format is up-to-date, - // mark it as updated on disk. - // - // # Concurrency - // - // The version must only be updated while RocksDB is holding the database - // directory lock. This prevents multiple Zebra instances corrupting the version - // file. 
- // - // # TODO - // - // - only update the version at the end of the format upgrade task (#6642) - // - add a note to the format upgrade task code to update the version constants - // whenever the format changes - // - add a test that the format upgrade runs exactly once when: - // 1. if an older cached state format is opened, the format is upgraded, - // then if Zebra is launched again the format is not upgraded - // 2. if the current cached state format is opened, the format is not upgraded - write_database_format_version_to_disk(config, network) - .expect("unable to write database format version file to disk"); - db } @@ -809,6 +754,19 @@ impl DiskDb { // Cleanup methods + /// Returns the number of shared instances of this database. + /// + /// # Concurrency + /// + /// The actual number of owners can be higher or lower than the returned value, + /// because databases can simultaneously be cloned or dropped in other threads. + /// + /// However, if the number of owners is 1, and the caller has exclusive access, + /// the count can't increase unless that caller clones the database. + pub(crate) fn shared_database_owners(&self) -> usize { + Arc::strong_count(&self.db) + Arc::weak_count(&self.db) + } + /// Shut down the database, cleaning up background tasks and ephemeral data. /// /// If `force` is true, clean up regardless of any shared references. @@ -829,9 +787,8 @@ impl DiskDb { // instance. If they do, they must drop it before: // - shutting down database threads, or // - deleting database files. 
- let shared_database_owners = Arc::strong_count(&self.db) + Arc::weak_count(&self.db); - if shared_database_owners > 1 { + if self.shared_database_owners() > 1 { let path = self.path(); let mut ephemeral_note = ""; diff --git a/zebra-state/src/service/finalized_state/disk_format.rs b/zebra-state/src/service/finalized_state/disk_format.rs index e731ff20d8d..716792f1cb1 100644 --- a/zebra-state/src/service/finalized_state/disk_format.rs +++ b/zebra-state/src/service/finalized_state/disk_format.rs @@ -11,6 +11,7 @@ pub mod block; pub mod chain; pub mod shielded; pub mod transparent; +pub mod upgrade; #[cfg(test)] mod tests; diff --git a/zebra-state/src/service/finalized_state/disk_format/upgrade.rs b/zebra-state/src/service/finalized_state/disk_format/upgrade.rs new file mode 100644 index 00000000000..8527c8f4c76 --- /dev/null +++ b/zebra-state/src/service/finalized_state/disk_format/upgrade.rs @@ -0,0 +1,533 @@ +//! In-place format upgrades for the Zebra state database. + +use std::{ + cmp::Ordering, + panic, + sync::{mpsc, Arc}, + thread::{self, JoinHandle}, +}; + +use semver::Version; +use tracing::Span; + +use zebra_chain::{block::Height, parameters::Network}; + +use DbFormatChange::*; + +use crate::{ + config::write_database_format_version_to_disk, database_format_version_in_code, + database_format_version_on_disk, service::finalized_state::ZebraDb, Config, +}; + +/// The kind of database format change we're performing. +#[derive(Clone, Debug, Eq, PartialEq)] +pub enum DbFormatChange { + /// Marking the format as newly created by `running_version`. + /// + /// Newly created databases have no disk version. + NewlyCreated { running_version: Version }, + + /// Upgrading the format from `older_disk_version` to `newer_running_version`. + /// + /// Until this upgrade is complete, the format is a mixture of both versions. 
+ Upgrade { + older_disk_version: Version, + newer_running_version: Version, + }, + + /// Marking the format as downgraded from `newer_disk_version` to `older_running_version`. + /// + /// Until the state is upgraded to `newer_disk_version` by a Zebra version with that state + /// version (or greater), the format will be a mixture of both versions. + Downgrade { + newer_disk_version: Version, + older_running_version: Version, + }, +} + +/// A handle to a spawned format change thread. +/// +/// Cloning this struct creates an additional handle to the same thread. +/// +/// # Concurrency +/// +/// Cancelling the thread on drop has a race condition, because two handles can be dropped at +/// the same time. +/// +/// If cancelling the thread is important, the owner of the handle must call force_cancel(). +#[derive(Clone, Debug)] +pub struct DbFormatChangeThreadHandle { + /// A handle that can wait for the running format change thread to finish. + /// + /// Panics from this thread are propagated into Zebra's state service. + update_task: Option>>, + + /// A channel that tells the running format thread to finish early. + cancel_handle: mpsc::SyncSender, +} + +/// Marker for cancelling a format upgrade. +#[derive(Copy, Clone, Debug, Eq, PartialEq)] +pub struct CancelFormatChange; + +impl DbFormatChange { + /// Check if loading `disk_version` into `running_version` needs a format change, + /// and if it does, return the required format change. + /// + /// Also logs the kind of change at info level. + /// + /// If `disk_version` is `None`, Zebra is creating a new database. 
+ pub fn new(running_version: Version, disk_version: Option) -> Option { + let Some(disk_version) = disk_version else { + info!( + ?running_version, + "creating new database with the current format" + ); + + return Some(NewlyCreated { running_version }); + }; + + match disk_version.cmp(&running_version) { + Ordering::Less => { + info!( + ?running_version, + ?disk_version, + "trying to open older database format: launching upgrade task" + ); + + Some(Upgrade { + older_disk_version: disk_version, + newer_running_version: running_version, + }) + } + Ordering::Greater => { + info!( + ?running_version, + ?disk_version, + "trying to open newer database format: data should be compatible" + ); + + Some(Downgrade { + newer_disk_version: disk_version, + older_running_version: running_version, + }) + } + Ordering::Equal => { + info!(?running_version, "trying to open current database format"); + + None + } + } + } + + /// Returns true if this change is an upgrade. + #[allow(dead_code)] + pub fn is_upgrade(&self) -> bool { + matches!(self, Upgrade { .. }) + } + + /// Launch a `std::thread` that applies this format change to the database. + /// + /// `initial_tip_height` is the database height when it was opened, and `upgrade_db` is the + /// database instance to upgrade. + pub fn spawn_format_change( + self, + config: Config, + network: Network, + initial_tip_height: Option, + upgrade_db: ZebraDb, + ) -> DbFormatChangeThreadHandle { + // # Correctness + // + // Cancel handles must use try_send() to avoid blocking waiting for the format change + // thread to shut down. 
+ let (cancel_handle, cancel_receiver) = mpsc::sync_channel(1); + + let span = Span::current(); + let update_task = thread::spawn(move || { + span.in_scope(move || { + self.apply_format_change( + config, + network, + initial_tip_height, + upgrade_db, + cancel_receiver, + ); + }) + }); + + let mut handle = DbFormatChangeThreadHandle { + update_task: Some(Arc::new(update_task)), + cancel_handle, + }; + + handle.check_for_panics(); + + handle + } + + /// Apply this format change to the database. + /// + /// Format changes should be launched in an independent `std::thread`, which runs until the + /// upgrade is finished. + /// + /// See `apply_format_upgrade()` for details. + fn apply_format_change( + self, + config: Config, + network: Network, + initial_tip_height: Option, + upgrade_db: ZebraDb, + cancel_receiver: mpsc::Receiver, + ) { + match self { + // Handled in the rest of this function. + Upgrade { .. } => self.apply_format_upgrade( + config, + network, + initial_tip_height, + upgrade_db, + cancel_receiver, + ), + + NewlyCreated { .. } => { + Self::mark_as_newly_created(&config, network); + } + Downgrade { .. } => { + // # Correctness + // + // At the start of a format downgrade, the database must be marked as partially or + // fully downgraded. This lets newer Zebra versions know that some blocks with older + // formats have been added to the database. + Self::mark_as_downgraded(&config, network); + + // Older supported versions just assume they can read newer formats, + // because they can't predict all changes a newer Zebra version could make. + // + // The responsibility of staying backwards-compatible is on the newer version. + // We do this on a best-effort basis for versions that are still supported. + } + } + } + + /// Apply any required format updates to the database. + /// Format changes should be launched in an independent `std::thread`. 
+ /// + /// If `cancel_receiver` gets a message, or its sender is dropped, + /// the format change stops running early. + // + // New format upgrades must be added to the *end* of this method. + fn apply_format_upgrade( + self, + config: Config, + network: Network, + initial_tip_height: Option, + upgrade_db: ZebraDb, + cancel_receiver: mpsc::Receiver, + ) { + let Upgrade { + newer_running_version, + older_disk_version, + } = self + else { + unreachable!("already checked for Upgrade") + }; + + // # New Upgrades Sometimes Go Here + // + // If the format change is outside RocksDb, put new code above this comment! + let Some(initial_tip_height) = initial_tip_height else { + // If the database is empty, then the RocksDb format doesn't need any changes. + info!( + ?newer_running_version, + ?older_disk_version, + "marking empty database as upgraded" + ); + + Self::mark_as_upgraded_to(&database_format_version_in_code(), &config, network); + + info!( + ?newer_running_version, + ?older_disk_version, + "empty database is fully upgraded" + ); + + return; + }; + + // Example format change. + // + // TODO: link to format upgrade instructions doc here + + // Check if we need to do this upgrade. + let database_format_add_format_change_task = + Version::parse("25.0.2").expect("version constant is valid"); + + if older_disk_version < database_format_add_format_change_task { + let mut upgrade_height = Height(0); + + // Go through every height from genesis to the tip of the old version. + // If the state was downgraded, some heights might already be upgraded. + // (Since the upgraded format is added to the tip, the database can switch between + // lower and higher versions at any block.) + // + // Keep upgrading until the initial database has been upgraded, + // or this task is cancelled by a shutdown. 
+ while upgrade_height <= initial_tip_height + && matches!(cancel_receiver.try_recv(), Err(mpsc::TryRecvError::Empty)) + { + // TODO: Do one format upgrade step here + // + // This fake step just shows how to access the database. + let _replace_me_ = upgrade_db.tip(); + + upgrade_height = (upgrade_height + 1).expect("task exits before maximum height"); + } + } + + // At the end of each format upgrade, the database is marked as upgraded to that version. + // Upgrades can be run more than once if Zebra is restarted, so this is just a performance + // optimisation. + info!( + ?initial_tip_height, + ?newer_running_version, + ?older_disk_version, + "marking database as upgraded" + ); + Self::mark_as_upgraded_to(&database_format_add_format_change_task, &config, network); + + // End of example format change. + + // # New Upgrades Usually Go Here + // + // New code goes above this comment! + // + // Run the latest format upgrade code after the other upgrades are complete, + // then mark the format as upgraded. The code should check `cancel_receiver` + // every time it runs its inner update loop. + info!( + ?initial_tip_height, + ?newer_running_version, + ?older_disk_version, + "database is fully upgraded" + ); + } + + /// Mark a newly created database with the current format version. + /// + /// This should be called when a newly created database is opened. + /// + /// # Concurrency + /// + /// The version must only be updated while RocksDB is holding the database + /// directory lock. This prevents multiple Zebra instances corrupting the version + /// file. + /// + /// # Panics + /// + /// If the format should not have been upgraded, because the database is not newly created. 
+ fn mark_as_newly_created(config: &Config, network: Network) {
+ let disk_version = database_format_version_on_disk(config, network)
+ .expect("unable to read database format version file path");
+ let running_version = database_format_version_in_code();
+
+ assert_eq!(
+ disk_version, None,
+ "can't overwrite the format version in an existing database:\n\
+ disk: {disk_version:?}\n\
+ running: {running_version}"
+ );
+
+ write_database_format_version_to_disk(&running_version, config, network)
+ .expect("unable to write database format version file to disk");
+
+ info!(
+ ?running_version,
+ ?disk_version,
+ "marked database format as newly created"
+ );
+ }
+
+ /// Mark the database as upgraded to `format_upgrade_version`.
+ ///
+ /// This should be called when an older database is opened by a newer Zebra version,
+ /// after each version upgrade is complete.
+ ///
+ /// # Concurrency
+ ///
+ /// The version must only be updated while RocksDB is holding the database
+ /// directory lock. This prevents multiple Zebra instances corrupting the version
+ /// file.
+ ///
+ /// # Panics
+ ///
+ /// If the format should not have been upgraded, because the running version is:
+ /// - older than the disk version (that's a downgrade)
+ /// - the same as the disk version (no upgrade needed)
+ ///
+ /// If the format should not have been upgraded, because the format upgrade version is:
+ /// - older or the same as the disk version
+ /// (multiple upgrades to the same version are not allowed)
+ /// - greater than the running version (that's a logic bug)
+ fn mark_as_upgraded_to(format_upgrade_version: &Version, config: &Config, network: Network) {
+ let disk_version = database_format_version_on_disk(config, network)
+ .expect("unable to read database format version file")
+ .expect("tried to upgrade a newly created database");
+ let running_version = database_format_version_in_code();
+
+ assert!(
+ running_version > disk_version,
+ "can't upgrade a database that is being opened by an older or the same Zebra version:\n\
+ disk: {disk_version}\n\
+ upgrade: {format_upgrade_version}\n\
+ running: {running_version}"
+ );
+
+ assert!(
+ format_upgrade_version > &disk_version,
+ "can't upgrade a database that has already been upgraded, or is newer:\n\
+ disk: {disk_version}\n\
+ upgrade: {format_upgrade_version}\n\
+ running: {running_version}"
+ );
+
+ assert!(
+ format_upgrade_version <= &running_version,
+ "can't upgrade to a newer version than the running Zebra version:\n\
+ disk: {disk_version}\n\
+ upgrade: {format_upgrade_version}\n\
+ running: {running_version}"
+ );
+
+ write_database_format_version_to_disk(format_upgrade_version, config, network)
+ .expect("unable to write database format version file to disk");
+
+ info!(
+ ?running_version,
+ ?format_upgrade_version,
+ ?disk_version,
+ "marked database format as upgraded"
+ );
+ }
+
+ /// Mark the database as downgraded to the running database version.
+ /// This should be called after a newer database is opened by an older Zebra version.
+ /// + /// # Concurrency + /// + /// The version must only be updated while RocksDB is holding the database + /// directory lock. This prevents multiple Zebra instances corrupting the version + /// file. + /// + /// # Panics + /// + /// If the format should have been upgraded, because the running version is newer. + /// If the state is newly created, because the running version should be the same. + /// + /// Multiple downgrades are allowed, because they all downgrade to the same running version. + fn mark_as_downgraded(config: &Config, network: Network) { + let disk_version = database_format_version_on_disk(config, network) + .expect("unable to read database format version file") + .expect("can't downgrade a newly created database"); + let running_version = database_format_version_in_code(); + + assert!( + disk_version >= running_version, + "can't downgrade a database that is being opened by a newer Zebra version:\n\ + disk: {disk_version}\n\ + running: {running_version}" + ); + + write_database_format_version_to_disk(&running_version, config, network) + .expect("unable to write database format version file to disk"); + + info!( + ?running_version, + ?disk_version, + "marked database format as downgraded" + ); + } +} + +impl DbFormatChangeThreadHandle { + /// Cancel the running format change thread, if this is the last handle. + /// Returns true if it was actually cancelled. + pub fn cancel_if_needed(&self) -> bool { + // # Correctness + // + // Checking the strong count has a race condition, because two handles can be dropped at + // the same time. + // + // If cancelling the thread is important, the owner of the handle must call force_cancel(). + if let Some(update_task) = self.update_task.as_ref() { + if Arc::strong_count(update_task) <= 1 { + self.force_cancel(); + return true; + } + } + + false + } + + /// Force the running format change thread to cancel, even if there are other handles. 
+ pub fn force_cancel(&self) { + // There's nothing we can do about errors here. + // If the channel is disconnected, the task has exited. + // If it's full, it's already been cancelled. + let _ = self.cancel_handle.try_send(CancelFormatChange); + } + + /// Check for panics in the code running in the spawned thread. + /// If the thread exited with a panic, resume that panic. + /// + /// This method should be called regularly, so that panics are detected as soon as possible. + pub fn check_for_panics(&mut self) { + let update_task = self.update_task.take(); + + if let Some(update_task) = update_task { + if update_task.is_finished() { + // We use into_inner() because it guarantees that exactly one of the tasks + // gets the JoinHandle. try_unwrap() lets us keep the JoinHandle, but it can also + // miss panics. + if let Some(update_task) = Arc::into_inner(update_task) { + // We are the last handle with a reference to this task, + // so we can propagate any panics + if let Err(thread_panic) = update_task.join() { + panic::resume_unwind(thread_panic); + } + } + } else { + // It hasn't finished, so we need to put it back + self.update_task = Some(update_task); + } + } + } + + /// Wait for the spawned thread to finish. If it exited with a panic, resume that panic. + /// + /// This method should be called during shutdown. + pub fn wait_for_panics(&mut self) { + if let Some(update_task) = self.update_task.take() { + // We use into_inner() because it guarantees that exactly one of the tasks + // gets the JoinHandle. See the comments in check_for_panics(). + if let Some(update_task) = Arc::into_inner(update_task) { + // We are the last handle with a reference to this task, + // so we can propagate any panics + if let Err(thread_panic) = update_task.join() { + panic::resume_unwind(thread_panic); + } + } + } + } +} + +impl Drop for DbFormatChangeThreadHandle { + fn drop(&mut self) { + // Only cancel the format change if the state service is shutting down. 
+ if self.cancel_if_needed() { + self.wait_for_panics(); + } else { + self.check_for_panics(); + } + } +} diff --git a/zebra-state/src/service/finalized_state/zebra_db.rs b/zebra-state/src/service/finalized_state/zebra_db.rs index 8b6e261050a..63decfe10db 100644 --- a/zebra-state/src/service/finalized_state/zebra_db.rs +++ b/zebra-state/src/service/finalized_state/zebra_db.rs @@ -14,7 +14,14 @@ use std::path::Path; use zebra_chain::parameters::Network; use crate::{ - service::finalized_state::{disk_db::DiskDb, disk_format::block::MAX_ON_DISK_HEIGHT}, + config::{database_format_version_in_code, database_format_version_on_disk}, + service::finalized_state::{ + disk_db::DiskDb, + disk_format::{ + block::MAX_ON_DISK_HEIGHT, + upgrade::{DbFormatChange, DbFormatChangeThreadHandle}, + }, + }, Config, }; @@ -32,12 +39,20 @@ pub mod arbitrary; /// `rocksdb` allows concurrent writes through a shared reference, /// so database instances are cloneable. When the final clone is dropped, /// the database is closed. -#[derive(Clone, Debug, Eq, PartialEq)] +#[derive(Clone, Debug)] pub struct ZebraDb { // Owned State // // Everything contained in this state must be shared by all clones, or read-only. // + /// A handle to a running format change task, which cancels the task when dropped. + /// + /// # Concurrency + /// + /// This field should be dropped before the database field, so the format upgrade task is + /// cancelled before the database is dropped. This helps avoid some kinds of deadlocks. + format_change_handle: Option, + /// The inner low-level database wrapper for the RocksDB database. db: DiskDb, } @@ -46,12 +61,49 @@ impl ZebraDb { /// Opens or creates the database at `config.path` for `network`, /// and returns a shared high-level typed database wrapper. 
pub fn new(config: &Config, network: Network) -> ZebraDb { - let db = ZebraDb { + let running_version = database_format_version_in_code(); + let disk_version = database_format_version_on_disk(config, network) + .expect("unable to read database format version file"); + + // Log any format changes before opening the database, in case opening fails. + let format_change = DbFormatChange::new(running_version, disk_version); + + // Open the database and do initial checks. + let mut db = ZebraDb { + format_change_handle: None, db: DiskDb::new(config, network), }; db.check_max_on_disk_tip_height(); + // We have to get this height before we spawn the upgrade task, because threads can take + // a while to start, and new blocks can be committed as soon as we return from this method. + let initial_tip_height = db.finalized_tip_height(); + + // Start any required format changes. + // + // TODO: should debug_stop_at_height wait for these upgrades, or not? + if let Some(format_change) = format_change { + // Launch the format change and install its handle in the database. + // + // `upgrade_db` is a special clone of the database, which can't be used to shut down + // the upgrade task. (Because the task hasn't been launched yet, + // `db.format_change_handle` is always None.) + // + // It can be a FinalizedState if needed, or the FinalizedState methods needed for + // upgrades can be moved to ZebraDb. + let upgrade_db = db.clone(); + + let format_change_handle = format_change.spawn_format_change( + config.clone(), + network, + initial_tip_height, + upgrade_db, + ); + + db.format_change_handle = Some(format_change_handle); + } + db } @@ -60,6 +112,19 @@ impl ZebraDb { self.db.path() } + /// Check for panics in code running in spawned threads. + /// If a thread exited with a panic, resume that panic. + /// + /// This method should be called regularly, so that panics are detected as soon as possible. 
+ pub fn check_for_panics(&mut self) { + if let Some(format_change_handle) = self.format_change_handle.as_mut() { + format_change_handle.check_for_panics(); + } + + // This check doesn't panic, but we want to check it regularly anyway. + self.check_max_on_disk_tip_height(); + } + /// Shut down the database, cleaning up background tasks and ephemeral data. /// /// If `force` is true, clean up regardless of any shared references. @@ -68,7 +133,19 @@ impl ZebraDb { /// /// See [`DiskDb::shutdown`] for details. pub fn shutdown(&mut self, force: bool) { - self.check_max_on_disk_tip_height(); + // # Concurrency + // + // The format upgrade task should be cancelled before the database is flushed or shut down. + // This helps avoid some kinds of deadlocks. + // + // See also the correctness note in `DiskDb::shutdown()`. + if force || self.db.shared_database_owners() <= 1 { + if let Some(format_change_handle) = self.format_change_handle.as_mut() { + format_change_handle.force_cancel(); + } + } + + self.check_for_panics(); self.db.shutdown(force); } @@ -93,3 +170,9 @@ impl ZebraDb { } } } + +impl Drop for ZebraDb { + fn drop(&mut self) { + self.shutdown(false); + } +} diff --git a/zebra-test/src/command.rs b/zebra-test/src/command.rs index dae47defba3..75f45fc6d65 100644 --- a/zebra-test/src/command.rs +++ b/zebra-test/src/command.rs @@ -540,7 +540,9 @@ impl TestChild { // Read unread child output. // // This checks for failure logs, and prevents some test hangs and deadlocks. - if self.child.is_some() || self.stdout.is_some() { + // + // TODO: this could block if stderr is full and stdout is waiting for stderr to be read. 
+ if self.stdout.is_some() { let wrote_lines = self.wait_for_stdout_line(format!("\n{} Child Stdout:", self.command_path)); @@ -552,7 +554,7 @@ impl TestChild { } } - if self.child.is_some() || self.stderr.is_some() { + if self.stderr.is_some() { let wrote_lines = self.wait_for_stderr_line(format!("\n{} Child Stderr:", self.command_path)); @@ -566,6 +568,56 @@ impl TestChild { kill_result } + /// Kill the process, and return all its remaining standard output and standard error output. + /// + /// If `ignore_exited` is `true`, log "can't kill an exited process" errors, + /// rather than returning them. + /// + /// Returns `Ok(output)`, or an error if the kill failed. + pub fn kill_and_return_output(&mut self, ignore_exited: bool) -> Result { + self.apply_failure_regexes_to_outputs(); + + // Prevent a hang when consuming output, + // by making sure the child's output actually finishes. + let kill_result = self.kill(ignore_exited); + + // Read unread child output. + let mut stdout_buf = String::new(); + let mut stderr_buf = String::new(); + + // This also checks for failure logs, and prevents some test hangs and deadlocks. + loop { + let mut remaining_output = false; + + if let Some(stdout) = self.stdout.as_mut() { + if let Some(line) = + Self::wait_and_return_output_line(stdout, self.bypass_test_capture) + { + stdout_buf.push_str(&line); + remaining_output = true; + } + } + + if let Some(stderr) = self.stderr.as_mut() { + if let Some(line) = + Self::wait_and_return_output_line(stderr, self.bypass_test_capture) + { + stderr_buf.push_str(&line); + remaining_output = true; + } + } + + if !remaining_output { + break; + } + } + + let mut output = stdout_buf; + output.push_str(&stderr_buf); + + kill_result.map(|()| output) + } + /// Waits until a line of standard output is available, then consumes it. /// /// If there is a line, and `write_context` is `Some`, writes the context to the test logs. 
@@ -632,15 +684,40 @@ impl<T> TestChild<T> {
 false
 }
+ /// Waits until a line of `output` is available, then returns it.
+ ///
+ /// If there is a line, and `write_context` is `Some`, writes the context to the test logs.
+ /// Always writes the line to the test logs.
+ ///
+ /// Returns `Some(line)` if a line was available,
+ /// or `None` if the output has finished.
+ #[allow(clippy::unwrap_in_result)]
+ fn wait_and_return_output_line(
+ mut output: impl Iterator<Item = std::io::Result<String>>,
+ bypass_test_capture: bool,
+ ) -> Option<String> {
+ if let Some(line_result) = output.next() {
+ let line_result = line_result.expect("failure reading test process logs");
+
+ Self::write_to_test_logs(&line_result, bypass_test_capture);
+
+ return Some(line_result);
+ }
+
+ None
+ }
+
 /// Waits for the child process to exit, then returns its output.
 ///
+ /// # Correctness
+ ///
 /// The other test child output methods take one or both outputs,
 /// making them unavailable to this method.
 ///
 /// Ignores any configured timeouts.
 ///
- /// Returns an error if the child has already been taken,
- /// or both outputs have already been taken.
+ /// Returns an error if the child has already been taken.
+ /// TODO: return an error if both outputs have already been taken.
 #[spandoc::spandoc]
 pub fn wait_with_output(mut self) -> Result<TestOutput<T>> {
 let child = match self.child.take() {
@@ -708,6 +785,8 @@ impl<T> TestChild<T> {
 /// 
 /// Kills the child on error, or after the configured timeout has elapsed.
 /// See [`Self::expect_line_matching_regex_set`] for details.
+ //
+ // TODO: these methods could block if stderr is full and stdout is waiting for stderr to be read
 #[instrument(skip(self))]
 #[allow(clippy::unwrap_in_result)]
 pub fn expect_stdout_line_matches<R>(&mut self, success_regex: R) -> Result<&mut Self>
@@ -1293,6 +1372,11 @@ impl<T> TestOutput<T> {
 fn was_killed(&self) -> bool {
 self.output.status.signal() == Some(9)
 }
+
+ /// Takes the generic `dir` parameter out of this `TestOutput`.
+ pub fn take_dir(&mut self) -> Option { + self.dir.take() + } } /// Add context to an error report diff --git a/zebrad/src/application.rs b/zebrad/src/application.rs index d701bd80870..133465ffa58 100644 --- a/zebrad/src/application.rs +++ b/zebrad/src/application.rs @@ -12,7 +12,9 @@ use abscissa_core::{ use semver::{BuildMetadata, Version}; use zebra_network::constants::PORT_IN_USE_ERROR; -use zebra_state::constants::{DATABASE_FORMAT_VERSION, LOCK_FILE_ERROR}; +use zebra_state::{ + constants::LOCK_FILE_ERROR, database_format_version_in_code, database_format_version_on_disk, +}; use crate::{ commands::EntryPoint, @@ -260,13 +262,32 @@ impl Application for ZebradApp { // collect the common metadata for the issue URL and panic report, // skipping any env vars that aren't present + // reads state disk version file, doesn't open RocksDB database + let disk_db_version = + match database_format_version_on_disk(&config.state, config.network.network) { + Ok(Some(version)) => version.to_string(), + // This "version" is specially formatted to match a relaxed version regex in CI + Ok(None) => "creating.new.database".to_string(), + Err(error) => { + let mut error = format!("error: {error:?}"); + error.truncate(100); + error + } + }; + let app_metadata = vec![ - // cargo or git tag + short commit + // build-time constant: cargo or git tag + short commit ("version", build_version().to_string()), // config ("Zcash network", config.network.network.to_string()), - // constants - ("state version", DATABASE_FORMAT_VERSION.to_string()), + // code constant + ( + "running state version", + database_format_version_in_code().to_string(), + ), + // state disk file, doesn't open database + ("initial disk state version", disk_db_version), + // build-time constant ("features", env!("VERGEN_CARGO_FEATURES").to_string()), ]; diff --git a/zebrad/tests/acceptance.rs b/zebrad/tests/acceptance.rs index 57062d59877..a35c05f5d44 100644 --- a/zebrad/tests/acceptance.rs +++ 
b/zebrad/tests/acceptance.rs @@ -136,6 +136,7 @@ //! ``` use std::{ + cmp::Ordering, collections::HashSet, env, fs, panic, path::PathBuf, @@ -146,6 +147,7 @@ use color_eyre::{ eyre::{eyre, Result, WrapErr}, Help, }; +use semver::Version; use zebra_chain::{ block::{self, Height}, @@ -153,7 +155,7 @@ use zebra_chain::{ }; use zebra_network::constants::PORT_IN_USE_ERROR; use zebra_node_services::rpc_client::RpcRequestClient; -use zebra_state::constants::LOCK_FILE_ERROR; +use zebra_state::{constants::LOCK_FILE_ERROR, database_format_version_in_code}; use zebra_test::{args, command::ContextFrom, net::random_known_port, prelude::*}; @@ -166,8 +168,8 @@ use common::{ config_file_full_path, configs_dir, default_test_config, persistent_test_config, testdir, }, launch::{ - spawn_zebrad_for_rpc, ZebradTestDirExt, BETWEEN_NODES_DELAY, EXTENDED_LAUNCH_DELAY, - LAUNCH_DELAY, + spawn_zebrad_for_rpc, spawn_zebrad_without_rpc, ZebradTestDirExt, BETWEEN_NODES_DELAY, + EXTENDED_LAUNCH_DELAY, LAUNCH_DELAY, }, lightwalletd::{can_spawn_lightwalletd_for_rpc, spawn_lightwalletd_for_rpc}, sync::{ @@ -2089,7 +2091,7 @@ fn zebra_state_conflict() -> Result<()> { dir_conflict_full.push("state"); dir_conflict_full.push(format!( "v{}", - zebra_state::constants::DATABASE_FORMAT_VERSION + zebra_state::database_format_version_in_code().major, )); dir_conflict_full.push(config.network.network.to_string().to_lowercase()); format!( @@ -2381,6 +2383,7 @@ fn end_of_support_is_checked_at_start() -> Result<()> { Ok(()) } + /// Test `zebra-checkpoints` on mainnet. /// /// If you want to run this test individually, see the module documentation. @@ -2403,3 +2406,199 @@ async fn generate_checkpoints_mainnet() -> Result<()> { async fn generate_checkpoints_testnet() -> Result<()> { common::checkpoints::run(Testnet).await } + +/// Check that new states are created with the current state format version, +/// and that restarting `zebrad` doesn't change the format version. 
+#[tokio::test] +async fn new_state_format() -> Result<()> { + for network in [Mainnet, Testnet] { + state_format_test("new_state_format_test", network, 2, None).await?; + } + + Ok(()) +} + +/// Check that outdated states are updated to the current state format version, +/// and that restarting `zebrad` doesn't change the updated format version. +/// +/// TODO: test partial updates, once we have some updates that take a while. +/// (or just add a delay during tests) +#[tokio::test] +async fn update_state_format() -> Result<()> { + let mut fake_version = database_format_version_in_code(); + fake_version.minor = 0; + fake_version.patch = 0; + + for network in [Mainnet, Testnet] { + state_format_test("update_state_format_test", network, 3, Some(&fake_version)).await?; + } + + Ok(()) +} + +/// Check that newer state formats are downgraded to the current state format version, +/// and that restarting `zebrad` doesn't change the format version. +/// +/// Future version compatibility is a best-effort attempt, this test can be disabled if it fails. +#[tokio::test] +async fn downgrade_state_format() -> Result<()> { + let mut fake_version = database_format_version_in_code(); + fake_version.minor = u16::MAX.into(); + fake_version.patch = 0; + + for network in [Mainnet, Testnet] { + state_format_test( + "downgrade_state_format_test", + network, + 3, + Some(&fake_version), + ) + .await?; + } + + Ok(()) +} + +/// Test state format changes, see calling tests for details. +async fn state_format_test( + base_test_name: &str, + network: Network, + reopen_count: usize, + fake_version: Option<&Version>, +) -> Result<()> { + let _init_guard = zebra_test::init(); + + let test_name = &format!("{base_test_name}/new"); + + // # Create a new state and check it has the current version + + let zebrad = spawn_zebrad_without_rpc(network, test_name, false, false, None, false)?; + + // Skip the test unless it has the required state and environmental variables. 
+ let Some(mut zebrad) = zebrad else { + return Ok(()); + }; + + tracing::info!(?network, "running {test_name} using zebrad"); + + zebrad.expect_stdout_line_matches("creating new database with the current format")?; + zebrad.expect_stdout_line_matches("loaded Zebra state cache")?; + + // Give Zebra enough time to actually write the database to disk. + tokio::time::sleep(Duration::from_secs(1)).await; + + let logs = zebrad.kill_and_return_output(false)?; + + assert!( + !logs.contains("marked database format as upgraded"), + "unexpected format upgrade in logs:\n\ + {logs}" + ); + assert!( + !logs.contains("marked database format as downgraded"), + "unexpected format downgrade in logs:\n\ + {logs}" + ); + + let output = zebrad.wait_with_output()?; + let mut output = output.assert_failure()?; + + let mut dir = output + .take_dir() + .expect("dir should not already have been taken"); + + // [Note on port conflict](#Note on port conflict) + output + .assert_was_killed() + .wrap_err("Possible port conflict. Are there other acceptance tests running?")?; + + // # Apply the fake version if needed + let mut expect_older_version = false; + let mut expect_newer_version = false; + + if let Some(fake_version) = fake_version { + let test_name = &format!("{base_test_name}/apply_fake_version/{fake_version}"); + tracing::info!(?network, "running {test_name} using zebra-state"); + + let mut config = UseAnyState + .zebrad_config(test_name, false, Some(dir.path())) + .expect("already checked config")?; + config.network.network = network; + + zebra_state::write_database_format_version_to_disk(fake_version, &config.state, network) + .expect("can't write fake database version to disk"); + + // Give zebra_state enough time to actually write the database version to disk. 
+ tokio::time::sleep(Duration::from_secs(1)).await; + + let running_version = database_format_version_in_code(); + + match fake_version.cmp(&running_version) { + Ordering::Less => expect_older_version = true, + Ordering::Equal => {} + Ordering::Greater => expect_newer_version = true, + } + } + + // # Reopen that state and check the version hasn't changed + + for reopened in 0..reopen_count { + let test_name = &format!("{base_test_name}/reopen/{reopened}"); + + if reopened > 0 { + expect_older_version = false; + expect_newer_version = false; + } + + let mut zebrad = spawn_zebrad_without_rpc(network, test_name, false, false, dir, false)? + .expect("unexpectedly missing required state or env vars"); + + tracing::info!(?network, "running {test_name} using zebrad"); + + if expect_older_version { + zebrad.expect_stdout_line_matches("trying to open older database format")?; + zebrad.expect_stdout_line_matches("marked database format as upgraded")?; + zebrad.expect_stdout_line_matches("database is fully upgraded")?; + } else if expect_newer_version { + zebrad.expect_stdout_line_matches("trying to open newer database format")?; + zebrad.expect_stdout_line_matches("marked database format as downgraded")?; + } else { + zebrad.expect_stdout_line_matches("trying to open current database format")?; + zebrad.expect_stdout_line_matches("loaded Zebra state cache")?; + } + + // Give Zebra enough time to actually write the database to disk. 
+ tokio::time::sleep(Duration::from_secs(1)).await; + + let logs = zebrad.kill_and_return_output(false)?; + + if !expect_older_version { + assert!( + !logs.contains("marked database format as upgraded"), + "unexpected format upgrade in logs:\n\ + {logs}" + ); + } + + if !expect_newer_version { + assert!( + !logs.contains("marked database format as downgraded"), + "unexpected format downgrade in logs:\n\ + {logs}" + ); + } + + let output = zebrad.wait_with_output()?; + let mut output = output.assert_failure()?; + + dir = output + .take_dir() + .expect("dir should not already have been taken"); + + // [Note on port conflict](#Note on port conflict) + output + .assert_was_killed() + .wrap_err("Possible port conflict. Are there other acceptance tests running?")?; + } + Ok(()) +} diff --git a/zebrad/tests/common/cached_state.rs b/zebrad/tests/common/cached_state.rs index 432e7ae5322..284add1c0df 100644 --- a/zebrad/tests/common/cached_state.rs +++ b/zebrad/tests/common/cached_state.rs @@ -11,8 +11,6 @@ use std::{ }; use color_eyre::eyre::{eyre, Result}; -use tempfile::TempDir; -use tokio::fs; use tower::{util::BoxService, Service}; use zebra_chain::{ @@ -25,7 +23,6 @@ use zebra_node_services::rpc_client::RpcRequestClient; use zebra_state::{ChainTipChange, LatestChainTip, MAX_BLOCK_REORG_HEIGHT}; use crate::common::{ - config::testdir, launch::spawn_zebrad_for_rpc, sync::{check_sync_logs_until, MempoolBehavior, SYNC_FINISHED_REGEX}, test_type::TestType, @@ -78,83 +75,6 @@ pub async fn load_tip_height_from_state_directory( Ok(chain_tip_height) } -/// Recursively copy a chain state database directory into a new temporary directory. 
-pub async fn copy_state_directory(network: Network, source: impl AsRef) -> Result { - // Copy the database files for this state and network, excluding testnet and other state versions - let source = source.as_ref(); - let state_config = zebra_state::Config { - cache_dir: source.into(), - ..Default::default() - }; - let source_net_dir = state_config.db_path(network); - let source_net_dir = source_net_dir.as_path(); - let state_suffix = source_net_dir - .strip_prefix(source) - .expect("db_path() is a subdirectory"); - - let destination = testdir()?; - let destination_net_dir = destination.path().join(state_suffix); - - tracing::info!( - ?source, - ?source_net_dir, - ?state_suffix, - ?destination, - ?destination_net_dir, - "copying cached state files (this may take some time)...", - ); - - let mut remaining_directories = vec![PathBuf::from(source_net_dir)]; - - while let Some(directory) = remaining_directories.pop() { - let sub_directories = - copy_directory(&directory, source_net_dir, destination_net_dir.as_ref()).await?; - - remaining_directories.extend(sub_directories); - } - - Ok(destination) -} - -/// Copy the contents of a directory, and return the sub-directories it contains. -/// -/// Copies all files from the `directory` into the destination specified by the concatenation of -/// the `base_destination_path` and `directory` stripped of its `prefix`. -#[tracing::instrument] -async fn copy_directory( - directory: &Path, - prefix: &Path, - base_destination_path: &Path, -) -> Result> { - let mut sub_directories = Vec::new(); - let mut entries = fs::read_dir(directory).await?; - - let destination = - base_destination_path.join(directory.strip_prefix(prefix).expect("Invalid path prefix")); - - fs::create_dir_all(&destination).await?; - - while let Some(entry) = entries.next_entry().await? 
{ - let entry_path = entry.path(); - let file_type = entry.file_type().await?; - - if file_type.is_file() { - let file_name = entry_path.file_name().expect("Missing file name"); - let destination_path = destination.join(file_name); - - fs::copy(&entry_path, destination_path).await?; - } else if file_type.is_dir() { - sub_directories.push(entry_path); - } else if file_type.is_symlink() { - unimplemented!("Symbolic link support is currently not necessary"); - } else { - panic!("Unknown file type"); - } - } - - Ok(sub_directories) -} - /// Accepts a network, test_type, test_name, and num_blocks (how many blocks past the finalized tip to try getting) /// /// Syncs zebra until the tip, gets some blocks near the tip, via getblock rpc calls, diff --git a/zebrad/tests/common/get_block_template_rpcs/get_block_template.rs b/zebrad/tests/common/get_block_template_rpcs/get_block_template.rs index 78631f66bfb..81a8ad4946c 100644 --- a/zebrad/tests/common/get_block_template_rpcs/get_block_template.rs +++ b/zebrad/tests/common/get_block_template_rpcs/get_block_template.rs @@ -21,7 +21,7 @@ use zebra_rpc::methods::get_block_template_rpcs::{ }; use crate::common::{ - launch::{can_spawn_zebrad_for_rpc, spawn_zebrad_for_rpc}, + launch::{can_spawn_zebrad_for_test_type, spawn_zebrad_for_rpc}, sync::{check_sync_logs_until, MempoolBehavior, SYNC_FINISHED_REGEX}, test_type::TestType, }; @@ -66,7 +66,7 @@ pub(crate) async fn run() -> Result<()> { let network = Network::Mainnet; // Skip the test unless the user specifically asked for it and there is a zebrad_state_path - if !can_spawn_zebrad_for_rpc(test_name, test_type) { + if !can_spawn_zebrad_for_test_type(test_name, test_type, true) { return Ok(()); } diff --git a/zebrad/tests/common/get_block_template_rpcs/get_peer_info.rs b/zebrad/tests/common/get_block_template_rpcs/get_peer_info.rs index 30dbe7db3d1..5dd0fd81604 100644 --- a/zebrad/tests/common/get_block_template_rpcs/get_peer_info.rs +++ 
b/zebrad/tests/common/get_block_template_rpcs/get_peer_info.rs @@ -7,7 +7,7 @@ use zebra_node_services::rpc_client::RpcRequestClient; use zebra_rpc::methods::get_block_template_rpcs::types::peer_info::PeerInfo; use crate::common::{ - launch::{can_spawn_zebrad_for_rpc, spawn_zebrad_for_rpc}, + launch::{can_spawn_zebrad_for_test_type, spawn_zebrad_for_rpc}, test_type::TestType, }; @@ -21,7 +21,7 @@ pub(crate) async fn run() -> Result<()> { let network = Network::Mainnet; // Skip the test unless the user specifically asked for it and there is a zebrad_state_path - if !can_spawn_zebrad_for_rpc(test_name, test_type) { + if !can_spawn_zebrad_for_test_type(test_name, test_type, true) { return Ok(()); } @@ -29,7 +29,7 @@ pub(crate) async fn run() -> Result<()> { let (mut zebrad, zebra_rpc_address) = spawn_zebrad_for_rpc(network, test_name, test_type, true)? - .expect("Already checked zebra state path with can_spawn_zebrad_for_rpc"); + .expect("Already checked zebra state path with can_spawn_zebrad_for_test_type"); let rpc_address = zebra_rpc_address.expect("getpeerinfo test must have RPC port"); diff --git a/zebrad/tests/common/get_block_template_rpcs/submit_block.rs b/zebrad/tests/common/get_block_template_rpcs/submit_block.rs index de034460c8d..8e606606389 100644 --- a/zebrad/tests/common/get_block_template_rpcs/submit_block.rs +++ b/zebrad/tests/common/get_block_template_rpcs/submit_block.rs @@ -15,7 +15,7 @@ use zebra_node_services::rpc_client::RpcRequestClient; use crate::common::{ cached_state::get_raw_future_blocks, - launch::{can_spawn_zebrad_for_rpc, spawn_zebrad_for_rpc}, + launch::{can_spawn_zebrad_for_test_type, spawn_zebrad_for_rpc}, test_type::TestType, }; @@ -31,7 +31,7 @@ pub(crate) async fn run() -> Result<()> { let network = Network::Mainnet; // Skip the test unless the user specifically asked for it and there is a zebrad_state_path - if !can_spawn_zebrad_for_rpc(test_name, test_type) { + if !can_spawn_zebrad_for_test_type(test_name, test_type, true) { 
return Ok(()); } @@ -50,7 +50,7 @@ pub(crate) async fn run() -> Result<()> { let should_sync = false; let (mut zebrad, zebra_rpc_address) = spawn_zebrad_for_rpc(network, test_name, test_type, should_sync)? - .expect("Already checked zebra state path with can_spawn_zebrad_for_rpc"); + .expect("Already checked zebra state path with can_spawn_zebrad_for_test_type"); let rpc_address = zebra_rpc_address.expect("submitblock test must have RPC port"); diff --git a/zebrad/tests/common/launch.rs b/zebrad/tests/common/launch.rs index 0ed4899b4f4..315a7e3cb6a 100644 --- a/zebrad/tests/common/launch.rs +++ b/zebrad/tests/common/launch.rs @@ -7,13 +7,13 @@ use std::{ env, + fmt::Debug, net::SocketAddr, path::{Path, PathBuf}, time::Duration, }; use color_eyre::eyre::Result; -use indexmap::IndexSet; use tempfile::TempDir; use zebra_chain::parameters::Network; @@ -221,7 +221,7 @@ where /// /// `zebra_rpc_address` is `None` if the test type doesn't need an RPC port. #[tracing::instrument] -pub fn spawn_zebrad_for_rpc + std::fmt::Debug>( +pub fn spawn_zebrad_for_rpc + Debug>( network: Network, test_name: S, test_type: TestType, @@ -230,25 +230,16 @@ pub fn spawn_zebrad_for_rpc + std::fmt::Debug>( let test_name = test_name.as_ref(); // Skip the test unless the user specifically asked for it - if !can_spawn_zebrad_for_rpc(test_name, test_type) { + if !can_spawn_zebrad_for_test_type(test_name, test_type, use_internet_connection) { return Ok(None); } // Get the zebrad config let mut config = test_type - .zebrad_config(test_name) + .zebrad_config(test_name, use_internet_connection, None) .expect("already checked config")?; - // TODO: move this into zebrad_config() config.network.network = network; - if !use_internet_connection { - config.network.initial_mainnet_peers = IndexSet::new(); - config.network.initial_testnet_peers = IndexSet::new(); - // Avoid re-using cached peers from disk when we're supposed to be a disconnected instance - config.network.cache_dir = CacheDir::disabled(); - - 
config.mempool.debug_enable_at_height = Some(0); - } let (zebrad_failure_messages, zebrad_ignore_messages) = test_type.zebrad_failure_messages(); @@ -264,13 +255,90 @@ pub fn spawn_zebrad_for_rpc + std::fmt::Debug>( Ok(Some((zebrad, config.rpc.listen_addr))) } +/// Spawns a zebrad instance on `network` without RPCs or `lightwalletd`. +/// +/// If `use_cached_state` is `true`, then update the cached state to the tip. +/// If `ephemeral` is `true`, then use an ephemeral state path. +/// If `reuse_state_path` is `Some(path)`, then use the state at that path, and take ownership of +/// the temporary directory, so it isn't deleted until the test ends. +/// Otherwise, just create an empty state in this test's new temporary directory. +/// +/// If `use_internet_connection` is `false` then spawn, but without any peers. +/// This prevents it from downloading blocks. Instead, use the `ZEBRA_CACHED_STATE_DIR` +/// environmental variable to provide an initial state to the zebrad instance. +/// +/// Returns: +/// - `Ok(Some(zebrad))` on success, +/// - `Ok(None)` if the test doesn't have the required network or cached state, and +/// - `Err(_)` if spawning zebrad fails. 
+#[tracing::instrument] +pub fn spawn_zebrad_without_rpc( + network: Network, + test_name: Str, + use_cached_state: bool, + ephemeral: bool, + reuse_state_path: Dir, + use_internet_connection: bool, +) -> Result>> +where + Str: AsRef + Debug, + Dir: Into> + Debug, +{ + use TestType::*; + + let test_name = test_name.as_ref(); + + let reuse_state_path = reuse_state_path.into(); + let testdir = reuse_state_path + .unwrap_or_else(|| testdir().expect("failed to create test temporary directory")); + + let (test_type, replace_cache_dir) = if use_cached_state { + (UpdateZebraCachedStateNoRpc, None) + } else if ephemeral { + ( + LaunchWithEmptyState { + launches_lightwalletd: false, + }, + None, + ) + } else { + (UseAnyState, Some(testdir.path())) + }; + + // Skip the test unless the user specifically asked for it + if !can_spawn_zebrad_for_test_type(test_name, test_type, use_internet_connection) { + return Ok(None); + } + + // Get the zebrad config + let mut config = test_type + .zebrad_config(test_name, use_internet_connection, replace_cache_dir) + .expect("already checked config")?; + + config.network.network = network; + + let (zebrad_failure_messages, zebrad_ignore_messages) = test_type.zebrad_failure_messages(); + + // Writes a configuration that does not have RPC listen_addr set. + // If the state path env var is set, uses it in the config. + let zebrad = testdir + .with_exact_config(&config)? + .spawn_child(args!["start"])? + .bypass_test_capture(true) + .with_timeout(test_type.zebrad_timeout()) + .with_failure_regex_iter(zebrad_failure_messages, zebrad_ignore_messages); + + Ok(Some(zebrad)) +} + /// Returns `true` if a zebrad test for `test_type` has everything it needs to run. 
#[tracing::instrument] -pub fn can_spawn_zebrad_for_rpc + std::fmt::Debug>( +pub fn can_spawn_zebrad_for_test_type + Debug>( test_name: S, test_type: TestType, + use_internet_connection: bool, ) -> bool { - if zebra_test::net::zebra_skip_network_tests() { + if use_internet_connection && zebra_test::net::zebra_skip_network_tests() { return false; } @@ -281,8 +349,9 @@ pub fn can_spawn_zebrad_for_rpc + std::fmt::Debug>( return false; } - // Check if we have any necessary cached states for the zebrad config - test_type.zebrad_config(test_name).is_some() + // Check if we have any necessary cached states for the zebrad config. + // The cache_dir value doesn't matter here. + test_type.zebrad_config(test_name, true, None).is_some() } /// Panics if `$pred` is false, with an error report containing: diff --git a/zebrad/tests/common/lightwalletd/send_transaction_test.rs b/zebrad/tests/common/lightwalletd/send_transaction_test.rs index c58c7b8386e..72b99d57f94 100644 --- a/zebrad/tests/common/lightwalletd/send_transaction_test.rs +++ b/zebrad/tests/common/lightwalletd/send_transaction_test.rs @@ -30,7 +30,7 @@ use zebrad::components::mempool::downloads::MAX_INBOUND_CONCURRENCY; use crate::common::{ cached_state::get_future_blocks, - launch::{can_spawn_zebrad_for_rpc, spawn_zebrad_for_rpc}, + launch::{can_spawn_zebrad_for_test_type, spawn_zebrad_for_rpc}, lightwalletd::{ can_spawn_lightwalletd_for_rpc, spawn_lightwalletd_for_rpc, sync::wait_for_zebrad_and_lightwalletd_sync, @@ -62,7 +62,7 @@ pub async fn run() -> Result<()> { let network = Mainnet; // Skip the test unless the user specifically asked for it - if !can_spawn_zebrad_for_rpc(test_name, test_type) { + if !can_spawn_zebrad_for_test_type(test_name, test_type, true) { return Ok(()); } diff --git a/zebrad/tests/common/sync.rs b/zebrad/tests/common/sync.rs index d7bc91d352c..dd0a1390294 100644 --- a/zebrad/tests/common/sync.rs +++ b/zebrad/tests/common/sync.rs @@ -5,10 +5,7 @@ //! 
Test functions in this file will not be run. //! This file is only for test library code. -use std::{ - path::{Path, PathBuf}, - time::Duration, -}; +use std::{path::PathBuf, time::Duration}; use color_eyre::eyre::Result; use tempfile::TempDir; @@ -19,7 +16,6 @@ use zebrad::{components::sync, config::ZebradConfig}; use zebra_test::{args, prelude::*}; use super::{ - cached_state::copy_state_directory, config::{persistent_test_config, testdir}, launch::ZebradTestDirExt, }; @@ -341,30 +337,6 @@ pub fn check_sync_logs_until( Ok(zebrad) } -/// Runs a zebrad instance to synchronize the chain to the network tip. -/// -/// The zebrad instance is executed on a copy of the partially synchronized chain state. This copy -/// is returned afterwards, containing the fully synchronized chain state. -#[allow(dead_code)] -#[tracing::instrument] -pub async fn copy_state_and_perform_full_sync( - network: Network, - partial_sync_path: &Path, -) -> Result { - let fully_synced_path = copy_state_directory(network, &partial_sync_path).await?; - - sync_until( - Height::MAX, - network, - SYNC_FINISHED_REGEX, - FINISH_PARTIAL_SYNC_TIMEOUT, - fully_synced_path, - MempoolBehavior::ShouldAutomaticallyActivate, - true, - false, - ) -} - /// Returns a test config for caching Zebra's state up to the mandatory checkpoint. pub fn cached_mandatory_checkpoint_test_config() -> Result { let mut config = persistent_test_config()?; diff --git a/zebrad/tests/common/test_type.rs b/zebrad/tests/common/test_type.rs index a94420160b9..adb5fd4b897 100644 --- a/zebrad/tests/common/test_type.rs +++ b/zebrad/tests/common/test_type.rs @@ -1,7 +1,14 @@ //! 
Provides TestType enum with shared code for acceptance tests -use std::{env, path::PathBuf, time::Duration}; +use std::{ + env, + path::{Path, PathBuf}, + time::Duration, +}; + +use indexmap::IndexSet; +use zebra_network::CacheDir; use zebra_test::{command::NO_MATCHES_REGEX_ITER, prelude::*}; use zebrad::config::ZebradConfig; @@ -41,6 +48,9 @@ pub enum TestType { allow_lightwalletd_cached_state: bool, }, + /// Launch with a Zebra and lightwalletd state that might or might not be empty. + UseAnyState, + /// Sync to tip from a lightwalletd cached state. /// /// This test requires a cached Zebra and lightwalletd state. @@ -69,7 +79,7 @@ impl TestType { // - FullSyncFromGenesis, UpdateCachedState, UpdateZebraCachedStateNoRpc: // skip the test if it is not available match self { - LaunchWithEmptyState { .. } => false, + LaunchWithEmptyState { .. } | UseAnyState => false, FullSyncFromGenesis { .. } | UpdateCachedState | UpdateZebraCachedStateNoRpc @@ -81,16 +91,17 @@ impl TestType { pub fn needs_zebra_rpc_server(&self) -> bool { match self { UpdateZebraCachedStateWithRpc | LaunchWithEmptyState { .. } => true, - UpdateZebraCachedStateNoRpc | FullSyncFromGenesis { .. } | UpdateCachedState => { - self.launches_lightwalletd() - } + UseAnyState + | UpdateZebraCachedStateNoRpc + | FullSyncFromGenesis { .. } + | UpdateCachedState => self.launches_lightwalletd(), } } /// Does this test launch `lightwalletd`? pub fn launches_lightwalletd(&self) -> bool { match self { - UpdateZebraCachedStateNoRpc | UpdateZebraCachedStateWithRpc => false, + UseAnyState | UpdateZebraCachedStateNoRpc | UpdateZebraCachedStateWithRpc => false, FullSyncFromGenesis { .. } | UpdateCachedState => true, LaunchWithEmptyState { launches_lightwalletd, @@ -106,6 +117,7 @@ impl TestType { // - UpdateCachedState: skip the test if it is not available match self { LaunchWithEmptyState { .. } + | UseAnyState | FullSyncFromGenesis { .. 
} | UpdateZebraCachedStateNoRpc | UpdateZebraCachedStateWithRpc => false, @@ -120,14 +132,17 @@ impl TestType { FullSyncFromGenesis { allow_lightwalletd_cached_state, } => *allow_lightwalletd_cached_state, - UpdateCachedState | UpdateZebraCachedStateNoRpc | UpdateZebraCachedStateWithRpc => true, + UseAnyState + | UpdateCachedState + | UpdateZebraCachedStateNoRpc + | UpdateZebraCachedStateWithRpc => true, } } /// Can this test create a new `LIGHTWALLETD_DATA_DIR` cached state? pub fn can_create_lightwalletd_cached_state(&self) -> bool { match self { - LaunchWithEmptyState { .. } => false, + LaunchWithEmptyState { .. } | UseAnyState => false, FullSyncFromGenesis { .. } | UpdateCachedState => true, UpdateZebraCachedStateNoRpc | UpdateZebraCachedStateWithRpc => false, } @@ -152,9 +167,16 @@ impl TestType { /// Returns a Zebra config for this test. /// + /// `replace_cache_dir` replaces any cached or ephemeral state. + /// /// Returns `None` if the test should be skipped, /// and `Some(Err(_))` if the config could not be created. - pub fn zebrad_config>(&self, test_name: S) -> Option> { + pub fn zebrad_config>( + &self, + test_name: Str, + use_internet_connection: bool, + replace_cache_dir: Option<&Path>, + ) -> Option> { let config = if self.needs_zebra_rpc_server() { // This is what we recommend our users configure. 
random_known_rpc_port_config(true) @@ -177,22 +199,35 @@ impl TestType { config.rpc.parallel_cpu_threads = 0; } - if !self.needs_zebra_cached_state() { - return Some(Ok(config)); + if !use_internet_connection { + config.network.initial_mainnet_peers = IndexSet::new(); + config.network.initial_testnet_peers = IndexSet::new(); + // Avoid re-using cached peers from disk when we're supposed to be a disconnected instance + config.network.cache_dir = CacheDir::disabled(); + + // Activate the mempool immediately by default + config.mempool.debug_enable_at_height = Some(0); } + // Add a fake miner address for mining RPCs #[cfg(feature = "getblocktemplate-rpcs")] let _ = config.mining.miner_address.insert( zebra_chain::transparent::Address::from_script_hash(config.network.network, [0x7e; 20]), ); - let zebra_state_path = self.zebrad_state_path(test_name)?; + // If we have a cached state, or we don't want to be ephemeral, update the config to use it + if replace_cache_dir.is_some() || self.needs_zebra_cached_state() { + let zebra_state_path = replace_cache_dir + .map(|path| path.to_owned()) + .or_else(|| self.zebrad_state_path(test_name))?; - config.sync.checkpoint_verify_concurrency_limit = - zebrad::components::sync::DEFAULT_CHECKPOINT_CONCURRENCY_LIMIT; + config.state.ephemeral = false; + config.state.cache_dir = zebra_state_path; - config.state.ephemeral = false; - config.state.cache_dir = zebra_state_path; + // And reset the concurrency to the default value + config.sync.checkpoint_verify_concurrency_limit = + zebrad::components::sync::DEFAULT_CHECKPOINT_CONCURRENCY_LIMIT; + } Some(Ok(config)) } @@ -237,7 +272,7 @@ impl TestType { /// Returns the `zebrad` timeout for this test type. pub fn zebrad_timeout(&self) -> Duration { match self { - LaunchWithEmptyState { .. } => LIGHTWALLETD_DELAY, + LaunchWithEmptyState { .. } | UseAnyState => LIGHTWALLETD_DELAY, FullSyncFromGenesis { .. 
} => LIGHTWALLETD_FULL_SYNC_TIP_DELAY, UpdateCachedState | UpdateZebraCachedStateNoRpc => LIGHTWALLETD_UPDATE_TIP_DELAY, UpdateZebraCachedStateWithRpc => FINISH_PARTIAL_SYNC_TIMEOUT, @@ -254,7 +289,7 @@ impl TestType { // We use the same timeouts for zebrad and lightwalletd, // because the tests check zebrad and lightwalletd concurrently. match self { - LaunchWithEmptyState { .. } => LIGHTWALLETD_DELAY, + LaunchWithEmptyState { .. } | UseAnyState => LIGHTWALLETD_DELAY, FullSyncFromGenesis { .. } => LIGHTWALLETD_FULL_SYNC_TIP_DELAY, UpdateCachedState | UpdateZebraCachedStateNoRpc | UpdateZebraCachedStateWithRpc => { LIGHTWALLETD_UPDATE_TIP_DELAY From 7b0dedd3a28b3bcb24214ecae7d00408e5349749 Mon Sep 17 00:00:00 2001 From: Arya Date: Thu, 13 Jul 2023 22:26:46 -0400 Subject: [PATCH 214/265] fix(network): Rate-limit inbound connections per IP. (#7041) * Adds RecentByIp * Removes new [cfg(not(test))]s, supports configurable max_conn_per_ip in RecentByIp and account_inbound_connections Updates tests * Uses self.time_limit instead of constant * Adds sleep after dropping connections Uses partition_point & split_off Moves tests to separate module * Apply suggestions from code review Co-authored-by: teor * Always prune before adding * Tweak comments * Move the time calculation outside the binary search closure --------- Co-authored-by: teor --- zebra-network/src/peer_set/initialize.rs | 9 ++ .../src/peer_set/initialize/recent_by_ip.rs | 94 +++++++++++++++++++ .../peer_set/initialize/recent_by_ip/tests.rs | 69 ++++++++++++++ .../src/peer_set/initialize/tests/vectors.rs | 31 ++++-- 4 files changed, 194 insertions(+), 9 deletions(-) create mode 100644 zebra-network/src/peer_set/initialize/recent_by_ip.rs create mode 100644 zebra-network/src/peer_set/initialize/recent_by_ip/tests.rs diff --git a/zebra-network/src/peer_set/initialize.rs b/zebra-network/src/peer_set/initialize.rs index d3b99621f79..3dabd7279b5 100644 --- a/zebra-network/src/peer_set/initialize.rs +++ 
b/zebra-network/src/peer_set/initialize.rs @@ -51,6 +51,8 @@ use crate::{ #[cfg(test)] mod tests; +mod recent_by_ip; + /// A successful outbound peer connection attempt or inbound connection handshake. /// /// The [`Handshake`](peer::Handshake) service returns a [`Result`]. Only successful connections @@ -576,6 +578,9 @@ where + Clone, S::Future: Send + 'static, { + let mut recent_inbound_connections = + recent_by_ip::RecentByIp::new(None, Some(config.max_connections_per_ip)); + let mut active_inbound_connections = ActiveConnectionCounter::new_counter_with( config.peerset_inbound_connection_limit(), "Inbound Connections", @@ -605,10 +610,14 @@ where if active_inbound_connections.update_count() >= config.peerset_inbound_connection_limit() + || recent_inbound_connections.is_past_limit_or_add(addr.ip()) { // Too many open inbound connections or pending handshakes already. // Close the connection. std::mem::drop(tcp_stream); + // Allow invalid connections to be cleared quickly, + // but still put a limit on our CPU and network usage from failed connections. + tokio::time::sleep(constants::MIN_INBOUND_PEER_FAILED_CONNECTION_INTERVAL).await; continue; } diff --git a/zebra-network/src/peer_set/initialize/recent_by_ip.rs b/zebra-network/src/peer_set/initialize/recent_by_ip.rs new file mode 100644 index 00000000000..b2fcf7502ff --- /dev/null +++ b/zebra-network/src/peer_set/initialize/recent_by_ip.rs @@ -0,0 +1,94 @@ +//! A set of IPs from recent connection attempts. + +use std::{ + collections::{HashMap, VecDeque}, + net::IpAddr, + time::{Duration, Instant}, +}; + +use crate::constants; + +#[cfg(test)] +mod tests; + +#[derive(Debug)] +/// Stores IPs of recently attempted inbound connections. +pub struct RecentByIp { + /// The list of IPs in decreasing connection age order. + pub by_time: VecDeque<(IpAddr, Instant)>, + + /// Stores IPs for recently attempted inbound connections. 
+ pub by_ip: HashMap, + + /// The maximum number of peer connections Zebra will keep for a given IP address + /// before it drops any additional peer connections with that IP. + pub max_connections_per_ip: usize, + + /// The duration to wait after an entry is added before removing it. + pub time_limit: Duration, +} + +impl Default for RecentByIp { + fn default() -> Self { + Self::new(None, None) + } +} + +impl RecentByIp { + /// Creates a new [`RecentByIp`] + pub fn new(time_limit: Option, max_connections_per_ip: Option) -> Self { + let (by_time, by_ip) = Default::default(); + Self { + by_time, + by_ip, + time_limit: time_limit.unwrap_or(constants::MIN_PEER_RECONNECTION_DELAY), + max_connections_per_ip: max_connections_per_ip + .unwrap_or(constants::DEFAULT_MAX_CONNS_PER_IP), + } + } + + /// Prunes outdated entries, checks if there's a recently attempted inbound connection with + /// this IP, and adds the entry to `by_time`, and `by_ip` if needed. + /// + /// Returns true if the recently attempted inbound connection count is past the configured limit. + pub fn is_past_limit_or_add(&mut self, ip: IpAddr) -> bool { + let now = Instant::now(); + self.prune_by_time(now); + + let count = self.by_ip.entry(ip).or_default(); + if *count >= self.max_connections_per_ip { + true + } else { + *count += 1; + self.by_time.push_back((ip, now)); + false + } + } + + /// Prunes entries older than `time_limit`, decrementing or removing their counts in `by_ip`. + fn prune_by_time(&mut self, now: Instant) { + // Currently saturates to zero: + // + // + // This discards the whole structure if the time limit is very large, + // which is unexpected, but stops this list growing without limit. + // After the handshake, the peer set will remove any duplicate connections over the limit. + let age_limit = now - self.time_limit; + + // `by_time` must be sorted for this to work. 
+ let split_off_idx = self.by_time.partition_point(|&(_, time)| time <= age_limit); + + let updated_by_time = self.by_time.split_off(split_off_idx); + + for (ip, _) in &self.by_time { + if let Some(count) = self.by_ip.get_mut(ip) { + *count -= 1; + if *count == 0 { + self.by_ip.remove(ip); + } + } + } + + self.by_time = updated_by_time; + } +} diff --git a/zebra-network/src/peer_set/initialize/recent_by_ip/tests.rs b/zebra-network/src/peer_set/initialize/recent_by_ip/tests.rs new file mode 100644 index 00000000000..e5a589cd3ee --- /dev/null +++ b/zebra-network/src/peer_set/initialize/recent_by_ip/tests.rs @@ -0,0 +1,69 @@ +//! Fixed test vectors for recent IP limits. + +use std::time::Duration; + +use crate::peer_set::initialize::recent_by_ip::RecentByIp; + +#[test] +fn old_connection_attempts_are_pruned() { + const TEST_TIME_LIMIT: Duration = Duration::from_secs(5); + + let _init_guard = zebra_test::init(); + + let mut recent_connections = RecentByIp::new(Some(TEST_TIME_LIMIT), None); + let ip = "127.0.0.1".parse().expect("should parse"); + + assert!( + !recent_connections.is_past_limit_or_add(ip), + "should not be past limit" + ); + assert!( + recent_connections.is_past_limit_or_add(ip), + "should be past max_connections_per_ip limit" + ); + + std::thread::sleep(TEST_TIME_LIMIT / 3); + + assert!( + recent_connections.is_past_limit_or_add(ip), + "should still contain entry after a third of the time limit" + ); + + std::thread::sleep(3 * TEST_TIME_LIMIT / 4); + + assert!( + !recent_connections.is_past_limit_or_add(ip), + "should prune entry after 13/12 * time_limit" + ); + + const TEST_MAX_CONNS_PER_IP: usize = 3; + + let mut recent_connections = + RecentByIp::new(Some(TEST_TIME_LIMIT), Some(TEST_MAX_CONNS_PER_IP)); + + for _ in 0..TEST_MAX_CONNS_PER_IP { + assert!( + !recent_connections.is_past_limit_or_add(ip), + "should not be past limit" + ); + } + + assert!( + recent_connections.is_past_limit_or_add(ip), + "should be past max_connections_per_ip limit" + ); + + 
std::thread::sleep(TEST_TIME_LIMIT / 3); + + assert!( + recent_connections.is_past_limit_or_add(ip), + "should still be past limit after a third of the reconnection delay" + ); + + std::thread::sleep(3 * TEST_TIME_LIMIT / 4); + + assert!( + !recent_connections.is_past_limit_or_add(ip), + "should prune entry after 13/12 * time_limit" + ); +} diff --git a/zebra-network/src/peer_set/initialize/tests/vectors.rs b/zebra-network/src/peer_set/initialize/tests/vectors.rs index 73c15077689..c871ab43227 100644 --- a/zebra-network/src/peer_set/initialize/tests/vectors.rs +++ b/zebra-network/src/peer_set/initialize/tests/vectors.rs @@ -727,7 +727,7 @@ async fn listener_peer_limit_zero_handshake_panic() { }); let (_config, mut peerset_rx) = - spawn_inbound_listener_with_peer_limit(0, unreachable_inbound_handshaker).await; + spawn_inbound_listener_with_peer_limit(0, None, unreachable_inbound_handshaker).await; let peer_result = peerset_rx.try_next(); assert!( @@ -752,7 +752,7 @@ async fn listener_peer_limit_one_handshake_error() { service_fn(|_| async { Err("test inbound handshaker always returns errors".into()) }); let (_config, mut peerset_rx) = - spawn_inbound_listener_with_peer_limit(1, error_inbound_handshaker).await; + spawn_inbound_listener_with_peer_limit(1, None, error_inbound_handshaker).await; let peer_result = peerset_rx.try_next(); assert!( @@ -794,8 +794,12 @@ async fn listener_peer_limit_one_handshake_ok_then_drop() { Ok(fake_client) }); - let (config, mut peerset_rx) = - spawn_inbound_listener_with_peer_limit(1, success_disconnect_inbound_handshaker).await; + let (config, mut peerset_rx) = spawn_inbound_listener_with_peer_limit( + 1, + usize::MAX, + success_disconnect_inbound_handshaker, + ) + .await; let mut peer_count: usize = 0; loop { @@ -853,7 +857,7 @@ async fn listener_peer_limit_one_handshake_ok_stay_open() { }); let (config, mut peerset_rx) = - spawn_inbound_listener_with_peer_limit(1, success_stay_open_inbound_handshaker).await; + 
spawn_inbound_listener_with_peer_limit(1, None, success_stay_open_inbound_handshaker).await; let mut peer_change_count: usize = 0; loop { @@ -917,7 +921,7 @@ async fn listener_peer_limit_default_handshake_error() { service_fn(|_| async { Err("test inbound handshaker always returns errors".into()) }); let (_config, mut peerset_rx) = - spawn_inbound_listener_with_peer_limit(None, error_inbound_handshaker).await; + spawn_inbound_listener_with_peer_limit(None, None, error_inbound_handshaker).await; let peer_result = peerset_rx.try_next(); assert!( @@ -963,8 +967,12 @@ async fn listener_peer_limit_default_handshake_ok_then_drop() { Ok(fake_client) }); - let (config, mut peerset_rx) = - spawn_inbound_listener_with_peer_limit(None, success_disconnect_inbound_handshaker).await; + let (config, mut peerset_rx) = spawn_inbound_listener_with_peer_limit( + None, + usize::MAX, + success_disconnect_inbound_handshaker, + ) + .await; let mut peer_count: usize = 0; loop { @@ -1022,7 +1030,8 @@ async fn listener_peer_limit_default_handshake_ok_stay_open() { }); let (config, mut peerset_rx) = - spawn_inbound_listener_with_peer_limit(None, success_stay_open_inbound_handshaker).await; + spawn_inbound_listener_with_peer_limit(None, None, success_stay_open_inbound_handshaker) + .await; let mut peer_change_count: usize = 0; loop { @@ -1609,6 +1618,7 @@ where /// Returns the generated [`Config`], and the peer set receiver. 
async fn spawn_inbound_listener_with_peer_limit( peerset_initial_target_size: impl Into>, + max_connections_per_ip: impl Into>, listen_handshaker: S, ) -> (Config, mpsc::Receiver) where @@ -1623,6 +1633,9 @@ where let listen_addr = "127.0.0.1:0".parse().unwrap(); let mut config = Config { listen_addr, + max_connections_per_ip: max_connections_per_ip + .into() + .unwrap_or(constants::DEFAULT_MAX_CONNS_PER_IP), ..Config::default() }; From f1ee502ddf2bf8ffcdb60dbabdc91885f3d723c9 Mon Sep 17 00:00:00 2001 From: Marek Date: Sat, 15 Jul 2023 18:24:51 +0200 Subject: [PATCH 215/265] Fix the position of coinbase tx (#7217) --- zebra-state/src/tests.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/zebra-state/src/tests.rs b/zebra-state/src/tests.rs index 1133542df1a..488ab4227bd 100644 --- a/zebra-state/src/tests.rs +++ b/zebra-state/src/tests.rs @@ -41,7 +41,7 @@ impl FakeChainHelper for Arc { _ => panic!("block must have a coinbase height to create a child"), } - child.transactions.push(tx); + child.transactions.insert(0, tx); Arc::make_mut(&mut child.header).previous_block_hash = parent_hash; Arc::new(child) From 3faef29d232fb41ddef72d4cf30cb240f83eaca5 Mon Sep 17 00:00:00 2001 From: Gustavo Valverde Date: Mon, 17 Jul 2023 21:03:17 +0100 Subject: [PATCH 216/265] ref(docker): organize `ENV` and `ARG` values based on their usage (#7200) * ref(docker): use a single variable for test features * ref(docker): scope `ARG`s and `ENV`s correctly * fix(docker): use variables as expected on test build * fix(docker): use correct `$RPC_PORT` validation * revert(docker): revert to using extra `ENTRYPOINT_FEATURES` * fix(rust): missing features replacements * fix(docker): enable backtraces for errors and panics This is a costly function!! 
* ref(docker): remove `$NETWORK` as an `ARG` * fix typo * Apply suggestions from code review Co-authored-by: Marek --------- Co-authored-by: Marek --- docker/Dockerfile | 66 +++++++++++++++++++----------------- docker/entrypoint.sh | 3 +- docker/runtime-entrypoint.sh | 2 +- 3 files changed, 38 insertions(+), 33 deletions(-) diff --git a/docker/Dockerfile b/docker/Dockerfile index c2ec9b2af5f..4c2c8e98262 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -8,6 +8,15 @@ # - release: builds release binary # - runtime: is our runtime environment # +# We first set default values for build arguments used across the stages. +# Each stage must define the build arguments (ARGs) it uses. +# +# Build zebrad with these features +# Keep these in sync with: +# https://github.com/ZcashFoundation/zebra/blob/main/.github/workflows/build-docker-image.yml#L37 +ARG FEATURES="default-release-binaries" +ARG TEST_FEATURES="lightwalletd-grpc-tests zebra-checkpoints" + # This stage implements cargo-chef for docker layer caching FROM rust:bullseye as chef RUN cargo install cargo-chef --locked @@ -58,39 +67,20 @@ RUN if [ "$(uname -m)" != "aarch64" ]; then \ && \ rm -rf /var/lib/apt/lists/* /tmp/* -# TODO: just keep the backtrace, colorbt, rust_log, and cargo_home variables as those are the only needed at build time. -# Build arguments and variables set to change how tests are run, tracelog levels, -# and Network to be used (Mainnet or Testnet) +# Build arguments and variables set for tracelog levels and debug information # # We set defaults to all variables. 
ARG RUST_BACKTRACE -ENV RUST_BACKTRACE=${RUST_BACKTRACE:-0} +ENV RUST_BACKTRACE=${RUST_BACKTRACE:-1} ARG RUST_LIB_BACKTRACE -ENV RUST_LIB_BACKTRACE=${RUST_LIB_BACKTRACE:-0} +ENV RUST_LIB_BACKTRACE=${RUST_LIB_BACKTRACE:-1} ARG COLORBT_SHOW_HIDDEN -ENV COLORBT_SHOW_HIDDEN=${COLORBT_SHOW_HIDDEN:-0} - -ARG RUST_LOG -ENV RUST_LOG=${RUST_LOG:-info} - -# Skip IPv6 tests by default, as some CI environment don't have IPv6 available -ARG ZEBRA_SKIP_IPV6_TESTS -ENV ZEBRA_SKIP_IPV6_TESTS=${ZEBRA_SKIP_IPV6_TESTS:-1} - -# Build zebrad with these features -# Keep these in sync with: -# https://github.com/ZcashFoundation/zebra/blob/main/.github/workflows/build-docker-image.yml#L42 -ARG FEATURES="default-release-binaries" -ARG TEST_FEATURES="lightwalletd-grpc-tests zebra-checkpoints" -# Use ENTRYPOINT_FEATURES to override the specific features used to run tests in entrypoint.sh, -# separately from the test and production image builds. -ENV ENTRYPOINT_FEATURES="$TEST_FEATURES $FEATURES" +ENV COLORBT_SHOW_HIDDEN=${COLORBT_SHOW_HIDDEN:-1} -# Use default network value if none is provided -ARG NETWORK -ENV NETWORK=${NETWORK:-Mainnet} +ARG SHORT_SHA +ENV SHORT_SHA=${SHORT_SHA:-unknown} ENV CARGO_HOME="/opt/zebrad/.cargo/" @@ -108,6 +98,16 @@ COPY --from=us-docker.pkg.dev/zealous-zebra/zebra/lightwalletd /opt/lightwalletd # unmodified source files < previous build cache < modified source files COPY . . +# Skip IPv6 tests by default, as some CI environment don't have IPv6 available +ARG ZEBRA_SKIP_IPV6_TESTS +ENV ZEBRA_SKIP_IPV6_TESTS=${ZEBRA_SKIP_IPV6_TESTS:-1} + +# Use ENTRYPOINT_FEATURES to override the specific features used to run tests in entrypoint.sh, +# separately from the test and production image builds. 
+ARG FEATURES +ARG TEST_FEATURES +ARG ENTRYPOINT_FEATURES="${FEATURES} ${TEST_FEATURES}" + # Re-hydrate the minimum project skeleton identified by `cargo chef prepare` in the planner stage, # over the top of the original source files, # and build it to cache all possible sentry and test dependencies. @@ -116,7 +116,7 @@ COPY . . # It creates fake empty test binaries so dependencies are built, but Zebra is not fully built. # # TODO: add --locked when cargo-chef supports it -RUN cargo chef cook --tests --release --features "${TEST_FEATURES} ${FEATURES}" --workspace --recipe-path recipe.json +RUN cargo chef cook --tests --release --features "${ENTRYPOINT_FEATURES}" --workspace --recipe-path recipe.json # Undo the source file changes made by cargo-chef. # rsync invalidates the cargo cache for the changed files only, by updating their timestamps. @@ -126,13 +126,16 @@ RUN rsync --recursive --checksum --itemize-changes --verbose zebra-original/ . RUN rm -r zebra-original # Build Zebra test binaries, but don't run them -RUN cargo test --locked --release --features "${TEST_FEATURES} ${FEATURES}" --workspace --no-run +RUN cargo test --locked --release --features "${ENTRYPOINT_FEATURES}" --workspace --no-run RUN cp /opt/zebrad/target/release/zebrad /usr/local/bin RUN cp /opt/zebrad/target/release/zebra-checkpoints /usr/local/bin COPY ./docker/entrypoint.sh / RUN chmod u+x /entrypoint.sh +# Entrypoint environment variables +ENV ENTRYPOINT_FEATURES=${ENTRYPOINT_FEATURES} + # By default, runs the entrypoint tests specified by the environmental variables (if any are set) ENTRYPOINT [ "/entrypoint.sh" ] @@ -145,6 +148,8 @@ FROM deps AS release COPY . . +ARG FEATURES + # This is the caching layer for Rust zebrad builds. # It creates a fake empty zebrad binary, see above for details. 
# @@ -175,10 +180,9 @@ RUN apt-get update && \ apt-get install -y --no-install-recommends \ ca-certificates -# Config settings - -ARG NETWORK -ENV NETWORK=${NETWORK:-Mainnet} +# Config settings for zebrad +ARG FEATURES +ENV FEATURES=${FEATURES} # Expose configured ports EXPOSE 8233 18233 diff --git a/docker/entrypoint.sh b/docker/entrypoint.sh index 653122caf43..9afb9b78e23 100755 --- a/docker/entrypoint.sh +++ b/docker/entrypoint.sh @@ -7,8 +7,9 @@ set -e # exit if any command in a pipeline fails set -o pipefail -# TODO: expand this section if needed (#4363) +: "${NETWORK:=Mainnet}" echo "Test variables:" +echo "NETWORK=$NETWORK" echo "ZEBRA_TEST_LIGHTWALLETD=$ZEBRA_TEST_LIGHTWALLETD" echo "Hard-coded Zebra full sync directory: /zebrad-cache" echo "ZEBRA_CACHED_STATE_DIR=$ZEBRA_CACHED_STATE_DIR" diff --git a/docker/runtime-entrypoint.sh b/docker/runtime-entrypoint.sh index c111579aac7..613619795f5 100755 --- a/docker/runtime-entrypoint.sh +++ b/docker/runtime-entrypoint.sh @@ -68,7 +68,7 @@ endpoint_addr = "${METRICS_ENDPOINT_ADDR}:${METRICS_ENDPOINT_PORT}" EOF fi -if [[ "${RPC_PORT}" ]]; then +if [[ -n "${RPC_PORT}" ]]; then cat <> "${ZEBRA_CONF_PATH}" [rpc] listen_addr = "${RPC_LISTEN_ADDR}:${RPC_PORT}" From e2f010eb4fd46e407bef7c6b2ff692c90d50392b Mon Sep 17 00:00:00 2001 From: Alfredo Garcia Date: Mon, 17 Jul 2023 19:06:27 -0300 Subject: [PATCH 217/265] build(deps): update ecc dependencies for `zcashd` 5.6.0, and create legacy state format compatibility layer (#7053) * update ecc dependencies no serialization * use zcash_primitives merke tree functions for serialization/deserialization in orchard and sapling trees * some more work in sapling/orchard serialization/deserialization * clippy * fix doc links * fix missing doc * make orchard trees serializa/deserialize as they were before upgrade * make sapling trees serialize/deserialize as they were before upgrade * use legacy for sprout * remove unused code * readd snapshot tests * repalce some code * upgrade 
zcash_proofs * remove legacy code for sprout * fix the count method * add root to serialize * fixes in as_bytes and from_bytes * use legacy code * add todo about pow2 tests * remove unused sprout code * fix doc typos * Add a recalculate_root() method to trees for tests * Rename test tree types to make them easier to change * Add TODOs for tests for old and new serialization formats * fix doc typos * add more test to note commitment trees * fix comment * fix leaf serializatiuon, reverse to old hashes in pow2 tests * fix serialization * put sapling SerializedTree code back * put orchard SerializedTree code back * clippy * add duplicated dependencies until zebra_script updates * fix a doc link * minor cleanup * remove todo comment from tests * add one more check to tests * update zebra_script * update deny.toml * replace custom function with library * fix some tests * update docs * Remove duplicate dependencies from deny.toml Based on the list at: https://github.com/ZcashFoundation/zebra/actions/runs/5557139662/jobs/10150543673?pr=7053#step:5:14 --------- Co-authored-by: teor --- Cargo.lock | 199 ++++++------ deny.toml | 14 +- zebra-chain/Cargo.toml | 11 +- zebra-chain/src/orchard/tree.rs | 54 ++-- zebra-chain/src/orchard/tree/legacy.rs | 122 ++++++++ zebra-chain/src/sapling/tree.rs | 79 ++--- zebra-chain/src/sapling/tree/legacy.rs | 125 ++++++++ zebra-chain/src/sprout/tree.rs | 29 +- zebra-chain/src/sprout/tree/legacy.rs | 121 +++++++ zebra-consensus/Cargo.toml | 4 +- zebra-rpc/Cargo.toml | 2 +- zebra-script/Cargo.toml | 2 +- .../service/finalized_state/tests/vectors.rs | 296 ++++++++++-------- 13 files changed, 718 insertions(+), 340 deletions(-) create mode 100644 zebra-chain/src/orchard/tree/legacy.rs create mode 100644 zebra-chain/src/sapling/tree/legacy.rs create mode 100644 zebra-chain/src/sprout/tree/legacy.rs diff --git a/Cargo.lock b/Cargo.lock index 8f52415c561..463aaf2d59f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -77,17 +77,6 @@ dependencies = [ 
"cpufeatures", ] -[[package]] -name = "ahash" -version = "0.7.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fcb51a0695d8f838b1ee009b3fbf66bda078cd64590202a864a8f3e8c4315c47" -dependencies = [ - "getrandom 0.2.10", - "once_cell", - "version_check", -] - [[package]] name = "ahash" version = "0.8.3" @@ -550,12 +539,12 @@ dependencies = [ ] [[package]] -name = "bs58" -version = "0.4.0" +name = "bridgetree" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "771fe0050b883fcc3ea2359b1a96bcfbc090b7116eae7c3c512c7a083fdf23d3" +checksum = "3a813dadc684e4c78a4547757debd99666282545d90e4ccc3210913ed4337ad2" dependencies = [ - "sha2 0.9.9", + "incrementalmerkletree", ] [[package]] @@ -1072,9 +1061,9 @@ dependencies = [ [[package]] name = "cxx" -version = "1.0.94" +version = "1.0.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f61f1b6389c3fe1c316bf8a4dccc90a38208354b330925bce1f74a6c4756eb93" +checksum = "109308c20e8445959c2792e81871054c6a17e6976489a93d2769641a2ba5839c" dependencies = [ "cc", "cxxbridge-flags", @@ -1096,15 +1085,15 @@ dependencies = [ [[package]] name = "cxxbridge-flags" -version = "1.0.94" +version = "1.0.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7944172ae7e4068c533afbb984114a56c46e9ccddda550499caa222902c7f7bb" +checksum = "882074421238e84fe3b4c65d0081de34e5b323bf64555d3e61991f76eb64a7bb" [[package]] name = "cxxbridge-macro" -version = "1.0.94" +version = "1.0.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2345488264226bf682893e25de0769f3360aac9957980ec49361b083ddaa5bc5" +checksum = "4a076022ece33e7686fb76513518e219cca4fce5750a8ae6d1ce6c0f48fd1af9" dependencies = [ "proc-macro2 1.0.63", "quote 1.0.29", @@ -1211,15 +1200,6 @@ dependencies = [ "subtle", ] -[[package]] -name = "directories" -version = "5.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "9a49173b84e034382284f27f1af4dcbbd231ffa358c0fe316541a7337f376a35" -dependencies = [ - "dirs-sys", -] - [[package]] name = "dirs" version = "5.0.1" @@ -1764,7 +1744,7 @@ version = "0.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "43a3c133739dddd0d2990f9a4bdf8eb4b21ef50e4851ca85ab661199821d510e" dependencies = [ - "ahash 0.8.3", + "ahash", ] [[package]] @@ -1773,7 +1753,7 @@ version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2c6201b9ff9fd90a5a3bac2e56a830d0caa509576f0e503818ee82c181b3437a" dependencies = [ - "ahash 0.8.3", + "ahash", "allocator-api2", ] @@ -1792,14 +1772,15 @@ dependencies = [ [[package]] name = "hdwallet" -version = "0.3.1" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9cd89bf343be18dbe1e505100e48168bbd084760e842a8fed0317d2361470193" +checksum = "5a03ba7d4c9ea41552cd4351965ff96883e629693ae85005c501bb4b9e1c48a7" dependencies = [ "lazy_static", "rand_core 0.6.4", "ring", - "secp256k1", + "secp256k1 0.26.0", + "thiserror", ] [[package]] @@ -1850,6 +1831,15 @@ dependencies = [ "digest 0.10.7", ] +[[package]] +name = "home" +version = "0.5.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5444c27eef6923071f7ebcc33e3444508466a76f7a2b93da00ed6e19f30c1ddb" +dependencies = [ + "windows-sys 0.48.0", +] + [[package]] name = "hostname" version = "0.3.1" @@ -2045,11 +2035,11 @@ dependencies = [ [[package]] name = "incrementalmerkletree" -version = "0.3.1" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d5ad43a3f5795945459d577f6589cf62a476e92c79b75e70cd954364e14ce17b" +checksum = "2eb91780c91bfc79769006a55c49127b83e1c1a6cf2b3b149ce3f247cbe342f0" dependencies = [ - "serde", + "either", ] [[package]] @@ -2089,7 +2079,7 @@ dependencies = [ "console", "instant", "number_prefix", - "portable-atomic 1.3.3", + "portable-atomic", "unicode-width", ] 
@@ -2099,7 +2089,7 @@ version = "0.11.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2fb7c1b80a1dfa604bb4a649a5c5aeef3d913f7c520cb42b40e534e8a61bcdfc" dependencies = [ - "ahash 0.8.3", + "ahash", "is-terminal", "itoa", "log", @@ -2291,6 +2281,15 @@ dependencies = [ "subtle", ] +[[package]] +name = "known-folders" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b6f1427d9c43b1cce87434c4d9eca33f43bdbb6246a762aa823a582f74c1684" +dependencies = [ + "windows-sys 0.48.0", +] + [[package]] name = "lazy_static" version = "1.4.0" @@ -2478,26 +2477,15 @@ dependencies = [ "nonempty", ] -[[package]] -name = "metrics" -version = "0.20.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b9b8653cec6897f73b519a43fba5ee3d50f62fe9af80b428accdcc093b4a849" -dependencies = [ - "ahash 0.7.6", - "metrics-macros 0.6.0", - "portable-atomic 0.3.20", -] - [[package]] name = "metrics" version = "0.21.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fde3af1a009ed76a778cb84fdef9e7dbbdf5775ae3e4cc1f434a6a307f6f76c5" dependencies = [ - "ahash 0.8.3", - "metrics-macros 0.7.0", - "portable-atomic 1.3.3", + "ahash", + "metrics-macros", + "portable-atomic", ] [[package]] @@ -2510,24 +2498,13 @@ dependencies = [ "hyper", "indexmap 1.9.3", "ipnet", - "metrics 0.21.1", + "metrics", "metrics-util", "quanta", "thiserror", "tokio", ] -[[package]] -name = "metrics-macros" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "731f8ecebd9f3a4aa847dfe75455e4757a45da40a7793d2f0b1f9b6ed18b23f3" -dependencies = [ - "proc-macro2 1.0.63", - "quote 1.0.29", - "syn 1.0.109", -] - [[package]] name = "metrics-macros" version = "0.7.0" @@ -2548,7 +2525,7 @@ dependencies = [ "crossbeam-epoch", "crossbeam-utils", "hashbrown 0.13.2", - "metrics 0.21.1", + "metrics", "num_cpus", "quanta", "sketches-ddsketch", @@ -2829,9 +2806,9 @@ 
checksum = "04744f49eae99ab78e0d5c0b603ab218f515ea8cfe5a456d7629ad883a3b6e7d" [[package]] name = "orchard" -version = "0.4.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c6f418f2c25573923f81a091f38b4b19bc20f6c92b5070fb8f0711e64a2b998" +checksum = "5f4e7a52f510cb8c39e639e662a353adbaf86025478af89ae54a0551f8ca35e2" dependencies = [ "aes", "bitvec", @@ -3156,15 +3133,6 @@ dependencies = [ "universal-hash", ] -[[package]] -name = "portable-atomic" -version = "0.3.20" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e30165d31df606f5726b090ec7592c308a0eaf61721ff64c9a3018e344a8753e" -dependencies = [ - "portable-atomic 1.3.3", -] - [[package]] name = "portable-atomic" version = "1.3.3" @@ -3911,10 +3879,19 @@ version = "0.21.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c42e6f1735c5f00f51e43e28d6634141f2bcad10931b2609ddd74a86d751260" dependencies = [ - "secp256k1-sys", + "secp256k1-sys 0.4.2", "serde", ] +[[package]] +name = "secp256k1" +version = "0.26.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4124a35fe33ae14259c490fd70fa199a32b9ce9502f2ee6bc4f81ec06fa65894" +dependencies = [ + "secp256k1-sys 0.8.1", +] + [[package]] name = "secp256k1-sys" version = "0.4.2" @@ -3924,6 +3901,15 @@ dependencies = [ "cc", ] +[[package]] +name = "secp256k1-sys" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "70a129b9e9efbfb223753b9163c4ab3b13cff7fd9c7f010fbac25ab4099fa07e" +dependencies = [ + "cc", +] + [[package]] name = "secrecy" version = "0.8.0" @@ -5538,6 +5524,15 @@ dependencies = [ "zeroize", ] +[[package]] +name = "xdg" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "688597db5a750e9cad4511cb94729a078e274308099a0382b5b8203bbc767fee" +dependencies = [ + "home", +] + [[package]] name = "yaml-rust" version = "0.4.5" @@ -5549,12 
+5544,12 @@ dependencies = [ [[package]] name = "zcash_address" -version = "0.2.1" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "52be35a205369d480378646bff9c9fedafd8efe8af1e0e54bb858f405883f2b2" +checksum = "8944af5c206cf2e37020ad54618e1825501b98548d35a638b73e0ec5762df8d5" dependencies = [ "bech32", - "bs58 0.4.0", + "bs58", "f4jumble", "zcash_encoding", ] @@ -5582,9 +5577,9 @@ dependencies = [ [[package]] name = "zcash_note_encryption" -version = "0.3.0" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2eb2149e6cd5fbee36c5b87c601715a8c35554602f7fe84af38b636afa2db318" +checksum = "5b4580cd6cee12e44421dac43169be8d23791650816bdb34e6ddfa70ac89c1c5" dependencies = [ "chacha20", "chacha20poly1305", @@ -5595,9 +5590,9 @@ dependencies = [ [[package]] name = "zcash_primitives" -version = "0.11.0" +version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "914d2195a478d5b63191584dff126f552751115181857b290211ec88e68acc3e" +checksum = "de1a231e6a58d3dcdd6e21d229db33d7c10f9b54d8c170e122b267f6826bb48f" dependencies = [ "aes", "bip0039", @@ -5621,7 +5616,7 @@ dependencies = [ "rand 0.8.5", "rand_core 0.6.4", "ripemd", - "secp256k1", + "secp256k1 0.26.0", "sha2 0.10.6", "subtle", "zcash_address", @@ -5631,34 +5626,38 @@ dependencies = [ [[package]] name = "zcash_proofs" -version = "0.11.0" +version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5c8147884952748b00aa443d36511ae2d7b49acfec74cfd39c0959fbb61ef14" +checksum = "59d2e066a717f28451a081f2ebd483ddda896cf00d572972c10979d645ffa6c4" dependencies = [ "bellman", "blake2b_simd", "bls12_381", - "directories", "group", + "home", + "incrementalmerkletree", "jubjub", + "known-folders", "lazy_static", "minreq", "rand_core 0.6.4", "redjubjub", "tracing", + "xdg", "zcash_primitives", ] [[package]] name = "zcash_script" -version = "0.1.12" +version = "0.1.13" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f5d794b254efc2759d249b477f53faa751f67543a4b4d1c7a5ff7df212d4ba5" +checksum = "8c4f95043fd34d402b8d5debb0e54a28c2b84fc99591f5973b4999e9c5b01bfd" dependencies = [ "bellman", "bindgen", "blake2b_simd", "bls12_381", + "bridgetree", "byteorder", "cc", "crossbeam-channel", @@ -5669,8 +5668,9 @@ dependencies = [ "jubjub", "libc", "memuse", - "metrics 0.20.1", + "metrics", "orchard", + "rand 0.8.5", "rand_core 0.6.4", "rayon", "subtle", @@ -5692,7 +5692,8 @@ dependencies = [ "bitvec", "blake2b_simd", "blake2s_simd", - "bs58 0.5.0", + "bridgetree", + "bs58", "byteorder", "chrono", "color-eyre", @@ -5721,7 +5722,7 @@ dependencies = [ "reddsa", "redjubjub", "ripemd", - "secp256k1", + "secp256k1 0.21.3", "serde", "serde-big-array", "serde_json", @@ -5760,7 +5761,7 @@ dependencies = [ "howudoin", "jubjub", "lazy_static", - "metrics 0.21.1", + "metrics", "num-integer", "once_cell", "orchard", @@ -5804,7 +5805,7 @@ dependencies = [ "indexmap 2.0.0", "itertools 0.11.0", "lazy_static", - "metrics 0.21.1", + "metrics", "num-integer", "ordered-map", "pin-project", @@ -5904,7 +5905,7 @@ dependencies = [ "itertools 0.11.0", "jubjub", "lazy_static", - "metrics 0.21.1", + "metrics", "mset", "once_cell", "proptest", @@ -5998,7 +5999,7 @@ dependencies = [ "jsonrpc-core", "lazy_static", "log", - "metrics 0.21.1", + "metrics", "metrics-exporter-prometheus", "num-integer", "once_cell", diff --git a/deny.toml b/deny.toml index 2e8e9738c7d..7e932dbf08e 100644 --- a/deny.toml +++ b/deny.toml @@ -70,26 +70,24 @@ skip-tree = [ # wait for zcashd and zcash_script to upgrade # https://github.com/ZcashFoundation/zcash_script/pulls - { name = "metrics", version = "=0.20.1" }, { name = "sha2", version = "=0.9.9" }, - # wait for ed25519-zebra, indexmap, metrics-util, and metrics to upgrade - # ed25519-zebra/hashbrown: https://github.com/ZcashFoundation/ed25519-zebra/pull/65 - { name = "ahash", version = "=0.7.6" }, - # wait 
for indexmap, toml_edit, serde_json, tower to upgrade { name = "hashbrown", version = "=0.12.3" }, # wait for metrics-exporter-prometheus to upgrade { name = "hashbrown", version = "=0.13.2" }, + # wait for zebra-chain to upgrade + { name = "secp256k1", version = "=0.21.3" }, + + # wait for zebra-chain to upgrade `secp256k1` + { name = "secp256k1-sys", version = "=0.4.2" }, + # ECC crates # wait for zcash_primitives to remove duplicated dependencies { name = "block-buffer", version = "=0.9.0" }, - # wait for zcash_address to upgrade - { name = "bs58", version = "=0.4.0" }, - # wait for minreq and zcash_proofs to upgrade { name = "rustls", version = "=0.20.8" }, diff --git a/zebra-chain/Cargo.toml b/zebra-chain/Cargo.toml index 6c200b32f80..d003b92b9b5 100644 --- a/zebra-chain/Cargo.toml +++ b/zebra-chain/Cargo.toml @@ -53,11 +53,12 @@ bitflags = "2.3.3" bitflags-serde-legacy = "0.1.1" blake2b_simd = "1.0.1" blake2s_simd = "1.0.1" +bridgetree = "0.3.0" bs58 = { version = "0.5.0", features = ["check"] } byteorder = "1.4.3" equihash = "0.2.0" group = "0.13.0" -incrementalmerkletree = "0.3.1" +incrementalmerkletree = "0.4.0" jubjub = "0.10.0" lazy_static = "1.4.0" num-integer = "0.1.45" @@ -72,11 +73,11 @@ x25519-dalek = { version = "2.0.0-rc.3", features = ["serde"] } # ECC deps halo2 = { package = "halo2_proofs", version = "0.3.0" } -orchard = "0.4.0" +orchard = "0.5.0" zcash_encoding = "0.2.0" zcash_history = "0.3.0" -zcash_note_encryption = "0.3.0" -zcash_primitives = { version = "0.11.0", features = ["transparent-inputs"] } +zcash_note_encryption = "0.4.0" +zcash_primitives = { version = "0.12.0", features = ["transparent-inputs"] } # Time chrono = { version = "0.4.26", default-features = false, features = ["clock", "std", "serde"] } @@ -108,7 +109,7 @@ reddsa = "0.5.0" serde_json = { version = "1.0.100", optional = true } # Experimental feature getblocktemplate-rpcs -zcash_address = { version = "0.2.1", optional = true } +zcash_address = { version = "0.3.0", 
optional = true } # Optional testing dependencies proptest = { version = "1.2.0", optional = true } diff --git a/zebra-chain/src/orchard/tree.rs b/zebra-chain/src/orchard/tree.rs index c212033ac12..9862bd8f7fb 100644 --- a/zebra-chain/src/orchard/tree.rs +++ b/zebra-chain/src/orchard/tree.rs @@ -18,11 +18,12 @@ use std::{ }; use bitvec::prelude::*; +use bridgetree; use halo2::pasta::{group::ff::PrimeField, pallas}; -use incrementalmerkletree::{bridgetree, Frontier}; +use incrementalmerkletree::Hashable; use lazy_static::lazy_static; use thiserror::Error; -use zcash_primitives::merkle_tree::{self, CommitmentTree}; +use zcash_primitives::merkle_tree::{write_commitment_tree, HashSer}; use super::sinsemilla::*; @@ -30,6 +31,9 @@ use crate::serialization::{ serde_helpers, ReadZcashExt, SerializationError, ZcashDeserialize, ZcashSerialize, }; +pub mod legacy; +use legacy::LegacyNoteCommitmentTree; + /// The type that is used to update the note commitment tree. /// /// Unfortunately, this is not the same as `orchard::NoteCommitment`. @@ -164,18 +168,18 @@ impl ZcashDeserialize for Root { /// A node of the Orchard Incremental Note Commitment Tree. #[derive(Copy, Clone, Debug, Eq, PartialEq)] -struct Node(pallas::Base); +pub struct Node(pallas::Base); /// Required to convert [`NoteCommitmentTree`] into [`SerializedTree`]. /// /// Zebra stores Orchard note commitment trees as [`Frontier`][1]s while the /// [`z_gettreestate`][2] RPC requires [`CommitmentTree`][3]s. Implementing -/// [`merkle_tree::Hashable`] for [`Node`]s allows the conversion. +/// [`HashSer`] for [`Node`]s allows the conversion. 
/// /// [1]: bridgetree::Frontier /// [2]: https://zcash.github.io/rpc/z_gettreestate.html -/// [3]: merkle_tree::CommitmentTree -impl merkle_tree::Hashable for Node { +/// [3]: incrementalmerkletree::frontier::CommitmentTree +impl HashSer for Node { fn read(mut reader: R) -> io::Result { let mut repr = [0u8; 32]; reader.read_exact(&mut repr)?; @@ -192,24 +196,9 @@ impl merkle_tree::Hashable for Node { fn write(&self, mut writer: W) -> io::Result<()> { writer.write_all(&self.0.to_repr()) } - - fn combine(level: usize, a: &Self, b: &Self) -> Self { - let level = u8::try_from(level).expect("level must fit into u8"); - let layer = MERKLE_DEPTH - 1 - level; - Self(merkle_crh_orchard(layer, a.0, b.0)) - } - - fn blank() -> Self { - Self(NoteCommitmentTree::uncommitted()) - } - - fn empty_root(level: usize) -> Self { - let layer_below = usize::from(MERKLE_DEPTH) - level; - Self(EMPTY_ROOTS[layer_below]) - } } -impl incrementalmerkletree::Hashable for Node { +impl Hashable for Node { fn empty_leaf() -> Self { Self(NoteCommitmentTree::uncommitted()) } @@ -217,13 +206,13 @@ impl incrementalmerkletree::Hashable for Node { /// Combine two nodes to generate a new node in the given level. /// Level 0 is the layer above the leaves (layer 31). /// Level 31 is the root (layer 0). - fn combine(level: incrementalmerkletree::Altitude, a: &Self, b: &Self) -> Self { + fn combine(level: incrementalmerkletree::Level, a: &Self, b: &Self) -> Self { let layer = MERKLE_DEPTH - 1 - u8::from(level); Self(merkle_crh_orchard(layer, a.0, b.0)) } /// Return the node for the level below the given level. 
(A quirk of the API) - fn empty_root(level: incrementalmerkletree::Altitude) -> Self { + fn empty_root(level: incrementalmerkletree::Level) -> Self { let layer_below = usize::from(MERKLE_DEPTH) - usize::from(level); Self(EMPTY_ROOTS[layer_below]) } @@ -265,6 +254,8 @@ pub enum NoteCommitmentTreeError { /// Orchard Incremental Note Commitment Tree #[derive(Debug, Serialize, Deserialize)] +#[serde(into = "LegacyNoteCommitmentTree")] +#[serde(from = "LegacyNoteCommitmentTree")] pub struct NoteCommitmentTree { /// The tree represented as a Frontier. /// @@ -311,7 +302,7 @@ impl NoteCommitmentTree { /// Returns an error if the tree is full. #[allow(clippy::unwrap_in_result)] pub fn append(&mut self, cm_x: NoteCommitmentUpdate) -> Result<(), NoteCommitmentTreeError> { - if self.inner.append(&cm_x.into()) { + if self.inner.append(cm_x.into()) { // Invalidate cached root let cached_root = self .cached_root @@ -385,7 +376,9 @@ impl NoteCommitmentTree { /// /// For Orchard, the tree is capped at 2^32. pub fn count(&self) -> u64 { - self.inner.position().map_or(0, |pos| u64::from(pos) + 1) + self.inner + .value() + .map_or(0, |x| u64::from(x.position()) + 1) } /// Checks if the tree roots and inner data structures of `self` and `other` are equal. @@ -459,7 +452,7 @@ impl From> for NoteCommitmentTree { /// A serialized Orchard note commitment tree. /// /// The format of the serialized data is compatible with -/// [`CommitmentTree`](merkle_tree::CommitmentTree) from `librustzcash` and not +/// [`CommitmentTree`](incrementalmerkletree::frontier::CommitmentTree) from `librustzcash` and not /// with [`Frontier`](bridgetree::Frontier) from the crate /// [`incrementalmerkletree`]. Zebra follows the former format in order to stay /// consistent with `zcashd` in RPCs. Note that [`NoteCommitmentTree`] itself is @@ -468,7 +461,7 @@ impl From> for NoteCommitmentTree { /// The formats are semantically equivalent. 
The primary difference between them /// is that in [`Frontier`](bridgetree::Frontier), the vector of parents is /// dense (we know where the gaps are from the position of the leaf in the -/// overall tree); whereas in [`CommitmentTree`](merkle_tree::CommitmentTree), +/// overall tree); whereas in [`CommitmentTree`](incrementalmerkletree::frontier::CommitmentTree), /// the vector of parent hashes is sparse with [`None`] values in the gaps. /// /// The sparse format, used in this implementation, allows representing invalid @@ -498,8 +491,9 @@ impl From<&NoteCommitmentTree> for SerializedTree { // Convert the note commitment tree from // [`Frontier`](bridgetree::Frontier) to // [`CommitmentTree`](merkle_tree::CommitmentTree). - let tree = CommitmentTree::from_frontier(&tree.inner); - tree.write(&mut serialized_tree) + let tree = incrementalmerkletree::frontier::CommitmentTree::from_frontier(&tree.inner); + + write_commitment_tree(&tree, &mut serialized_tree) .expect("note commitment tree should be serializable"); Self(serialized_tree) } diff --git a/zebra-chain/src/orchard/tree/legacy.rs b/zebra-chain/src/orchard/tree/legacy.rs new file mode 100644 index 00000000000..b4d97cf48d1 --- /dev/null +++ b/zebra-chain/src/orchard/tree/legacy.rs @@ -0,0 +1,122 @@ +//! Orchard serialization legacy code. +//! +//! We create a [`LegacyNoteCommitmentTree`] which is a copy of [`NoteCommitmentTree`] but where serialization and +//! deserialization can be derived. +//! To do this we create a [`LegacyFrontier`] which is a legacy `Frontier` structure that can be found in [1], +//! In order to make [`LegacyFrontier`] serializable we also have our own versions of `NonEmptyFrontier` ([`LegacyNonEmptyFrontier`]), +//! `Leaf`([`LegacyLeaf`]) and `Position`([`LegacyPosition`]) that can be found in [1] or [2]. +//! +//! Conversions methods to/from [`LegacyNoteCommitmentTree`] to/from [`NoteCommitmentTree`] are defined also in this file. +//! +//! 
[1]: https://github.com/zcash/incrementalmerkletree/blob/incrementalmerkletree-v0.3.1/src/bridgetree.rs +//! [2]: https://github.com/zcash/incrementalmerkletree/blob/incrementalmerkletree-v0.3.1/src/lib.rs + +use incrementalmerkletree::{frontier::Frontier, Position}; + +use super::{Node, NoteCommitmentTree, Root, MERKLE_DEPTH}; + +/// A legacy version of [`NoteCommitmentTree`]. +#[derive(Debug, Serialize, Deserialize)] +#[serde(rename = "NoteCommitmentTree")] +#[allow(missing_docs)] +pub struct LegacyNoteCommitmentTree { + pub inner: LegacyFrontier, + cached_root: std::sync::RwLock>, +} + +impl From for LegacyNoteCommitmentTree { + fn from(nct: NoteCommitmentTree) -> Self { + LegacyNoteCommitmentTree { + inner: nct.inner.into(), + cached_root: nct.cached_root, + } + } +} + +impl From for NoteCommitmentTree { + fn from(legacy_nct: LegacyNoteCommitmentTree) -> Self { + NoteCommitmentTree { + inner: legacy_nct.inner.into(), + cached_root: legacy_nct.cached_root, + } + } +} + +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +#[serde(rename = "Frontier")] +#[allow(missing_docs)] +pub struct LegacyFrontier { + frontier: Option>, +} + +impl From> for Frontier { + fn from(legacy_frontier: LegacyFrontier) -> Self { + if let Some(legacy_frontier_data) = legacy_frontier.frontier { + let mut ommers = legacy_frontier_data.ommers; + let position = Position::from( + u64::try_from(legacy_frontier_data.position.0) + .expect("old `usize` always fits in `u64`"), + ); + let leaf = match legacy_frontier_data.leaf { + LegacyLeaf::Left(a) => a, + LegacyLeaf::Right(a, b) => { + ommers.insert(0, a); + b + } + }; + Frontier::from_parts( + position, + leaf, + ommers, + ) + .expect("We should be able to construct a frontier from parts given legacy frontier is not empty") + } else { + Frontier::empty() + } + } +} + +impl From> for LegacyFrontier { + fn from(frontier: Frontier) -> Self { + if let Some(frontier_data) = frontier.value() { + let leaf_from_frontier = 
*frontier_data.leaf(); + let mut leaf = LegacyLeaf::Left(leaf_from_frontier); + let mut ommers = frontier_data.ommers().to_vec(); + let position = usize::try_from(u64::from(frontier_data.position())) + .expect("new position should fit in a `usize`"); + if frontier_data.position().is_odd() { + let left = ommers.remove(0); + leaf = LegacyLeaf::Right(left, leaf_from_frontier); + } + LegacyFrontier { + frontier: Some(LegacyNonEmptyFrontier { + position: LegacyPosition(position), + leaf, + ommers: ommers.to_vec(), + }), + } + } else { + LegacyFrontier { frontier: None } + } + } +} + +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +#[serde(rename = "NonEmptyFrontier")] +struct LegacyNonEmptyFrontier { + position: LegacyPosition, + leaf: LegacyLeaf, + ommers: Vec, +} + +/// A set of leaves of a Merkle tree. +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +#[serde(rename = "Leaf")] +enum LegacyLeaf { + Left(A), + Right(A, A), +} + +#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)] +#[repr(transparent)] +struct LegacyPosition(usize); diff --git a/zebra-chain/src/sapling/tree.rs b/zebra-chain/src/sapling/tree.rs index 06029731108..ea045e325ab 100644 --- a/zebra-chain/src/sapling/tree.rs +++ b/zebra-chain/src/sapling/tree.rs @@ -18,14 +18,13 @@ use std::{ }; use bitvec::prelude::*; -use incrementalmerkletree::{ - bridgetree::{self, Leaf}, - Frontier, -}; +use bridgetree::{self}; +use incrementalmerkletree::{frontier::Frontier, Hashable}; + use lazy_static::lazy_static; use thiserror::Error; use zcash_encoding::{Optional, Vector}; -use zcash_primitives::merkle_tree::{self, Hashable}; +use zcash_primitives::merkle_tree::HashSer; use super::commitment::pedersen_hashes::pedersen_hash; @@ -33,6 +32,9 @@ use crate::serialization::{ serde_helpers, ReadZcashExt, SerializationError, ZcashDeserialize, ZcashSerialize, }; +pub mod legacy; +use legacy::{LegacyLeaf, LegacyNoteCommitmentTree}; + /// The type that is 
used to update the note commitment tree. /// /// Unfortunately, this is not the same as `sapling::NoteCommitment`. @@ -85,12 +87,6 @@ lazy_static! { }; } -/// The index of a note's commitment at the leafmost layer of its Note -/// Commitment Tree. -/// -/// -pub struct Position(pub(crate) u64); - /// Sapling note commitment tree root node hash. /// /// The root hash in LEBS2OSP256(rt) encoding of the Sapling note @@ -167,7 +163,7 @@ impl ZcashDeserialize for Root { /// Note that it's handled as a byte buffer and not a point coordinate (jubjub::Fq) /// because that's how the spec handles the MerkleCRH^Sapling function inputs and outputs. #[derive(Copy, Clone, Eq, PartialEq)] -struct Node([u8; 32]); +pub struct Node([u8; 32]); impl fmt::Debug for Node { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { @@ -179,12 +175,12 @@ impl fmt::Debug for Node { /// /// Zebra stores Sapling note commitment trees as [`Frontier`][1]s while the /// [`z_gettreestate`][2] RPC requires [`CommitmentTree`][3]s. Implementing -/// [`merkle_tree::Hashable`] for [`Node`]s allows the conversion. +/// [`incrementalmerkletree::Hashable`] for [`Node`]s allows the conversion. 
/// /// [1]: bridgetree::Frontier /// [2]: https://zcash.github.io/rpc/z_gettreestate.html -/// [3]: merkle_tree::CommitmentTree -impl merkle_tree::Hashable for Node { +/// [3]: incrementalmerkletree::frontier::CommitmentTree +impl HashSer for Node { fn read(mut reader: R) -> io::Result { let mut node = [0u8; 32]; reader.read_exact(&mut node)?; @@ -194,24 +190,9 @@ impl merkle_tree::Hashable for Node { fn write(&self, mut writer: W) -> io::Result<()> { writer.write_all(self.0.as_ref()) } - - fn combine(level: usize, a: &Self, b: &Self) -> Self { - let level = u8::try_from(level).expect("level must fit into u8"); - let layer = MERKLE_DEPTH - 1 - level; - Self(merkle_crh_sapling(layer, a.0, b.0)) - } - - fn blank() -> Self { - Self(NoteCommitmentTree::uncommitted()) - } - - fn empty_root(level: usize) -> Self { - let layer_below = usize::from(MERKLE_DEPTH) - level; - Self(EMPTY_ROOTS[layer_below]) - } } -impl incrementalmerkletree::Hashable for Node { +impl Hashable for Node { fn empty_leaf() -> Self { Self(NoteCommitmentTree::uncommitted()) } @@ -219,13 +200,13 @@ impl incrementalmerkletree::Hashable for Node { /// Combine two nodes to generate a new node in the given level. /// Level 0 is the layer above the leaves (layer 31). /// Level 31 is the root (layer 0). - fn combine(level: incrementalmerkletree::Altitude, a: &Self, b: &Self) -> Self { + fn combine(level: incrementalmerkletree::Level, a: &Self, b: &Self) -> Self { let layer = MERKLE_DEPTH - 1 - u8::from(level); Self(merkle_crh_sapling(layer, a.0, b.0)) } /// Return the node for the level below the given level. (A quirk of the API) - fn empty_root(level: incrementalmerkletree::Altitude) -> Self { + fn empty_root(level: incrementalmerkletree::Level) -> Self { let layer_below = usize::from(MERKLE_DEPTH) - usize::from(level); Self(EMPTY_ROOTS[layer_below]) } @@ -267,6 +248,8 @@ pub enum NoteCommitmentTreeError { /// Sapling Incremental Note Commitment Tree. 
#[derive(Debug, Serialize, Deserialize)] +#[serde(into = "LegacyNoteCommitmentTree")] +#[serde(from = "LegacyNoteCommitmentTree")] pub struct NoteCommitmentTree { /// The tree represented as a [`Frontier`](bridgetree::Frontier). /// @@ -284,7 +267,7 @@ pub struct NoteCommitmentTree { /// /// /// Note: MerkleDepth^Sapling = MERKLE_DEPTH = 32. - inner: bridgetree::Frontier, + inner: Frontier, /// A cached root of the tree. /// @@ -314,7 +297,7 @@ impl NoteCommitmentTree { /// Returns an error if the tree is full. #[allow(clippy::unwrap_in_result)] pub fn append(&mut self, cm_u: NoteCommitmentUpdate) -> Result<(), NoteCommitmentTreeError> { - if self.inner.append(&cm_u.into()) { + if self.inner.append(cm_u.into()) { // Invalidate cached root let cached_root = self .cached_root @@ -388,7 +371,9 @@ impl NoteCommitmentTree { /// /// For Sapling, the tree is capped at 2^32. pub fn count(&self) -> u64 { - self.inner.position().map_or(0, |pos| u64::from(pos) + 1) + self.inner + .value() + .map_or(0, |x| u64::from(x.position()) + 1) } /// Checks if the tree roots and inner data structures of `self` and `other` are equal. @@ -463,7 +448,7 @@ impl From> for NoteCommitmentTree { /// A serialized Sapling note commitment tree. /// /// The format of the serialized data is compatible with -/// [`CommitmentTree`](merkle_tree::CommitmentTree) from `librustzcash` and not +/// [`CommitmentTree`](incrementalmerkletree::frontier::CommitmentTree) from `librustzcash` and not /// with [`Frontier`](bridgetree::Frontier) from the crate /// [`incrementalmerkletree`]. Zebra follows the former format in order to stay /// consistent with `zcashd` in RPCs. Note that [`NoteCommitmentTree`] itself is @@ -472,7 +457,7 @@ impl From> for NoteCommitmentTree { /// The formats are semantically equivalent. 
The primary difference between them /// is that in [`Frontier`](bridgetree::Frontier), the vector of parents is /// dense (we know where the gaps are from the position of the leaf in the -/// overall tree); whereas in [`CommitmentTree`](merkle_tree::CommitmentTree), +/// overall tree); whereas in [`CommitmentTree`](incrementalmerkletree::frontier::CommitmentTree), /// the vector of parent hashes is sparse with [`None`] values in the gaps. /// /// The sparse format, used in this implementation, allows representing invalid @@ -489,6 +474,9 @@ impl From<&NoteCommitmentTree> for SerializedTree { fn from(tree: &NoteCommitmentTree) -> Self { let mut serialized_tree = vec![]; + // + let legacy_tree = LegacyNoteCommitmentTree::from(tree.clone()); + // Convert the note commitment tree represented as a frontier into the // format compatible with `zcashd`. // @@ -502,20 +490,22 @@ impl From<&NoteCommitmentTree> for SerializedTree { // sparse formats for Sapling. // // [1]: - if let Some(frontier) = tree.inner.value() { - let (left_leaf, right_leaf) = match frontier.leaf() { - Leaf::Left(left_value) => (Some(left_value), None), - Leaf::Right(left_value, right_value) => (Some(left_value), Some(right_value)), + if let Some(frontier) = legacy_tree.inner.frontier { + let (left_leaf, right_leaf) = match frontier.leaf { + LegacyLeaf::Left(left_value) => (Some(left_value), None), + LegacyLeaf::Right(left_value, right_value) => (Some(left_value), Some(right_value)), }; // Ommers are siblings of parent nodes along the branch from the // most recent leaf to the root of the tree. - let mut ommers_iter = frontier.ommers().iter(); + let mut ommers_iter = frontier.ommers.iter(); // Set bits in the binary representation of the position indicate // the presence of ommers along the branch from the most recent leaf // node to the root of the tree, except for the lowest bit. 
- let mut position: usize = frontier.position().into(); + let mut position: u64 = (frontier.position.0) + .try_into() + .expect("old usize position always fit in u64"); // The lowest bit does not indicate the presence of any ommers. We // clear it so that we can test if there are no set bits left in @@ -552,7 +542,6 @@ impl From<&NoteCommitmentTree> for SerializedTree { } // Serialize the converted note commitment tree. - Optional::write(&mut serialized_tree, left_leaf, |tree, leaf| { leaf.write(tree) }) diff --git a/zebra-chain/src/sapling/tree/legacy.rs b/zebra-chain/src/sapling/tree/legacy.rs new file mode 100644 index 00000000000..0e66e8aedea --- /dev/null +++ b/zebra-chain/src/sapling/tree/legacy.rs @@ -0,0 +1,125 @@ +//! Sapling serialization legacy code. +//! +//! We create a [`LegacyNoteCommitmentTree`] which is a copy of [`NoteCommitmentTree`] but where serialization and +//! deserialization can be derived. +//! To do this we create a [`LegacyFrontier`] which is a legacy `Frontier` structure that can be found in [1], +//! In order to make [`LegacyFrontier`] serializable we also have our own versions of `NonEmptyFrontier` ([`LegacyNonEmptyFrontier`]), +//! `Leaf`([`LegacyLeaf`]) and `Position`([`LegacyPosition`]) that can be found in [1] or [2]. +//! +//! Conversions methods to/from [`LegacyNoteCommitmentTree`] to/from [`NoteCommitmentTree`] are defined also in this file. +//! +//! [1]: https://github.com/zcash/incrementalmerkletree/blob/incrementalmerkletree-v0.3.1/src/bridgetree.rs +//! [2]: https://github.com/zcash/incrementalmerkletree/blob/incrementalmerkletree-v0.3.1/src/lib.rs + +use incrementalmerkletree::{frontier::Frontier, Position}; + +use super::{Node, NoteCommitmentTree, Root, MERKLE_DEPTH}; + +/// A legacy version of [`NoteCommitmentTree`]. 
+#[derive(Debug, Serialize, Deserialize)] +#[serde(rename = "NoteCommitmentTree")] +#[allow(missing_docs)] +pub struct LegacyNoteCommitmentTree { + pub inner: LegacyFrontier, + cached_root: std::sync::RwLock>, +} + +impl From for LegacyNoteCommitmentTree { + fn from(nct: NoteCommitmentTree) -> Self { + LegacyNoteCommitmentTree { + inner: nct.inner.into(), + cached_root: nct.cached_root, + } + } +} + +impl From for NoteCommitmentTree { + fn from(nct: LegacyNoteCommitmentTree) -> Self { + NoteCommitmentTree { + inner: nct.inner.into(), + cached_root: nct.cached_root, + } + } +} + +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +#[serde(rename = "Frontier")] +#[allow(missing_docs)] +pub struct LegacyFrontier { + pub frontier: Option>, +} + +impl From> for Frontier { + fn from(legacy_frontier: LegacyFrontier) -> Self { + if let Some(legacy_frontier_data) = legacy_frontier.frontier { + let mut ommers = legacy_frontier_data.ommers; + let position = Position::from( + u64::try_from(legacy_frontier_data.position.0) + .expect("old `usize` always fits in `u64`"), + ); + let leaf = match legacy_frontier_data.leaf { + LegacyLeaf::Left(a) => a, + LegacyLeaf::Right(a, b) => { + ommers.insert(0, a); + b + } + }; + Frontier::from_parts( + position, + leaf, + ommers, + ) + .expect("We should be able to construct a frontier from parts given legacy frontier is not empty") + } else { + Frontier::empty() + } + } +} + +impl From> for LegacyFrontier { + fn from(frontier: Frontier) -> Self { + if let Some(frontier_data) = frontier.value() { + let leaf_from_frontier = *frontier_data.leaf(); + let mut leaf = LegacyLeaf::Left(leaf_from_frontier); + let mut ommers = frontier_data.ommers().to_vec(); + let position = usize::try_from(u64::from(frontier_data.position())) + .expect("new position should fit in a `usize`"); + if frontier_data.position().is_odd() { + let left = ommers.remove(0); + leaf = LegacyLeaf::Right(left, leaf_from_frontier); + } + LegacyFrontier { + frontier: 
Some(LegacyNonEmptyFrontier { + position: LegacyPosition(position), + leaf, + ommers: ommers.to_vec(), + }), + } + } else { + LegacyFrontier { frontier: None } + } + } +} + +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +#[serde(rename = "NonEmptyFrontier")] +#[allow(missing_docs)] +pub struct LegacyNonEmptyFrontier { + pub position: LegacyPosition, + pub leaf: LegacyLeaf, + pub ommers: Vec, +} + +/// A set of leaves of a Merkle tree. +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +#[serde(rename = "Leaf")] +#[allow(missing_docs)] +pub enum LegacyLeaf { + Left(A), + Right(A, A), +} + +#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)] +#[repr(transparent)] +#[allow(missing_docs)] +pub struct LegacyPosition(pub usize); diff --git a/zebra-chain/src/sprout/tree.rs b/zebra-chain/src/sprout/tree.rs index af1d964d123..2b70b0a364d 100644 --- a/zebra-chain/src/sprout/tree.rs +++ b/zebra-chain/src/sprout/tree.rs @@ -13,13 +13,16 @@ use std::fmt; use byteorder::{BigEndian, ByteOrder}; -use incrementalmerkletree::{bridgetree, Frontier}; +use incrementalmerkletree::frontier::Frontier; use lazy_static::lazy_static; use sha2::digest::generic_array::GenericArray; use thiserror::Error; use super::commitment::NoteCommitment; +pub mod legacy; +use legacy::LegacyNoteCommitmentTree; + #[cfg(any(test, feature = "proptest-impl"))] use proptest_derive::Arbitrary; @@ -128,7 +131,7 @@ impl From<&Root> for [u8; 32] { /// A node of the Sprout note commitment tree. #[derive(Clone, Copy, Eq, PartialEq)] -struct Node([u8; 32]); +pub struct Node([u8; 32]); impl fmt::Debug for Node { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { @@ -147,12 +150,12 @@ impl incrementalmerkletree::Hashable for Node { /// Note that Sprout does not use the `level` argument. 
/// /// [MerkleCRH^Sprout]: https://zips.z.cash/protocol/protocol.pdf#sproutmerklecrh - fn combine(_level: incrementalmerkletree::Altitude, a: &Self, b: &Self) -> Self { + fn combine(_level: incrementalmerkletree::Level, a: &Self, b: &Self) -> Self { Self(merkle_crh_sprout(a.0, b.0)) } /// Returns the node for the level below the given level. (A quirk of the API) - fn empty_root(level: incrementalmerkletree::Altitude) -> Self { + fn empty_root(level: incrementalmerkletree::Level) -> Self { let layer_below = usize::from(MERKLE_DEPTH) - usize::from(level); Self(EMPTY_ROOTS[layer_below]) } @@ -200,16 +203,18 @@ pub enum NoteCommitmentTreeError { /// job of this tree to protect against double-spending, as it is append-only; double-spending /// is prevented by maintaining the [nullifier set] for each shielded pool. /// -/// Internally this wraps [`incrementalmerkletree::bridgetree::Frontier`], so that we can maintain and increment +/// Internally this wraps [`bridgetree::Frontier`], so that we can maintain and increment /// the full tree with only the minimal amount of non-empty nodes/leaves required. /// /// [Sprout Note Commitment Tree]: https://zips.z.cash/protocol/protocol.pdf#merkletree /// [nullifier set]: https://zips.z.cash/protocol/protocol.pdf#nullifierset #[derive(Debug, Serialize, Deserialize)] +#[serde(into = "LegacyNoteCommitmentTree")] +#[serde(from = "LegacyNoteCommitmentTree")] pub struct NoteCommitmentTree { - /// The tree represented as a [`incrementalmerkletree::bridgetree::Frontier`]. + /// The tree represented as a [`bridgetree::Frontier`]. /// - /// A [`incrementalmerkletree::Frontier`] is a subset of the tree that allows to fully specify it. It + /// A [`bridgetree::Frontier`] is a subset of the tree that allows to fully specify it. It /// consists of nodes along the rightmost (newer) branch of the tree that /// has non-empty nodes. Upper (near root) empty nodes of the branch are not /// stored. 
@@ -222,7 +227,7 @@ pub struct NoteCommitmentTree { /// /// /// Note: MerkleDepth^Sprout = MERKLE_DEPTH = 29. - inner: bridgetree::Frontier, + inner: Frontier, /// A cached root of the tree. /// @@ -248,7 +253,7 @@ impl NoteCommitmentTree { /// Returns an error if the tree is full. #[allow(clippy::unwrap_in_result)] pub fn append(&mut self, cm: NoteCommitment) -> Result<(), NoteCommitmentTreeError> { - if self.inner.append(&cm.into()) { + if self.inner.append(cm.into()) { // Invalidate cached root let cached_root = self .cached_root @@ -323,7 +328,9 @@ impl NoteCommitmentTree { /// /// [spec]: https://zips.z.cash/protocol/protocol.pdf#merkletree pub fn count(&self) -> u64 { - self.inner.position().map_or(0, |pos| u64::from(pos) + 1) + self.inner + .value() + .map_or(0, |x| u64::from(x.position()) + 1) } /// Checks if the tree roots and inner data structures of `self` and `other` are equal. @@ -360,7 +367,7 @@ impl Clone for NoteCommitmentTree { impl Default for NoteCommitmentTree { fn default() -> Self { Self { - inner: bridgetree::Frontier::empty(), + inner: Frontier::empty(), cached_root: Default::default(), } } diff --git a/zebra-chain/src/sprout/tree/legacy.rs b/zebra-chain/src/sprout/tree/legacy.rs new file mode 100644 index 00000000000..b11e674bafa --- /dev/null +++ b/zebra-chain/src/sprout/tree/legacy.rs @@ -0,0 +1,121 @@ +//! Sprout serialization legacy code. +//! +//! We create a [`LegacyNoteCommitmentTree`] which is a copy of [`NoteCommitmentTree`] but where serialization and +//! deserialization can be derived. +//! To do this we create a [`LegacyFrontier`] which is a legacy `Frontier` structure that can be found in [1], +//! In order to make [`LegacyFrontier`] serializable we also have our own versions of `NonEmptyFrontier` ([`LegacyNonEmptyFrontier`]), +//! `Leaf`([`LegacyLeaf`]) and `Position`([`LegacyPosition`]) that can be found in [1] or [2]. +//! +//! 
Conversions methods to/from [`LegacyNoteCommitmentTree`] to/from [`NoteCommitmentTree`] are defined also in this file. +//! +//! [1]: https://github.com/zcash/incrementalmerkletree/blob/incrementalmerkletree-v0.3.1/src/bridgetree.rs +//! [2]: https://github.com/zcash/incrementalmerkletree/blob/incrementalmerkletree-v0.3.1/src/lib.rs + +use incrementalmerkletree::{frontier::Frontier, Position}; + +use super::{Node, NoteCommitmentTree, Root, MERKLE_DEPTH}; + +/// A legacy version of [`NoteCommitmentTree`]. +#[derive(Debug, Serialize, Deserialize)] +#[serde(rename = "NoteCommitmentTree")] +pub struct LegacyNoteCommitmentTree { + inner: LegacyFrontier, + cached_root: std::sync::RwLock>, +} + +impl From for LegacyNoteCommitmentTree { + fn from(nct: NoteCommitmentTree) -> Self { + LegacyNoteCommitmentTree { + inner: nct.inner.into(), + cached_root: nct.cached_root, + } + } +} + +impl From for NoteCommitmentTree { + fn from(nct: LegacyNoteCommitmentTree) -> Self { + NoteCommitmentTree { + inner: nct.inner.into(), + cached_root: nct.cached_root, + } + } +} + +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +#[serde(rename = "Frontier")] +#[allow(missing_docs)] +pub struct LegacyFrontier { + frontier: Option>, +} + +impl From> for Frontier { + fn from(legacy_frontier: LegacyFrontier) -> Self { + if let Some(legacy_frontier_data) = legacy_frontier.frontier { + let mut ommers = legacy_frontier_data.ommers; + let position = Position::from( + u64::try_from(legacy_frontier_data.position.0) + .expect("old `usize` always fits in `u64`"), + ); + let leaf = match legacy_frontier_data.leaf { + LegacyLeaf::Left(a) => a, + LegacyLeaf::Right(a, b) => { + ommers.insert(0, a); + b + } + }; + Frontier::from_parts( + position, + leaf, + ommers, + ) + .expect("We should be able to construct a frontier from parts given legacy frontier is not empty") + } else { + Frontier::empty() + } + } +} + +impl From> for LegacyFrontier { + fn from(frontier: Frontier) -> Self { + if let 
Some(frontier_data) = frontier.value() { + let leaf_from_frontier = *frontier_data.leaf(); + let mut leaf = LegacyLeaf::Left(leaf_from_frontier); + let mut ommers = frontier_data.ommers().to_vec(); + let position = usize::try_from(u64::from(frontier_data.position())) + .expect("new position should fit in a `usize`"); + if frontier_data.position().is_odd() { + let left = ommers.remove(0); + leaf = LegacyLeaf::Right(left, leaf_from_frontier); + } + LegacyFrontier { + frontier: Some(LegacyNonEmptyFrontier { + position: LegacyPosition(position), + leaf, + ommers: ommers.to_vec(), + }), + } + } else { + LegacyFrontier { frontier: None } + } + } +} + +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +#[serde(rename = "NonEmptyFrontier")] +struct LegacyNonEmptyFrontier { + position: LegacyPosition, + leaf: LegacyLeaf, + ommers: Vec, +} + +/// A set of leaves of a Merkle tree. +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +#[serde(rename = "Leaf")] +enum LegacyLeaf { + Left(A), + Right(A, A), +} + +#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)] +#[repr(transparent)] +struct LegacyPosition(usize); diff --git a/zebra-consensus/Cargo.toml b/zebra-consensus/Cargo.toml index 639a27de3c2..35fabab2f1b 100644 --- a/zebra-consensus/Cargo.toml +++ b/zebra-consensus/Cargo.toml @@ -58,9 +58,9 @@ tower = { version = "0.4.13", features = ["timeout", "util", "buffer"] } tracing = "0.1.37" tracing-futures = "0.2.5" -orchard = "0.4.0" +orchard = "0.5.0" -zcash_proofs = { version = "0.11.0", features = ["local-prover", "multicore", "download-params"] } +zcash_proofs = { version = "0.12.1", features = ["local-prover", "multicore", "download-params"] } tower-fallback = { path = "../tower-fallback/", version = "0.2.41-beta.3" } tower-batch-control = { path = "../tower-batch-control/", version = "0.2.41-beta.3" } diff --git a/zebra-rpc/Cargo.toml b/zebra-rpc/Cargo.toml index c387f31cf39..d0f6adecc26 100644 --- 
a/zebra-rpc/Cargo.toml +++ b/zebra-rpc/Cargo.toml @@ -65,7 +65,7 @@ serde = { version = "1.0.168", features = ["serde_derive"] } # Experimental feature getblocktemplate-rpcs rand = { version = "0.8.5", optional = true } # ECC deps used by getblocktemplate-rpcs feature -zcash_address = { version = "0.2.1", optional = true } +zcash_address = { version = "0.3.0", optional = true } # Test-only feature proptest-impl proptest = { version = "1.2.0", optional = true } diff --git a/zebra-script/Cargo.toml b/zebra-script/Cargo.toml index eb284f9f957..747c8c0dadb 100644 --- a/zebra-script/Cargo.toml +++ b/zebra-script/Cargo.toml @@ -15,7 +15,7 @@ keywords = ["zebra", "zcash"] categories = ["api-bindings", "cryptography::cryptocurrencies"] [dependencies] -zcash_script = "0.1.12" +zcash_script = "0.1.13" zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.27" } diff --git a/zebra-state/src/service/finalized_state/tests/vectors.rs b/zebra-state/src/service/finalized_state/tests/vectors.rs index 8df81b66a89..2390ed72edd 100644 --- a/zebra-state/src/service/finalized_state/tests/vectors.rs +++ b/zebra-state/src/service/finalized_state/tests/vectors.rs @@ -10,9 +10,16 @@ use rand::random; use halo2::pasta::{group::ff::PrimeField, pallas}; use zebra_chain::{ - orchard::tree::NoteCommitmentTree as OrchardNoteCommitmentTree, - sapling::tree::NoteCommitmentTree as SaplingNoteCommitmentTree, + orchard::{ + tree::legacy::LegacyNoteCommitmentTree as LegacyOrchardNoteCommitmentTree, + tree::NoteCommitmentTree as OrchardNoteCommitmentTree, + }, + sapling::{ + tree::legacy::LegacyNoteCommitmentTree as LegacySaplingNoteCommitmentTree, + tree::NoteCommitmentTree as SaplingNoteCommitmentTree, + }, sprout::{ + tree::legacy::LegacyNoteCommitmentTree as LegacySproutNoteCommitmentTree, tree::NoteCommitmentTree as SproutNoteCommitmentTree, NoteCommitment as SproutNoteCommitment, }, @@ -20,26 +27,6 @@ use zebra_chain::{ use crate::service::finalized_state::disk_format::{FromDisk, 
IntoDisk}; -// Currently, these tests check these structs are equal: -// * commitments -> tree struct -// * commitments -> tree struct -> serialize -> deserialize -> tree struct -// And these serialized formats are equal: -// * fixed serialized test vector -// * commitments -> tree struct -> serialize -// * commitments -> tree struct -> serialize -> deserialize -> tree struct -> serialize -// -// TODO: apply these tests to the new tree structs, and update the serialization format -// (keeping the tests for the old format is optional, because the tests below cover it) -// -// TODO: test that old and new serializations produce the same format: -// Tree roots built from the same commitments should match: -// * commitments -> old tree struct -> new tree struct -> un-cached root -// * commitments -> new tree struct -> un-cached root -// Even when serialized and deserialized: -// * commitments -> old tree struct -> old serialize -> old deserialize -> old tree struct -> new tree struct -> un-cached root -// * commitments -> new tree struct -> new serialize -> new deserialize -> new tree struct -> un-cached root -// * commitments -> new tree struct -> un-cached root - /// Check that the sprout tree database serialization format has not changed. #[test] fn sprout_note_commitment_tree_serialization() { @@ -73,21 +60,8 @@ fn sprout_note_commitment_tree_serialization() { // The purpose of this test is to make sure the serialization format does // not change by accident. 
let expected_serialized_tree_hex = "010200836045484077cf6390184ea7cd48b460e2d0f22b2293b69633bb152314a692fb019f5b2b1e4bf7e7318d0a1f417ca6bca36077025b3d11e074b94cd55ce9f3861801c45297124f50dcd3f78eed017afd1e30764cd74cdf0a57751978270fd0721359"; - let serialized_tree = incremental_tree.as_bytes(); - assert_eq!(hex::encode(&serialized_tree), expected_serialized_tree_hex); - - let deserialized_tree = SproutNoteCommitmentTree::from_bytes(&serialized_tree); - - // This check isn't enough to show that the entire struct is the same, because it just compares - // the cached serialized/deserialized roots. (NoteCommitmentTree::eq() also just compares - // roots.) - assert_eq!(incremental_tree.root(), deserialized_tree.root()); - incremental_tree.assert_frontier_eq(&deserialized_tree); - - // Double-check that the internal format is the same by re-serializing the tree. - let re_serialized_tree = deserialized_tree.as_bytes(); - assert_eq!(serialized_tree, re_serialized_tree); + sprout_checks(incremental_tree, expected_serialized_tree_hex); } /// Check that the sprout tree database serialization format has not changed for one commitment. @@ -119,21 +93,8 @@ fn sprout_note_commitment_tree_serialization_one() { // The purpose of this test is to make sure the serialization format does // not change by accident. let expected_serialized_tree_hex = "010000836045484077cf6390184ea7cd48b460e2d0f22b2293b69633bb152314a692fb000193e5f97ce1d5d94d0c6e1b66a4a262c9ae89e56e28f3f6e4a557b6fb70e173a8"; - let serialized_tree = incremental_tree.as_bytes(); - assert_eq!(hex::encode(&serialized_tree), expected_serialized_tree_hex); - - let deserialized_tree = SproutNoteCommitmentTree::from_bytes(&serialized_tree); - - // This check isn't enough to show that the entire struct is the same, because it just compares - // the cached serialized/deserialized roots. (NoteCommitmentTree::eq() also just compares - // roots.) 
- assert_eq!(incremental_tree.root(), deserialized_tree.root()); - - incremental_tree.assert_frontier_eq(&deserialized_tree); - // Double-check that the internal format is the same by re-serializing the tree. - let re_serialized_tree = deserialized_tree.as_bytes(); - assert_eq!(serialized_tree, re_serialized_tree); + sprout_checks(incremental_tree, expected_serialized_tree_hex); } /// Check that the sprout tree database serialization format has not changed when the number of @@ -174,21 +135,8 @@ fn sprout_note_commitment_tree_serialization_pow2() { // The purpose of this test is to make sure the serialization format does // not change by accident. let expected_serialized_tree_hex = "010301836045484077cf6390184ea7cd48b460e2d0f22b2293b69633bb152314a692fb92498a8295ea36d593eaee7cb8b55be3a3e37b8185d3807693184054cd574ae4019f5b2b1e4bf7e7318d0a1f417ca6bca36077025b3d11e074b94cd55ce9f3861801b61f588fcba9cea79e94376adae1c49583f716d2f20367141f1369a235b95c98"; - let serialized_tree = incremental_tree.as_bytes(); - assert_eq!(hex::encode(&serialized_tree), expected_serialized_tree_hex); - - let deserialized_tree = SproutNoteCommitmentTree::from_bytes(&serialized_tree); - - // This check isn't enough to show that the entire struct is the same, because it just compares - // the cached serialized/deserialized roots. (NoteCommitmentTree::eq() also just compares - // roots.) - assert_eq!(incremental_tree.root(), deserialized_tree.root()); - - incremental_tree.assert_frontier_eq(&deserialized_tree); - // Double-check that the internal format is the same by re-serializing the tree. - let re_serialized_tree = deserialized_tree.as_bytes(); - assert_eq!(serialized_tree, re_serialized_tree); + sprout_checks(incremental_tree, expected_serialized_tree_hex); } /// Check that the sapling tree database serialization format has not changed. 
@@ -224,21 +172,8 @@ fn sapling_note_commitment_tree_serialization() { // The purpose of this test is to make sure the serialization format does // not change by accident. let expected_serialized_tree_hex = "0102007c3ea01a6e3a3d90cf59cd789e467044b5cd78eb2c84cc6816f960746d0e036c0162324ff2c329e99193a74d28a585a3c167a93bf41a255135529c913bd9b1e66601ddaa1ab86de5c153993414f34ba97e9674c459dfadde112b89eeeafa0e5a204c"; - let serialized_tree = incremental_tree.as_bytes(); - assert_eq!(hex::encode(&serialized_tree), expected_serialized_tree_hex); - let deserialized_tree = SaplingNoteCommitmentTree::from_bytes(&serialized_tree); - - // This check isn't enough to show that the entire struct is the same, because it just compares - // the cached serialized/deserialized roots. (NoteCommitmentTree::eq() also just compares - // roots.) - assert_eq!(incremental_tree.root(), deserialized_tree.root()); - - incremental_tree.assert_frontier_eq(&deserialized_tree); - - // Double-check that the internal format is the same by re-serializing the tree. - let re_serialized_tree = deserialized_tree.as_bytes(); - assert_eq!(serialized_tree, re_serialized_tree); + sapling_checks(incremental_tree, expected_serialized_tree_hex); } /// Check that the sapling tree database serialization format has not changed for one commitment. @@ -270,21 +205,8 @@ fn sapling_note_commitment_tree_serialization_one() { // The purpose of this test is to make sure the serialization format does // not change by accident. 
let expected_serialized_tree_hex = "010000225747f3b5d5dab4e5a424f81f85c904ff43286e0f3fd07ef0b8c6a627b1145800012c60c7de033d7539d123fb275011edfe08d57431676981d162c816372063bc71"; - let serialized_tree = incremental_tree.as_bytes(); - assert_eq!(hex::encode(&serialized_tree), expected_serialized_tree_hex); - - let deserialized_tree = SaplingNoteCommitmentTree::from_bytes(&serialized_tree); - // This check isn't enough to show that the entire struct is the same, because it just compares - // the cached serialized/deserialized roots. (NoteCommitmentTree::eq() also just compares - // roots.) - assert_eq!(incremental_tree.root(), deserialized_tree.root()); - - incremental_tree.assert_frontier_eq(&deserialized_tree); - - // Double-check that the internal format is the same by re-serializing the tree. - let re_serialized_tree = deserialized_tree.as_bytes(); - assert_eq!(serialized_tree, re_serialized_tree); + sapling_checks(incremental_tree, expected_serialized_tree_hex); } /// Check that the sapling tree database serialization format has not changed when the number of @@ -329,21 +251,8 @@ fn sapling_note_commitment_tree_serialization_pow2() { // The purpose of this test is to make sure the serialization format does // not change by accident. let expected_serialized_tree_hex = "010701f43e3aac61e5a753062d4d0508c26ceaf5e4c0c58ba3c956e104b5d2cf67c41c3a3661bc12b72646c94bc6c92796e81953985ee62d80a9ec3645a9a95740ac15025991131c5c25911b35fcea2a8343e2dfd7a4d5b45493390e0cb184394d91c349002df68503da9247dfde6585cb8c9fa94897cf21735f8fc1b32116ef474de05c01d23765f3d90dfd97817ed6d995bd253d85967f77b9f1eaef6ecbcb0ef6796812"; - let serialized_tree = incremental_tree.as_bytes(); - assert_eq!(hex::encode(&serialized_tree), expected_serialized_tree_hex); - let deserialized_tree = SaplingNoteCommitmentTree::from_bytes(&serialized_tree); - - // This check isn't enough to show that the entire struct is the same, because it just compares - // the cached serialized/deserialized roots. 
(NoteCommitmentTree::eq() also just compares - // roots.) - assert_eq!(incremental_tree.root(), deserialized_tree.root()); - - incremental_tree.assert_frontier_eq(&deserialized_tree); - - // Double-check that the internal format is the same by re-serializing the tree. - let re_serialized_tree = deserialized_tree.as_bytes(); - assert_eq!(serialized_tree, re_serialized_tree); + sapling_checks(incremental_tree, expected_serialized_tree_hex); } /// Check that the orchard tree database serialization format has not changed. @@ -389,21 +298,8 @@ fn orchard_note_commitment_tree_serialization() { // The purpose of this test is to make sure the serialization format does // not change by accident. let expected_serialized_tree_hex = "010200ee9488053a30c596b43014105d3477e6f578c89240d1d1ee1743b77bb6adc40a01a34b69a4e4d9ccf954d46e5da1004d361a5497f511aeb4d481d23c0be177813301a0be6dab19bc2c65d8299258c16e14d48ec4d4959568c6412aa85763c222a702"; - let serialized_tree = incremental_tree.as_bytes(); - assert_eq!(hex::encode(&serialized_tree), expected_serialized_tree_hex); - - let deserialized_tree = OrchardNoteCommitmentTree::from_bytes(&serialized_tree); - - // This check isn't enough to show that the entire struct is the same, because it just compares - // the cached serialized/deserialized roots. (NoteCommitmentTree::eq() also just compares - // roots.) - assert_eq!(incremental_tree.root(), deserialized_tree.root()); - - incremental_tree.assert_frontier_eq(&deserialized_tree); - // Double-check that the internal format is the same by re-serializing the tree. - let re_serialized_tree = deserialized_tree.as_bytes(); - assert_eq!(serialized_tree, re_serialized_tree); + orchard_checks(incremental_tree, expected_serialized_tree_hex); } /// Check that the orchard tree database serialization format has not changed for one commitment. 
@@ -437,21 +333,8 @@ fn orchard_note_commitment_tree_serialization_one() { // The purpose of this test is to make sure the serialization format does // not change by accident. let expected_serialized_tree_hex = "01000068135cf49933229099a44ec99a75e1e1cb4640f9b5bdec6b3223856fea16390a000178afd4da59c541e9c2f317f9aff654f1fb38d14dc99431cbbfa93601c7068117"; - let serialized_tree = incremental_tree.as_bytes(); - assert_eq!(hex::encode(&serialized_tree), expected_serialized_tree_hex); - - let deserialized_tree = OrchardNoteCommitmentTree::from_bytes(&serialized_tree); - - // This check isn't enough to show that the entire struct is the same, because it just compares - // the cached serialized/deserialized roots. (NoteCommitmentTree::eq() also just compares - // roots.) - assert_eq!(incremental_tree.root(), deserialized_tree.root()); - - incremental_tree.assert_frontier_eq(&deserialized_tree); - // Double-check that the internal format is the same by re-serializing the tree. - let re_serialized_tree = deserialized_tree.as_bytes(); - assert_eq!(serialized_tree, re_serialized_tree); + orchard_checks(incremental_tree, expected_serialized_tree_hex); } /// Check that the orchard tree database serialization format has not changed when the number of @@ -496,19 +379,156 @@ fn orchard_note_commitment_tree_serialization_pow2() { // The purpose of this test is to make sure the serialization format does // not change by accident. 
let expected_serialized_tree_hex = "01010178315008fb2998b430a5731d6726207dc0f0ec81ea64af5cf612956901e72f0eee9488053a30c596b43014105d3477e6f578c89240d1d1ee1743b77bb6adc40a0001d3d525931005e45f5a29bc82524e871e5ee1b6d77839deb741a6e50cd99fdf1a"; + + orchard_checks(incremental_tree, expected_serialized_tree_hex); +} + +fn sprout_checks(incremental_tree: SproutNoteCommitmentTree, expected_serialized_tree_hex: &str) { let serialized_tree = incremental_tree.as_bytes(); + assert_eq!(hex::encode(&serialized_tree), expected_serialized_tree_hex); - let deserialized_tree = OrchardNoteCommitmentTree::from_bytes(&serialized_tree); + let deserialized_tree = SproutNoteCommitmentTree::from_bytes(&serialized_tree); + + // Get a legacy deserialized tree from the deserialized tree. + let deserialized_legacy_tree = LegacySproutNoteCommitmentTree::from(deserialized_tree.clone()); + + // Get a deserialized tree from a legacy deserialized tree. + let deserialized_legacy_tree_as_new = deserialized_legacy_tree.into(); + + // Check frontiers are the same. + incremental_tree.assert_frontier_eq(&deserialized_tree); + incremental_tree.assert_frontier_eq(&deserialized_legacy_tree_as_new); + + // Check cached roots are the same. + assert_eq!(incremental_tree.root(), deserialized_tree.root()); + assert_eq!( + incremental_tree.root(), + deserialized_legacy_tree_as_new.root() + ); + + // Check recalculated roots are the same + assert_eq!( + incremental_tree.recalculate_root(), + deserialized_tree.recalculate_root() + ); + assert_eq!( + incremental_tree.recalculate_root(), + deserialized_legacy_tree_as_new.recalculate_root() + ); + + // Check reclaculated and cached roots are the same + assert_eq!( + incremental_tree.recalculate_root(), + deserialized_tree + .cached_root() + .expect("cached root was serialized") + ); + + // Double-check that the internal format is the same by re-serializing the tree. 
+ let re_serialized_tree = deserialized_tree.as_bytes(); + let re_serialized_legacy_tree = deserialized_legacy_tree_as_new.as_bytes(); + + assert_eq!(serialized_tree, re_serialized_tree); + assert_eq!(re_serialized_legacy_tree, re_serialized_tree); +} + +fn sapling_checks(incremental_tree: SaplingNoteCommitmentTree, expected_serialized_tree_hex: &str) { + let serialized_tree = incremental_tree.as_bytes(); + + assert_eq!(hex::encode(&serialized_tree), expected_serialized_tree_hex); + + let deserialized_tree = SaplingNoteCommitmentTree::from_bytes(&serialized_tree); + + // Get a legacy deserialized tree from the deserialized tree. + let deserialized_legacy_tree = LegacySaplingNoteCommitmentTree::from(deserialized_tree.clone()); + + // Get a deserialized tree from a legacy deserialized tree. + let deserialized_legacy_tree_as_new = deserialized_legacy_tree.into(); + + // Check frontiers are the same. + incremental_tree.assert_frontier_eq(&deserialized_tree); + incremental_tree.assert_frontier_eq(&deserialized_legacy_tree_as_new); - // This check isn't enough to show that the entire struct is the same, because it just compares - // the cached serialized/deserialized roots. (NoteCommitmentTree::eq() also just compares - // roots.) + // Check cached roots are the same. assert_eq!(incremental_tree.root(), deserialized_tree.root()); + assert_eq!( + incremental_tree.root(), + deserialized_legacy_tree_as_new.root() + ); + + // Check recalculated roots are the same + assert_eq!( + incremental_tree.recalculate_root(), + deserialized_tree.recalculate_root() + ); + assert_eq!( + incremental_tree.recalculate_root(), + deserialized_legacy_tree_as_new.recalculate_root() + ); + + // Check reclaculated and cached roots are the same + assert_eq!( + incremental_tree.recalculate_root(), + deserialized_tree + .cached_root() + .expect("cached root was serialized") + ); + // Double-check that the internal format is the same by re-serializing the tree. 
+ let re_serialized_tree = deserialized_tree.as_bytes(); + let re_serialized_legacy_tree = deserialized_legacy_tree_as_new.as_bytes(); + + assert_eq!(serialized_tree, re_serialized_tree); + assert_eq!(re_serialized_legacy_tree, re_serialized_tree); +} + +fn orchard_checks(incremental_tree: OrchardNoteCommitmentTree, expected_serialized_tree_hex: &str) { + let serialized_tree = incremental_tree.as_bytes(); + + assert_eq!(hex::encode(&serialized_tree), expected_serialized_tree_hex); + + let deserialized_tree = OrchardNoteCommitmentTree::from_bytes(&serialized_tree); + + // Get a legacy deserialized tree from the deserialized tree. + let deserialized_legacy_tree = LegacyOrchardNoteCommitmentTree::from(deserialized_tree.clone()); + + // Get a deserialized tree from a legacy deserialized tree. + let deserialized_legacy_tree_as_new = deserialized_legacy_tree.into(); + + // Check frontiers are the same. incremental_tree.assert_frontier_eq(&deserialized_tree); + incremental_tree.assert_frontier_eq(&deserialized_legacy_tree_as_new); + + // Check cached roots are the same. + assert_eq!(incremental_tree.root(), deserialized_tree.root()); + assert_eq!( + incremental_tree.root(), + deserialized_legacy_tree_as_new.root() + ); + + // Check recalculated roots are the same + assert_eq!( + incremental_tree.recalculate_root(), + deserialized_tree.recalculate_root() + ); + assert_eq!( + incremental_tree.recalculate_root(), + deserialized_legacy_tree_as_new.recalculate_root() + ); + + // Check reclaculated and cached roots are the same + assert_eq!( + incremental_tree.recalculate_root(), + deserialized_tree + .cached_root() + .expect("cached root was serialized") + ); // Double-check that the internal format is the same by re-serializing the tree. 
let re_serialized_tree = deserialized_tree.as_bytes(); + let re_serialized_legacy_tree = deserialized_legacy_tree_as_new.as_bytes(); + assert_eq!(serialized_tree, re_serialized_tree); + assert_eq!(re_serialized_legacy_tree, re_serialized_tree); } From 1e47fe5b2bb1b9579fedffbcf1d830194f753666 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 18 Jul 2023 00:10:40 +0000 Subject: [PATCH 218/265] build(deps): bump tj-actions/changed-files from 37.1.0 to 37.1.2 (#7220) Bumps [tj-actions/changed-files](https://github.com/tj-actions/changed-files) from 37.1.0 to 37.1.2. - [Release notes](https://github.com/tj-actions/changed-files/releases) - [Changelog](https://github.com/tj-actions/changed-files/blob/main/HISTORY.md) - [Commits](https://github.com/tj-actions/changed-files/compare/v37.1.0...v37.1.2) --- updated-dependencies: - dependency-name: tj-actions/changed-files dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/lint.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 434d29b32ff..efcbdcca7c7 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -37,7 +37,7 @@ jobs: - name: Rust files id: changed-files-rust - uses: tj-actions/changed-files@v37.1.0 + uses: tj-actions/changed-files@v37.1.2 with: files: | **/*.rs @@ -49,7 +49,7 @@ jobs: - name: Workflow files id: changed-files-workflows - uses: tj-actions/changed-files@v37.1.0 + uses: tj-actions/changed-files@v37.1.2 with: files: | .github/workflows/*.yml From c885de4abba000ccce74b33d49d461bbf9ac0fe5 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 18 Jul 2023 02:37:35 +0000 Subject: [PATCH 219/265] build(deps): bump semver from 1.0.17 to 1.0.18 (#7227) Bumps [semver](https://github.com/dtolnay/semver) from 1.0.17 to 1.0.18. - [Release notes](https://github.com/dtolnay/semver/releases) - [Commits](https://github.com/dtolnay/semver/compare/1.0.17...1.0.18) --- updated-dependencies: - dependency-name: semver dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 12 ++++++------ zebra-state/Cargo.toml | 2 +- zebrad/Cargo.toml | 2 +- 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 463aaf2d59f..c9983717e3d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -18,7 +18,7 @@ dependencies = [ "once_cell", "regex", "secrecy", - "semver 1.0.17", + "semver 1.0.18", "serde", "termcolor", "toml 0.5.11", @@ -3755,7 +3755,7 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" dependencies = [ - "semver 1.0.17", + "semver 1.0.18", ] [[package]] @@ -3954,9 +3954,9 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.17" +version = "1.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bebd363326d05ec3e2f532ab7660680f3b02130d780c299bca73469d521bc0ed" +checksum = "b0293b4b29daaf487284529cc2f5675b8e57c61f70167ba415a463651fd6a918" dependencies = [ "serde", ] @@ -5915,7 +5915,7 @@ dependencies = [ "regex", "rlimit", "rocksdb", - "semver 1.0.17", + "semver 1.0.18", "serde", "serde_json", "spandoc", @@ -6010,7 +6010,7 @@ dependencies = [ "rand 0.8.5", "rayon", "regex", - "semver 1.0.17", + "semver 1.0.18", "sentry", "serde", "serde_json", diff --git a/zebra-state/Cargo.toml b/zebra-state/Cargo.toml index a459609f99f..198fc33bfb7 100644 --- a/zebra-state/Cargo.toml +++ b/zebra-state/Cargo.toml @@ -56,7 +56,7 @@ mset = "0.1.1" regex = "1.9.1" rlimit = "0.10.0" rocksdb = { version = "0.21.0", default-features = false, features = ["lz4"] } -semver = "1.0.17" +semver = "1.0.18" serde = { version = "1.0.168", features = ["serde_derive"] } tempfile = "3.5.0" thiserror = "1.0.43" diff --git a/zebrad/Cargo.toml b/zebrad/Cargo.toml index dd9d33f24c7..3a3571eff81 100644 --- a/zebrad/Cargo.toml +++ b/zebrad/Cargo.toml @@ -158,7 +158,7 @@ 
chrono = { version = "0.4.26", default-features = false, features = ["clock", "s humantime-serde = "1.1.1" indexmap = "2.0.0" lazy_static = "1.4.0" -semver = "1.0.17" +semver = "1.0.18" serde = { version = "1.0.168", features = ["serde_derive"] } toml = "0.7.6" From 3bbe3cec4f08ed76dc04ca1674fd7187f532a7eb Mon Sep 17 00:00:00 2001 From: teor Date: Tue, 18 Jul 2023 14:53:26 +1000 Subject: [PATCH 220/265] fix(panic): Stop panicking on async task cancellation on shutdown in network and state futures (#7219) * Add an async-error feature and an initial module structure * Implement checking for panics in OS threads and async tasks * Implement waiting for panics in OS threads and async tasks * Add a TODO to simplify some state request error handling * Use the new panic-checking methods in zebra-state * Use new panic-checking methods in zebra-network * fixup! Implement waiting for panics in OS threads and async tasks * Replace existing async code with generic panic-checking methods * Simplify trait to a single method * Move thread panic code into generic trait impls * Simplify option handling Co-authored-by: Arya * Fix comment Co-authored-by: Arya * Add missing track_caller --------- Co-authored-by: Arya --- zebra-chain/Cargo.toml | 12 +- zebra-chain/src/diagnostic.rs | 15 +- zebra-chain/src/diagnostic/task.rs | 47 +++++ zebra-chain/src/diagnostic/task/future.rs | 93 +++++++++ zebra-chain/src/diagnostic/task/thread.rs | 108 ++++++++++ zebra-network/Cargo.toml | 2 +- zebra-network/src/peer_set/candidate_set.rs | 8 +- zebra-network/src/peer_set/initialize.rs | 192 ++++++------------ zebra-state/Cargo.toml | 2 +- zebra-state/src/response.rs | 3 + zebra-state/src/service.rs | 76 ++----- .../finalized_state/disk_format/upgrade.rs | 41 +--- 12 files changed, 367 insertions(+), 232 deletions(-) create mode 100644 zebra-chain/src/diagnostic/task.rs create mode 100644 zebra-chain/src/diagnostic/task/future.rs create mode 100644 zebra-chain/src/diagnostic/task/thread.rs diff --git 
a/zebra-chain/Cargo.toml b/zebra-chain/Cargo.toml index d003b92b9b5..74272274200 100644 --- a/zebra-chain/Cargo.toml +++ b/zebra-chain/Cargo.toml @@ -24,6 +24,11 @@ json-conversion = [ "serde_json", ] +# Async error handling convenience traits +async-error = [ + "tokio", +] + # Experimental mining RPC support getblocktemplate-rpcs = [ "zcash_address", @@ -39,7 +44,7 @@ proptest-impl = [ "proptest-derive", "rand", "rand_chacha", - "tokio", + "tokio/tracing", "zebra-test", ] @@ -108,6 +113,9 @@ reddsa = "0.5.0" # Production feature json-conversion serde_json = { version = "1.0.100", optional = true } +# Production feature async-error and testing feature proptest-impl +tokio = { version = "1.29.1", optional = true } + # Experimental feature getblocktemplate-rpcs zcash_address = { version = "0.3.0", optional = true } @@ -118,8 +126,6 @@ proptest-derive = { version = "0.3.0", optional = true } rand = { version = "0.8.5", optional = true } rand_chacha = { version = "0.3.1", optional = true } -tokio = { version = "1.29.1", features = ["tracing"], optional = true } - zebra-test = { path = "../zebra-test/", version = "1.0.0-beta.27", optional = true } [dev-dependencies] diff --git a/zebra-chain/src/diagnostic.rs b/zebra-chain/src/diagnostic.rs index 560838fb32b..1a453143aff 100644 --- a/zebra-chain/src/diagnostic.rs +++ b/zebra-chain/src/diagnostic.rs @@ -1,6 +1,15 @@ -//! Tracing the execution time of functions. -//! -//! TODO: also trace polling time for futures, using a `Future` wrapper +//! Diagnostic types and functions for Zebra: +//! - code performance +//! - task handling +//! - errors and panics + +pub mod task; + +// Tracing the execution time of functions. 
+// +// TODO: +// - move this to a `timing` submodule +// - also trace polling time for futures, using a `Future` wrapper use std::time::{Duration, Instant}; diff --git a/zebra-chain/src/diagnostic/task.rs b/zebra-chain/src/diagnostic/task.rs new file mode 100644 index 00000000000..2d43f695537 --- /dev/null +++ b/zebra-chain/src/diagnostic/task.rs @@ -0,0 +1,47 @@ +//! Diagnostic types and functions for Zebra tasks: +//! - OS thread handling +//! - async future task handling +//! - errors and panics + +#[cfg(feature = "async-error")] +pub mod future; + +pub mod thread; + +/// A trait that checks a task's return value for panics. +pub trait CheckForPanics { + /// The output type, after removing panics from `Self`. + type Output; + + /// Check if `self` contains a panic payload or an unexpected termination, then panic. + /// Otherwise, return the non-panic part of `self`. + /// + /// # Panics + /// + /// If `self` contains a panic payload or an unexpected termination. + #[track_caller] + fn check_for_panics(self) -> Self::Output; +} + +/// A trait that waits for a task to finish, then handles panics and cancellations. +pub trait WaitForPanics { + /// The underlying task output, after removing panics and unwrapping termination results. + type Output; + + /// Waits for `self` to finish, then check if its output is: + /// - a panic payload: resume that panic, + /// - an unexpected termination: panic with that error, + /// - an expected termination: hang waiting for shutdown. + /// + /// Otherwise, returns the task return value of `self`. + /// + /// # Panics + /// + /// If `self` contains a panic payload or an unexpected termination. + /// + /// # Hangs + /// + /// If `self` contains an expected termination, and we're shutting down anyway. 
+ #[track_caller] + fn wait_for_panics(self) -> Self::Output; +} diff --git a/zebra-chain/src/diagnostic/task/future.rs b/zebra-chain/src/diagnostic/task/future.rs new file mode 100644 index 00000000000..431b13ed94f --- /dev/null +++ b/zebra-chain/src/diagnostic/task/future.rs @@ -0,0 +1,93 @@ +//! Diagnostic types and functions for Zebra async future tasks: +//! - task handles +//! - errors and panics + +use std::{future, panic}; + +use futures::future::{BoxFuture, FutureExt}; +use tokio::task::{JoinError, JoinHandle}; + +use crate::shutdown::is_shutting_down; + +use super::{CheckForPanics, WaitForPanics}; + +/// This is the return type of the [`JoinHandle`] future. +impl CheckForPanics for Result { + /// The [`JoinHandle`]'s task output, after resuming any panics, + /// and ignoring task cancellations on shutdown. + type Output = Result; + + /// Returns the task result if the task finished normally. + /// Otherwise, resumes any panics, logs unexpected errors, and ignores any expected errors. + /// + /// If the task finished normally, returns `Some(T)`. + /// If the task was cancelled, returns `None`. + #[track_caller] + fn check_for_panics(self) -> Self::Output { + match self { + Ok(task_output) => Ok(task_output), + Err(join_error) => Err(join_error.check_for_panics()), + } + } +} + +impl CheckForPanics for JoinError { + /// The [`JoinError`] after resuming any panics, and logging any unexpected task cancellations. + type Output = JoinError; + + /// Resume any panics and panic on unexpected task cancellations. + /// Always returns [`JoinError::Cancelled`](JoinError::is_cancelled). + #[track_caller] + fn check_for_panics(self) -> Self::Output { + match self.try_into_panic() { + Ok(panic_payload) => panic::resume_unwind(panic_payload), + + // We could ignore this error, but then we'd have to change the return type. 
+ Err(task_cancelled) if is_shutting_down() => { + debug!( + ?task_cancelled, + "ignoring cancelled task because Zebra is shutting down" + ); + + task_cancelled + } + + Err(task_cancelled) => { + panic!("task cancelled during normal Zebra operation: {task_cancelled:?}"); + } + } + } +} + +impl WaitForPanics for JoinHandle +where + T: Send + 'static, +{ + type Output = BoxFuture<'static, T>; + + /// Returns a future which waits for `self` to finish, then checks if its output is: + /// - a panic payload: resume that panic, + /// - an unexpected termination: panic with that error, + /// - an expected termination: hang waiting for shutdown. + /// + /// Otherwise, returns the task return value of `self`. + /// + /// # Panics + /// + /// If `self` contains a panic payload, or [`JoinHandle::abort()`] has been called on `self`. + /// + /// # Hangs + /// + /// If `self` contains an expected termination, and we're shutting down anyway. + /// Futures hang by returning `Pending` and not setting a waker, so this uses minimal resources. + #[track_caller] + fn wait_for_panics(self) -> Self::Output { + async move { + match self.await.check_for_panics() { + Ok(task_output) => task_output, + Err(_expected_cancel_error) => future::pending().await, + } + } + .boxed() + } +} diff --git a/zebra-chain/src/diagnostic/task/thread.rs b/zebra-chain/src/diagnostic/task/thread.rs new file mode 100644 index 00000000000..84df3fac4aa --- /dev/null +++ b/zebra-chain/src/diagnostic/task/thread.rs @@ -0,0 +1,108 @@ +//! Diagnostic types and functions for Zebra OS thread tasks: +//! - task handles +//! - errors and panics + +use std::{ + panic, + sync::Arc, + thread::{self, JoinHandle}, +}; + +use super::{CheckForPanics, WaitForPanics}; + +impl CheckForPanics for thread::Result { + type Output = T; + + /// Panics if the thread panicked. + /// + /// Threads can't be cancelled except by using a panic, so there are no thread errors here. 
+ #[track_caller] + fn check_for_panics(self) -> Self::Output { + match self { + // The value returned by the thread when it finished. + Ok(thread_output) => thread_output, + + // A thread error is always a panic. + Err(panic_payload) => panic::resume_unwind(panic_payload), + } + } +} + +impl WaitForPanics for JoinHandle { + type Output = T; + + /// Waits for the thread to finish, then panics if the thread panicked. + #[track_caller] + fn wait_for_panics(self) -> Self::Output { + self.join().check_for_panics() + } +} + +impl WaitForPanics for Arc> { + type Output = Option; + + /// If this is the final `Arc`, waits for the thread to finish, then panics if the thread + /// panicked. Otherwise, returns the thread's return value. + /// + /// If this is not the final `Arc`, drops the handle and immediately returns `None`. + #[track_caller] + fn wait_for_panics(self) -> Self::Output { + // If we are the last Arc with a reference to this handle, + // we can wait for it and propagate any panics. + // + // We use into_inner() because it guarantees that exactly one of the tasks gets the + // JoinHandle. try_unwrap() lets us keep the JoinHandle, but it can also miss panics. + // + // This is more readable as an expanded statement. + #[allow(clippy::manual_map)] + if let Some(handle) = Arc::into_inner(self) { + Some(handle.wait_for_panics()) + } else { + None + } + } +} + +impl CheckForPanics for &mut Option>> { + type Output = Option; + + /// If this is the final `Arc`, checks if the thread has finished, then panics if the thread + /// panicked. Otherwise, returns the thread's return value. + /// + /// If the thread has not finished, or this is not the final `Arc`, returns `None`. + #[track_caller] + fn check_for_panics(self) -> Self::Output { + let handle = self.take()?; + + if handle.is_finished() { + // This is the same as calling `self.wait_for_panics()`, but we can't do that, + // because we've taken `self`. 
+ #[allow(clippy::manual_map)] + return handle.wait_for_panics(); + } + + *self = Some(handle); + + None + } +} + +impl WaitForPanics for &mut Option>> { + type Output = Option; + + /// If this is the final `Arc`, waits for the thread to finish, then panics if the thread + /// panicked. Otherwise, returns the thread's return value. + /// + /// If this is not the final `Arc`, drops the handle and returns `None`. + #[track_caller] + fn wait_for_panics(self) -> Self::Output { + // This is more readable as an expanded statement. + #[allow(clippy::manual_map)] + if let Some(output) = self.take()?.wait_for_panics() { + Some(output) + } else { + // Some other task has a reference, so we should give up ours to let them use it. + None + } + } +} diff --git a/zebra-network/Cargo.toml b/zebra-network/Cargo.toml index e017f1aa71f..a11f520ab3f 100644 --- a/zebra-network/Cargo.toml +++ b/zebra-network/Cargo.toml @@ -83,7 +83,7 @@ howudoin = { version = "0.1.2", optional = true } proptest = { version = "1.2.0", optional = true } proptest-derive = { version = "0.3.0", optional = true } -zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.27" } +zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.27", features = ["async-error"] } [dev-dependencies] proptest = "1.2.0" diff --git a/zebra-network/src/peer_set/candidate_set.rs b/zebra-network/src/peer_set/candidate_set.rs index f3126e6adde..f951bda5b9b 100644 --- a/zebra-network/src/peer_set/candidate_set.rs +++ b/zebra-network/src/peer_set/candidate_set.rs @@ -8,7 +8,7 @@ use tokio::time::{sleep_until, timeout, Instant}; use tower::{Service, ServiceExt}; use tracing::Span; -use zebra_chain::serialization::DateTime32; +use zebra_chain::{diagnostic::task::WaitForPanics, serialization::DateTime32}; use crate::{ constants, meta_addr::MetaAddrChange, peer_set::set::MorePeers, types::MetaAddr, AddressBook, @@ -348,8 +348,8 @@ where tokio::task::spawn_blocking(move || { span.in_scope(|| 
address_book.lock().unwrap().extend(addrs)) }) + .wait_for_panics() .await - .expect("panic in new peers address book update task"); } /// Returns the next candidate for a connection attempt, if any are available. @@ -403,8 +403,8 @@ where // Correctness: Spawn address book accesses on a blocking thread, to avoid deadlocks (see #1976). let span = Span::current(); let next_peer = tokio::task::spawn_blocking(move || span.in_scope(next_peer)) - .await - .expect("panic in next peer address book task")?; + .wait_for_panics() + .await?; // Security: rate-limit new outbound peer connections sleep_until(self.min_next_handshake).await; diff --git a/zebra-network/src/peer_set/initialize.rs b/zebra-network/src/peer_set/initialize.rs index 3dabd7279b5..6919b2ad09c 100644 --- a/zebra-network/src/peer_set/initialize.rs +++ b/zebra-network/src/peer_set/initialize.rs @@ -23,8 +23,7 @@ use rand::seq::SliceRandom; use tokio::{ net::{TcpListener, TcpStream}, sync::broadcast, - task::JoinError, - time::{error::Elapsed, sleep, Instant}, + time::{sleep, Instant}, }; use tokio_stream::wrappers::IntervalStream; use tower::{ @@ -33,11 +32,11 @@ use tower::{ use tracing::Span; use tracing_futures::Instrument; -use zebra_chain::chain_tip::ChainTip; +use zebra_chain::{chain_tip::ChainTip, diagnostic::task::WaitForPanics}; use crate::{ address_book_updater::AddressBookUpdater, - constants::{self, HANDSHAKE_TIMEOUT}, + constants, meta_addr::{MetaAddr, MetaAddrChange}, peer::{ self, address_is_valid_for_inbound_listeners, HandshakeRequest, MinimumPeerVersion, @@ -207,18 +206,8 @@ where // Wait for the initial seed peer count let mut active_outbound_connections = initial_peers_join + .wait_for_panics() .await - .unwrap_or_else(|e @ JoinError { .. }| { - if e.is_panic() { - panic!("panic in initial peer connections task: {e:?}"); - } else { - info!( - "task error during initial peer connections: {e:?},\ - is Zebra shutting down?" 
- ); - Err(e.into()) - } - }) .expect("unexpected error connecting to initial peers"); let active_initial_peer_count = active_outbound_connections.update_count(); @@ -354,22 +343,11 @@ where } .in_current_span(), ) + .wait_for_panics() }) .collect(); while let Some(handshake_result) = handshakes.next().await { - let handshake_result = handshake_result.unwrap_or_else(|e @ JoinError { .. }| { - if e.is_panic() { - panic!("panic in initial peer connection: {e:?}"); - } else { - info!( - "task error during initial peer connection: {e:?},\ - is Zebra shutting down?" - ); - // Fake the address, it doesn't matter because we're shutting down anyway - Err((PeerSocketAddr::unspecified(), e.into())) - } - }); match handshake_result { Ok(change) => { handshake_success_total += 1; @@ -637,36 +615,9 @@ where peerset_tx.clone(), ) .await? - .map(|res| match res { - Ok(()) => (), - Err(e @ JoinError { .. }) => { - if e.is_panic() { - panic!("panic during inbound handshaking: {e:?}"); - } else { - info!( - "task error during inbound handshaking: {e:?}, is Zebra shutting down?" - ) - } - } - }); - - let handshake_timeout = tokio::time::timeout( - // Only trigger this timeout if the inner handshake timeout fails - HANDSHAKE_TIMEOUT + Duration::from_millis(500), - handshake_task, - ) - .map(|res| match res { - Ok(()) => (), - Err(_e @ Elapsed { .. }) => { - info!( - "timeout in spawned accept_inbound_handshake() task: \ - inner task should have timed out already" - ); - } - }); + .wait_for_panics(); - // This timeout helps locate inbound peer connection hangs, see #6763 for details. - handshakes.push(Box::pin(handshake_timeout)); + handshakes.push(handshake_task); // Rate-limit inbound connection handshakes. // But sleep longer after a successful connection, @@ -918,80 +869,64 @@ where // Spawn each handshake or crawl into an independent task, so handshakes can make // progress while crawls are running. 
- let handshake_or_crawl_handle = tokio::spawn(async move { - // Try to get the next available peer for a handshake. - // - // candidates.next() has a short timeout, and briefly holds the address - // book lock, so it shouldn't hang. - // - // Hold the lock for as short a time as possible. - let candidate = { candidates.lock().await.next().await }; - - if let Some(candidate) = candidate { - // we don't need to spawn here, because there's nothing running concurrently - dial( - candidate, - outbound_connector, - outbound_connection_tracker, - peerset_tx, - address_book, - demand_tx, - ) - .await?; - - Ok(HandshakeFinished) - } else { - // There weren't any peers, so try to get more peers. - debug!("demand for peers but no available candidates"); + let handshake_or_crawl_handle = tokio::spawn( + async move { + // Try to get the next available peer for a handshake. + // + // candidates.next() has a short timeout, and briefly holds the address + // book lock, so it shouldn't hang. + // + // Hold the lock for as short a time as possible. + let candidate = { candidates.lock().await.next().await }; + + if let Some(candidate) = candidate { + // we don't need to spawn here, because there's nothing running concurrently + dial( + candidate, + outbound_connector, + outbound_connection_tracker, + peerset_tx, + address_book, + demand_tx, + ) + .await?; + + Ok(HandshakeFinished) + } else { + // There weren't any peers, so try to get more peers. 
+ debug!("demand for peers but no available candidates"); - crawl(candidates, demand_tx).await?; + crawl(candidates, demand_tx).await?; - Ok(DemandCrawlFinished) - } - }.in_current_span()) - .map(|res| match res { - Ok(crawler_action) => crawler_action, - Err(e @ JoinError {..}) => { - if e.is_panic() { - panic!("panic during outbound handshake: {e:?}"); - } else { - info!("task error during outbound handshake: {e:?}, is Zebra shutting down?") + Ok(DemandCrawlFinished) } - // Just fake it - Ok(HandshakeFinished) } - }); + .in_current_span(), + ) + .wait_for_panics(); - handshakes.push(Box::pin(handshake_or_crawl_handle)); + handshakes.push(handshake_or_crawl_handle); } Ok(TimerCrawl { tick }) => { let candidates = candidates.clone(); let demand_tx = demand_tx.clone(); - let crawl_handle = tokio::spawn(async move { - debug!( - ?tick, - "crawling for more peers in response to the crawl timer" - ); + let crawl_handle = tokio::spawn( + async move { + debug!( + ?tick, + "crawling for more peers in response to the crawl timer" + ); - crawl(candidates, demand_tx).await?; + crawl(candidates, demand_tx).await?; - Ok(TimerCrawlFinished) - }.in_current_span()) - .map(move |res| match res { - Ok(crawler_action) => crawler_action, - Err(e @ JoinError {..}) => { - if e.is_panic() { - panic!("panic during outbound TimerCrawl: {tick:?} {e:?}"); - } else { - info!("task error during outbound TimerCrawl: {e:?}, is Zebra shutting down?") - } - // Just fake it Ok(TimerCrawlFinished) } - }); + .in_current_span(), + ) + .wait_for_panics(); - handshakes.push(Box::pin(crawl_handle)); + handshakes.push(crawl_handle); } // Completed spawned tasks @@ -1162,27 +1097,16 @@ async fn report_failed(address_book: Arc>, addr: M // // Spawn address book accesses on a blocking thread, to avoid deadlocks (see #1976). 
let span = Span::current(); - let task_result = tokio::task::spawn_blocking(move || { + let updated_addr = tokio::task::spawn_blocking(move || { span.in_scope(|| address_book.lock().unwrap().update(addr)) }) + .wait_for_panics() .await; - match task_result { - Ok(updated_addr) => assert_eq!( - updated_addr.map(|addr| addr.addr()), - Some(addr.addr()), - "incorrect address updated by address book: \ - original: {addr:?}, updated: {updated_addr:?}" - ), - Err(e @ JoinError { .. }) => { - if e.is_panic() { - panic!("panic in peer failure address book update task: {e:?}"); - } else { - info!( - "task error during peer failure address book update task: {e:?},\ - is Zebra shutting down?" - ) - } - } - } + assert_eq!( + updated_addr.map(|addr| addr.addr()), + Some(addr.addr()), + "incorrect address updated by address book: \ + original: {addr:?}, updated: {updated_addr:?}" + ); } diff --git a/zebra-state/Cargo.toml b/zebra-state/Cargo.toml index 198fc33bfb7..71ad137af6f 100644 --- a/zebra-state/Cargo.toml +++ b/zebra-state/Cargo.toml @@ -71,7 +71,7 @@ tracing = "0.1.37" elasticsearch = { version = "8.5.0-alpha.1", default-features = false, features = ["rustls-tls"], optional = true } serde_json = { version = "1.0.100", package = "serde_json", optional = true } -zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.27" } +zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.27", features = ["async-error"] } # prod feature progress-bar howudoin = { version = "0.1.2", optional = true } diff --git a/zebra-state/src/response.rs b/zebra-state/src/response.rs index c238258f559..ba7f8d29ba8 100644 --- a/zebra-state/src/response.rs +++ b/zebra-state/src/response.rs @@ -32,6 +32,9 @@ pub enum Response { Depth(Option), /// Response to [`Request::Tip`] with the current best chain tip. 
+ // + // TODO: remove this request, and replace it with a call to + // `LatestChainTip::best_tip_height_and_hash()` Tip(Option<(block::Height, block::Hash)>), /// Response to [`Request::BlockLocator`] with a block locator object. diff --git a/zebra-state/src/service.rs b/zebra-state/src/service.rs index e3955b63e86..d2a8eb237a0 100644 --- a/zebra-state/src/service.rs +++ b/zebra-state/src/service.rs @@ -43,7 +43,7 @@ use tower::buffer::Buffer; use zebra_chain::{ block::{self, CountedHeader, HeightDiff}, - diagnostic::CodeTimer, + diagnostic::{task::WaitForPanics, CodeTimer}, parameters::{Network, NetworkUpgrade}, }; @@ -1209,8 +1209,7 @@ impl Service for ReadStateService { Ok(ReadResponse::Tip(tip)) }) }) - .map(|join_result| join_result.expect("panic in ReadRequest::Tip")) - .boxed() + .wait_for_panics() } // Used by the StateService. @@ -1231,8 +1230,7 @@ impl Service for ReadStateService { Ok(ReadResponse::Depth(depth)) }) }) - .map(|join_result| join_result.expect("panic in ReadRequest::Depth")) - .boxed() + .wait_for_panics() } // Used by the StateService. @@ -1255,10 +1253,7 @@ impl Service for ReadStateService { Ok(ReadResponse::BestChainNextMedianTimePast(median_time_past?)) }) }) - .map(|join_result| { - join_result.expect("panic in ReadRequest::BestChainNextMedianTimePast") - }) - .boxed() + .wait_for_panics() } // Used by the get_block (raw) RPC and the StateService. @@ -1283,8 +1278,7 @@ impl Service for ReadStateService { Ok(ReadResponse::Block(block)) }) }) - .map(|join_result| join_result.expect("panic in ReadRequest::Block")) - .boxed() + .wait_for_panics() } // For the get_raw_transaction RPC and the StateService. @@ -1302,8 +1296,7 @@ impl Service for ReadStateService { Ok(ReadResponse::Transaction(response)) }) }) - .map(|join_result| join_result.expect("panic in ReadRequest::Transaction")) - .boxed() + .wait_for_panics() } // Used by the getblock (verbose) RPC. 
@@ -1332,10 +1325,7 @@ impl Service for ReadStateService { Ok(ReadResponse::TransactionIdsForBlock(transaction_ids)) }) }) - .map(|join_result| { - join_result.expect("panic in ReadRequest::TransactionIdsForBlock") - }) - .boxed() + .wait_for_panics() } ReadRequest::UnspentBestChainUtxo(outpoint) => { @@ -1359,8 +1349,7 @@ impl Service for ReadStateService { Ok(ReadResponse::UnspentBestChainUtxo(utxo)) }) }) - .map(|join_result| join_result.expect("panic in ReadRequest::UnspentBestChainUtxo")) - .boxed() + .wait_for_panics() } // Manually used by the StateService to implement part of AwaitUtxo. @@ -1381,8 +1370,7 @@ impl Service for ReadStateService { Ok(ReadResponse::AnyChainUtxo(utxo)) }) }) - .map(|join_result| join_result.expect("panic in ReadRequest::AnyChainUtxo")) - .boxed() + .wait_for_panics() } // Used by the StateService. @@ -1405,8 +1393,7 @@ impl Service for ReadStateService { )) }) }) - .map(|join_result| join_result.expect("panic in ReadRequest::BlockLocator")) - .boxed() + .wait_for_panics() } // Used by the StateService. @@ -1433,8 +1420,7 @@ impl Service for ReadStateService { Ok(ReadResponse::BlockHashes(block_hashes)) }) }) - .map(|join_result| join_result.expect("panic in ReadRequest::FindBlockHashes")) - .boxed() + .wait_for_panics() } // Used by the StateService. 
@@ -1466,8 +1452,7 @@ impl Service for ReadStateService { Ok(ReadResponse::BlockHeaders(block_headers)) }) }) - .map(|join_result| join_result.expect("panic in ReadRequest::FindBlockHeaders")) - .boxed() + .wait_for_panics() } ReadRequest::SaplingTree(hash_or_height) => { @@ -1491,8 +1476,7 @@ impl Service for ReadStateService { Ok(ReadResponse::SaplingTree(sapling_tree)) }) }) - .map(|join_result| join_result.expect("panic in ReadRequest::SaplingTree")) - .boxed() + .wait_for_panics() } ReadRequest::OrchardTree(hash_or_height) => { @@ -1516,8 +1500,7 @@ impl Service for ReadStateService { Ok(ReadResponse::OrchardTree(orchard_tree)) }) }) - .map(|join_result| join_result.expect("panic in ReadRequest::OrchardTree")) - .boxed() + .wait_for_panics() } // For the get_address_balance RPC. @@ -1542,8 +1525,7 @@ impl Service for ReadStateService { Ok(ReadResponse::AddressBalance(balance)) }) }) - .map(|join_result| join_result.expect("panic in ReadRequest::AddressBalance")) - .boxed() + .wait_for_panics() } // For the get_address_tx_ids RPC. @@ -1576,10 +1558,7 @@ impl Service for ReadStateService { tx_ids.map(ReadResponse::AddressesTransactionIds) }) }) - .map(|join_result| { - join_result.expect("panic in ReadRequest::TransactionIdsByAddresses") - }) - .boxed() + .wait_for_panics() } // For the get_address_utxos RPC. @@ -1605,8 +1584,7 @@ impl Service for ReadStateService { utxos.map(ReadResponse::AddressUtxos) }) }) - .map(|join_result| join_result.expect("panic in ReadRequest::UtxosByAddresses")) - .boxed() + .wait_for_panics() } ReadRequest::CheckBestChainTipNullifiersAndAnchors(unmined_tx) => { @@ -1639,11 +1617,7 @@ impl Service for ReadStateService { Ok(ReadResponse::ValidBestChainTipNullifiersAndAnchors) }) }) - .map(|join_result| { - join_result - .expect("panic in ReadRequest::CheckBestChainTipNullifiersAndAnchors") - }) - .boxed() + .wait_for_panics() } // Used by the get_block and get_block_hash RPCs. 
@@ -1672,8 +1646,7 @@ impl Service for ReadStateService { Ok(ReadResponse::BlockHash(hash)) }) }) - .map(|join_result| join_result.expect("panic in ReadRequest::BestChainBlockHash")) - .boxed() + .wait_for_panics() } // Used by get_block_template RPC. @@ -1712,8 +1685,7 @@ impl Service for ReadStateService { get_block_template_info.map(ReadResponse::ChainInfo) }) }) - .map(|join_result| join_result.expect("panic in ReadRequest::ChainInfo")) - .boxed() + .wait_for_panics() } // Used by getmininginfo, getnetworksolps, and getnetworkhashps RPCs. @@ -1766,8 +1738,7 @@ impl Service for ReadStateService { Ok(ReadResponse::SolutionRate(solution_rate)) }) }) - .map(|join_result| join_result.expect("panic in ReadRequest::SolutionRate")) - .boxed() + .wait_for_panics() } #[cfg(feature = "getblocktemplate-rpcs")] @@ -1815,10 +1786,7 @@ impl Service for ReadStateService { Ok(ReadResponse::ValidBlockProposal) }) }) - .map(|join_result| { - join_result.expect("panic in ReadRequest::CheckBlockProposalValidity") - }) - .boxed() + .wait_for_panics() } } } diff --git a/zebra-state/src/service/finalized_state/disk_format/upgrade.rs b/zebra-state/src/service/finalized_state/disk_format/upgrade.rs index 8527c8f4c76..15c1e003776 100644 --- a/zebra-state/src/service/finalized_state/disk_format/upgrade.rs +++ b/zebra-state/src/service/finalized_state/disk_format/upgrade.rs @@ -2,7 +2,6 @@ use std::{ cmp::Ordering, - panic, sync::{mpsc, Arc}, thread::{self, JoinHandle}, }; @@ -10,7 +9,11 @@ use std::{ use semver::Version; use tracing::Span; -use zebra_chain::{block::Height, parameters::Network}; +use zebra_chain::{ + block::Height, + diagnostic::task::{CheckForPanics, WaitForPanics}, + parameters::Network, +}; use DbFormatChange::*; @@ -482,42 +485,16 @@ impl DbFormatChangeThreadHandle { /// /// This method should be called regularly, so that panics are detected as soon as possible. 
pub fn check_for_panics(&mut self) { - let update_task = self.update_task.take(); - - if let Some(update_task) = update_task { - if update_task.is_finished() { - // We use into_inner() because it guarantees that exactly one of the tasks - // gets the JoinHandle. try_unwrap() lets us keep the JoinHandle, but it can also - // miss panics. - if let Some(update_task) = Arc::into_inner(update_task) { - // We are the last handle with a reference to this task, - // so we can propagate any panics - if let Err(thread_panic) = update_task.join() { - panic::resume_unwind(thread_panic); - } - } - } else { - // It hasn't finished, so we need to put it back - self.update_task = Some(update_task); - } - } + self.update_task.check_for_panics(); } /// Wait for the spawned thread to finish. If it exited with a panic, resume that panic. /// + /// Exits early if the thread has other outstanding handles. + /// /// This method should be called during shutdown. pub fn wait_for_panics(&mut self) { - if let Some(update_task) = self.update_task.take() { - // We use into_inner() because it guarantees that exactly one of the tasks - // gets the JoinHandle. See the comments in check_for_panics(). - if let Some(update_task) = Arc::into_inner(update_task) { - // We are the last handle with a reference to this task, - // so we can propagate any panics - if let Err(thread_panic) = update_task.join() { - panic::resume_unwind(thread_panic); - } - } - } + self.update_task.wait_for_panics(); } } From f9a56351059d016cf70f4661d143c3212198d9e2 Mon Sep 17 00:00:00 2001 From: Marek Date: Tue, 18 Jul 2023 06:54:22 +0200 Subject: [PATCH 221/265] fix(state): Remove workarounds for storing trees (#7218) * Remove duplicate asserts There are the same two asserts above the two removed ones. * Remove workarounds for inserting trees into NFS NFS = non finalized state * Use correct height for constructing new chain We were using the height of the last block instead of the initial block to construct a new chain. 
* Don't push the 0th block into a chain * Don't commit two blocks at the same height * Fix typo * Generate chains with at least two blocks --------- Co-authored-by: teor --- zebra-chain/src/block/height.rs | 3 - zebra-state/src/service/arbitrary.rs | 5 +- .../src/service/non_finalized_state/chain.rs | 95 ++++++------------- .../service/non_finalized_state/tests/prop.rs | 12 +-- .../non_finalized_state/tests/vectors.rs | 5 +- 5 files changed, 44 insertions(+), 76 deletions(-) diff --git a/zebra-chain/src/block/height.rs b/zebra-chain/src/block/height.rs index 1b5b3408177..16f73815410 100644 --- a/zebra-chain/src/block/height.rs +++ b/zebra-chain/src/block/height.rs @@ -205,9 +205,6 @@ fn operator_tests() { assert_eq!(None, Height(i32::MAX as u32) + 1); assert_eq!(None, Height(u32::MAX) + 0); - assert_eq!(Some(Height(2)), Height(1) + 1); - assert_eq!(None, Height::MAX + 1); - // Adding negative numbers assert_eq!(Some(Height(1)), Height(2) + -1); assert_eq!(Some(Height(0)), Height(1) + -1); diff --git a/zebra-state/src/service/arbitrary.rs b/zebra-state/src/service/arbitrary.rs index f7d4e4ddba1..f6185617c59 100644 --- a/zebra-state/src/service/arbitrary.rs +++ b/zebra-state/src/service/arbitrary.rs @@ -173,7 +173,10 @@ impl Strategy for PreparedChain { } let chain = chain.clone().expect("should be generated"); - let count = (1..chain.1.len()).new_tree(runner)?; + // The generated chain should contain at least two blocks: + // 1. the zeroth genesis block, and + // 2. a first block. 
+ let count = (2..chain.1.len()).new_tree(runner)?; Ok(PreparedChainTree { chain: chain.1, count, diff --git a/zebra-state/src/service/non_finalized_state/chain.rs b/zebra-state/src/service/non_finalized_state/chain.rs index 2986919b755..3d92b63d23e 100644 --- a/zebra-state/src/service/non_finalized_state/chain.rs +++ b/zebra-state/src/service/non_finalized_state/chain.rs @@ -521,26 +521,16 @@ impl Chain { let anchor = tree.root(); trace!(?height, ?anchor, "adding sprout tree"); - // TODO: fix test code that incorrectly overwrites trees - #[cfg(not(test))] - { - assert_eq!( - self.sprout_trees_by_height.insert(height, tree.clone()), - None, - "incorrect overwrite of sprout tree: trees must be reverted then inserted", - ); - assert_eq!( - self.sprout_anchors_by_height.insert(height, anchor), - None, - "incorrect overwrite of sprout anchor: anchors must be reverted then inserted", - ); - } - - #[cfg(test)] - { - self.sprout_trees_by_height.insert(height, tree.clone()); - self.sprout_anchors_by_height.insert(height, anchor); - } + assert_eq!( + self.sprout_trees_by_height.insert(height, tree.clone()), + None, + "incorrect overwrite of sprout tree: trees must be reverted then inserted", + ); + assert_eq!( + self.sprout_anchors_by_height.insert(height, anchor), + None, + "incorrect overwrite of sprout anchor: anchors must be reverted then inserted", + ); // Multiple inserts are expected here, // because the anchors only change if a block has shielded transactions. 
@@ -633,26 +623,16 @@ impl Chain { let anchor = tree.root(); trace!(?height, ?anchor, "adding sapling tree"); - // TODO: fix test code that incorrectly overwrites trees - #[cfg(not(test))] - { - assert_eq!( - self.sapling_trees_by_height.insert(height, tree), - None, - "incorrect overwrite of sapling tree: trees must be reverted then inserted", - ); - assert_eq!( - self.sapling_anchors_by_height.insert(height, anchor), - None, - "incorrect overwrite of sapling anchor: anchors must be reverted then inserted", - ); - } - - #[cfg(test)] - { - self.sapling_trees_by_height.insert(height, tree); - self.sapling_anchors_by_height.insert(height, anchor); - } + assert_eq!( + self.sapling_trees_by_height.insert(height, tree), + None, + "incorrect overwrite of sapling tree: trees must be reverted then inserted", + ); + assert_eq!( + self.sapling_anchors_by_height.insert(height, anchor), + None, + "incorrect overwrite of sapling anchor: anchors must be reverted then inserted", + ); // Multiple inserts are expected here, // because the anchors only change if a block has shielded transactions. 
@@ -747,26 +727,16 @@ impl Chain { let anchor = tree.root(); trace!(?height, ?anchor, "adding orchard tree"); - // TODO: fix test code that incorrectly overwrites trees - #[cfg(not(test))] - { - assert_eq!( - self.orchard_trees_by_height.insert(height, tree), - None, - "incorrect overwrite of orchard tree: trees must be reverted then inserted", - ); - assert_eq!( - self.orchard_anchors_by_height.insert(height, anchor), - None, - "incorrect overwrite of orchard anchor: anchors must be reverted then inserted", - ); - } - - #[cfg(test)] - { - self.orchard_trees_by_height.insert(height, tree); - self.orchard_anchors_by_height.insert(height, anchor); - } + assert_eq!( + self.orchard_trees_by_height.insert(height, tree), + None, + "incorrect overwrite of orchard tree: trees must be reverted then inserted", + ); + assert_eq!( + self.orchard_anchors_by_height.insert(height, anchor), + None, + "incorrect overwrite of orchard anchor: anchors must be reverted then inserted", + ); // Multiple inserts are expected here, // because the anchors only change if a block has shielded transactions. @@ -850,16 +820,11 @@ impl Chain { // Use the previously cached root which was calculated in parallel. trace!(?height, "adding history tree"); - // TODO: fix test code that incorrectly overwrites trees - #[cfg(not(test))] assert_eq!( self.history_trees_by_height.insert(height, tree), None, "incorrect overwrite of history tree: trees must be reverted then inserted", ); - - #[cfg(test)] - self.history_trees_by_height.insert(height, tree); } /// Remove the History tree index at `height`. 
diff --git a/zebra-state/src/service/non_finalized_state/tests/prop.rs b/zebra-state/src/service/non_finalized_state/tests/prop.rs index fcf49f49b3c..56d103cb2d5 100644 --- a/zebra-state/src/service/non_finalized_state/tests/prop.rs +++ b/zebra-state/src/service/non_finalized_state/tests/prop.rs @@ -53,7 +53,7 @@ fn push_genesis_chain() -> Result<()> { chain_values.insert(None, (None, only_chain.chain_value_pools.into())); - for block in chain.iter().take(count).cloned() { + for block in chain.iter().take(count).skip(1).cloned() { let block = ContextuallyVerifiedBlock::with_block_and_spent_utxos( block, @@ -72,7 +72,7 @@ fn push_genesis_chain() -> Result<()> { chain_values.insert(block.height.into(), (block.chain_value_pool_change.into(), only_chain.chain_value_pools.into())); } - prop_assert_eq!(only_chain.blocks.len(), count); + prop_assert_eq!(only_chain.blocks.len(), count - 1); }); Ok(()) @@ -150,7 +150,7 @@ fn forked_equals_pushed_genesis() -> Result<()> { empty_tree.clone(), ValueBalance::zero(), ); - for block in chain.iter().take(fork_at_count).cloned() { + for block in chain.iter().take(fork_at_count).skip(1).cloned() { let block = ContextuallyVerifiedBlock::with_block_and_spent_utxos( block, partial_chain.unspent_utxos(), @@ -170,7 +170,7 @@ fn forked_equals_pushed_genesis() -> Result<()> { empty_tree, ValueBalance::zero(), ); - for block in chain.iter().cloned() { + for block in chain.iter().skip(1).cloned() { let block = ContextuallyVerifiedBlock::with_block_and_spent_utxos(block, full_chain.unspent_utxos())?; full_chain = full_chain @@ -460,7 +460,7 @@ fn rejection_restores_internal_state_genesis() -> Result<()> { .unwrap_or(DEFAULT_PARTIAL_CHAIN_PROPTEST_CASES)), |((chain, valid_count, network, mut bad_block) in (PreparedChain::default(), any::(), any::()) .prop_flat_map(|((chain, valid_count, network, _history_tree), is_nu5, is_v5)| { - let next_height = chain[valid_count - 1].height; + let next_height = chain[valid_count].height; ( Just(chain), 
Just(valid_count), @@ -486,7 +486,7 @@ fn rejection_restores_internal_state_genesis() -> Result<()> { // use `valid_count` as the number of valid blocks before an invalid block let valid_tip_height = chain[valid_count - 1].height; let valid_tip_hash = chain[valid_count - 1].hash; - let mut chain = chain.iter().take(valid_count).cloned(); + let mut chain = chain.iter().take(valid_count).skip(1).cloned(); prop_assert!(state.eq_internal_state(&state)); diff --git a/zebra-state/src/service/non_finalized_state/tests/vectors.rs b/zebra-state/src/service/non_finalized_state/tests/vectors.rs index 9179dee7f89..34242be752a 100644 --- a/zebra-state/src/service/non_finalized_state/tests/vectors.rs +++ b/zebra-state/src/service/non_finalized_state/tests/vectors.rs @@ -65,6 +65,9 @@ fn construct_many() -> Result<()> { let mut block: Arc = zebra_test::vectors::BLOCK_MAINNET_434873_BYTES.zcash_deserialize_into()?; + let initial_height = block + .coinbase_height() + .expect("Block 434873 should have its height in its coinbase tx."); let mut blocks = vec![]; while blocks.len() < 100 { @@ -75,7 +78,7 @@ fn construct_many() -> Result<()> { let mut chain = Chain::new( Network::Mainnet, - Height(block.coinbase_height().unwrap().0 - 1), + (initial_height - 1).expect("Initial height should be at least 1."), Default::default(), Default::default(), Default::default(), From af2962613d520248650d3e71440973324f004255 Mon Sep 17 00:00:00 2001 From: teor Date: Tue, 18 Jul 2023 14:55:13 +1000 Subject: [PATCH 222/265] Change dependabot.yml Actions schedule and fix groups (#7241) --- .github/dependabot.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/dependabot.yml b/.github/dependabot.yml index e5a061c8d86..ad39a513f61 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -5,6 +5,7 @@ updates: # serde, clap, and other dependencies sometimes have multiple updates in a week schedule: interval: weekly + day: monday timezone: America/New_York # Limit 
dependabot to 2 PRs per reviewer, but assume one reviewer is busy or away open-pull-requests-limit: 8 @@ -120,7 +121,6 @@ updates: jsonrpc: patterns: - "jsonrpc*" - - "serde_json" rand: patterns: - "rand*" @@ -135,6 +135,7 @@ updates: schedule: # tj-actions/changed-files often updates daily, which is too much for us interval: weekly + day: wednesday timezone: America/New_York open-pull-requests-limit: 6 labels: From f46ad54ae0812a79b025a63a65843d93eb0ff2e3 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 18 Jul 2023 04:55:54 +0000 Subject: [PATCH 223/265] build(deps): bump insta from 1.30.0 to 1.31.0 (#7226) Bumps [insta](https://github.com/mitsuhiko/insta) from 1.30.0 to 1.31.0. - [Changelog](https://github.com/mitsuhiko/insta/blob/master/CHANGELOG.md) - [Commits](https://github.com/mitsuhiko/insta/compare/1.30.0...1.31.0) --- updated-dependencies: - dependency-name: insta dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- zebra-rpc/Cargo.toml | 2 +- zebra-state/Cargo.toml | 2 +- zebra-test/Cargo.toml | 2 +- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c9983717e3d..482d83ab18e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2111,9 +2111,9 @@ dependencies = [ [[package]] name = "insta" -version = "1.30.0" +version = "1.31.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28491f7753051e5704d4d0ae7860d45fae3238d7d235bc4289dcd45c48d3cec3" +checksum = "a0770b0a3d4c70567f0d58331f3088b0e4c4f56c9b8d764efe654b4a5d46de3a" dependencies = [ "console", "lazy_static", diff --git a/zebra-rpc/Cargo.toml b/zebra-rpc/Cargo.toml index d0f6adecc26..e9a8f43055d 100644 --- a/zebra-rpc/Cargo.toml +++ b/zebra-rpc/Cargo.toml @@ -78,7 +78,7 @@ zebra-script = { path = "../zebra-script", version = "1.0.0-beta.27" } zebra-state = { path = "../zebra-state", version = "1.0.0-beta.27" } [dev-dependencies] -insta = { version = "1.30.0", features = ["redactions", "json", "ron"] } +insta = { version = "1.31.0", features = ["redactions", "json", "ron"] } proptest = "1.2.0" diff --git a/zebra-state/Cargo.toml b/zebra-state/Cargo.toml index 71ad137af6f..187a2b45eb9 100644 --- a/zebra-state/Cargo.toml +++ b/zebra-state/Cargo.toml @@ -91,7 +91,7 @@ once_cell = "1.18.0" spandoc = "0.2.2" hex = { version = "0.4.3", features = ["serde"] } -insta = { version = "1.30.0", features = ["ron", "redactions"] } +insta = { version = "1.31.0", features = ["ron", "redactions"] } proptest = "1.2.0" proptest-derive = "0.3.0" diff --git a/zebra-test/Cargo.toml b/zebra-test/Cargo.toml index 817f6d531d4..43676ae95ca 100644 --- a/zebra-test/Cargo.toml +++ b/zebra-test/Cargo.toml @@ -18,7 +18,7 @@ categories = ["command-line-utilities", "cryptography::cryptocurrencies"] hex = "0.4.3" indexmap = "2.0.0" lazy_static = "1.4.0" -insta = 
"1.30.0" +insta = "1.31.0" proptest = "1.2.0" once_cell = "1.18.0" rand = "0.8.5" From 62f717244ba20159081e7409248ec796ce7bd54a Mon Sep 17 00:00:00 2001 From: Arya Date: Tue, 18 Jul 2023 03:13:40 -0400 Subject: [PATCH 224/265] cleanup(deps): Bump sha2 and secp256k1 to remove duplicate dependencies (#7238) * Bumps sha2/secp256k1, updates deny.toml * removes unused import, updates method calls --- Cargo.lock | 87 ++++++------------------ deny.toml | 15 ---- zebra-chain/Cargo.toml | 4 +- zebra-chain/src/serialization/sha256d.rs | 4 +- zebra-chain/src/transparent/address.rs | 1 - 5 files changed, 23 insertions(+), 88 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 482d83ab18e..e52501272db 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -431,7 +431,7 @@ dependencies = [ "hmac", "pbkdf2", "rand 0.8.5", - "sha2 0.10.6", + "sha2", "unicode-normalization", "zeroize", ] @@ -507,15 +507,6 @@ dependencies = [ "constant_time_eq", ] -[[package]] -name = "block-buffer" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4152116fd6e9dadb291ae18fc1ec3575ed6d84c29642d97890f4b4a3417297e4" -dependencies = [ - "generic-array", -] - [[package]] name = "block-buffer" version = "0.10.4" @@ -553,7 +544,7 @@ version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f5353f36341f7451062466f0b755b96ac3a9547e4d7f6b70d603fc721a7d7896" dependencies = [ - "sha2 0.10.6", + "sha2", "tinyvec", ] @@ -1039,7 +1030,7 @@ dependencies = [ "cfg-if 1.0.0", "cpufeatures", "curve25519-dalek-derive", - "digest 0.10.7", + "digest", "fiat-crypto", "platforms", "rustc_version 0.4.0", @@ -1180,22 +1171,13 @@ dependencies = [ "uuid", ] -[[package]] -name = "digest" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d3dd60d1080a57a05ab032377049e0591415d2b31afd7028356dbf3cc6dcb066" -dependencies = [ - "generic-array", -] - [[package]] name = "digest" version = "0.10.7" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" dependencies = [ - "block-buffer 0.10.4", + "block-buffer", "crypto-common", "subtle", ] @@ -1260,7 +1242,7 @@ dependencies = [ "hex", "rand_core 0.6.4", "serde", - "sha2 0.10.6", + "sha2", "zeroize", ] @@ -1779,7 +1761,7 @@ dependencies = [ "lazy_static", "rand_core 0.6.4", "ring", - "secp256k1 0.26.0", + "secp256k1", "thiserror", ] @@ -1828,7 +1810,7 @@ version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6c49c37c09c17a53d937dfbb742eb3a961d65a994e6bcdcf37e7399d0cc8ab5e" dependencies = [ - "digest 0.10.7", + "digest", ] [[package]] @@ -2980,7 +2962,7 @@ version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "271779f35b581956db91a3e55737327a03aa051e90b1c47aeb189508533adfd7" dependencies = [ - "digest 0.10.7", + "digest", "password-hash", ] @@ -3037,7 +3019,7 @@ checksum = "745a452f8eb71e39ffd8ee32b3c5f51d03845f99786fa9b68db6ff509c505411" dependencies = [ "once_cell", "pest", - "sha2 0.10.6", + "sha2", ] [[package]] @@ -3689,7 +3671,7 @@ version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bd124222d17ad93a644ed9d011a40f4fb64aa54275c08cc216524a9ea82fb09f" dependencies = [ - "digest 0.10.7", + "digest", ] [[package]] @@ -3873,32 +3855,14 @@ dependencies = [ "untrusted", ] -[[package]] -name = "secp256k1" -version = "0.21.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c42e6f1735c5f00f51e43e28d6634141f2bcad10931b2609ddd74a86d751260" -dependencies = [ - "secp256k1-sys 0.4.2", - "serde", -] - [[package]] name = "secp256k1" version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4124a35fe33ae14259c490fd70fa199a32b9ce9502f2ee6bc4f81ec06fa65894" dependencies = [ - "secp256k1-sys 0.8.1", -] - -[[package]] -name = "secp256k1-sys" -version = "0.4.2" 
-source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "957da2573cde917463ece3570eab4a0b3f19de6f1646cde62e6fd3868f566036" -dependencies = [ - "cc", + "secp256k1-sys", + "serde", ] [[package]] @@ -4167,26 +4131,13 @@ dependencies = [ [[package]] name = "sha2" -version = "0.9.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d58a1e1bf39749807d89cf2d98ac2dfa0ff1cb3faa38fbb64dd88ac8013d800" -dependencies = [ - "block-buffer 0.9.0", - "cfg-if 1.0.0", - "cpufeatures", - "digest 0.9.0", - "opaque-debug", -] - -[[package]] -name = "sha2" -version = "0.10.6" +version = "0.10.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "82e6b795fe2e3b1e845bafcb27aa35405c4d47cdfc92af5fc8d3002f76cebdc0" +checksum = "479fb9d862239e610720565ca91403019f2f00410f1864c5aa7479b950a76ed8" dependencies = [ "cfg-if 1.0.0", "cpufeatures", - "digest 0.10.7", + "digest", ] [[package]] @@ -5616,8 +5567,8 @@ dependencies = [ "rand 0.8.5", "rand_core 0.6.4", "ripemd", - "secp256k1 0.26.0", - "sha2 0.10.6", + "secp256k1", + "sha2", "subtle", "zcash_address", "zcash_encoding", @@ -5722,12 +5673,12 @@ dependencies = [ "reddsa", "redjubjub", "ripemd", - "secp256k1 0.21.3", + "secp256k1", "serde", "serde-big-array", "serde_json", "serde_with 3.0.0", - "sha2 0.9.9", + "sha2", "spandoc", "static_assertions", "thiserror", diff --git a/deny.toml b/deny.toml index 7e932dbf08e..59002a66a92 100644 --- a/deny.toml +++ b/deny.toml @@ -68,26 +68,11 @@ skip-tree = [ # ZF crates - # wait for zcashd and zcash_script to upgrade - # https://github.com/ZcashFoundation/zcash_script/pulls - { name = "sha2", version = "=0.9.9" }, - # wait for indexmap, toml_edit, serde_json, tower to upgrade { name = "hashbrown", version = "=0.12.3" }, - # wait for metrics-exporter-prometheus to upgrade - { name = "hashbrown", version = "=0.13.2" }, - - # wait for zebra-chain to upgrade - { name = "secp256k1", version = "=0.21.3" }, - - # wait for zebra-chain 
to upgrade `secp256k1` - { name = "secp256k1-sys", version = "=0.4.2" }, # ECC crates - # wait for zcash_primitives to remove duplicated dependencies - { name = "block-buffer", version = "=0.9.0" }, - # wait for minreq and zcash_proofs to upgrade { name = "rustls", version = "=0.20.8" }, diff --git a/zebra-chain/Cargo.toml b/zebra-chain/Cargo.toml index 74272274200..0003f8cc695 100644 --- a/zebra-chain/Cargo.toml +++ b/zebra-chain/Cargo.toml @@ -71,8 +71,8 @@ primitive-types = "0.11.1" rand_core = "0.6.4" ripemd = "0.1.3" # Matches version used by hdwallet -secp256k1 = { version = "0.21.3", features = ["serde"] } -sha2 = { version = "0.9.9", features = ["compress"] } +secp256k1 = { version = "0.26.0", features = ["serde"] } +sha2 = { version = "0.10.7", features = ["compress"] } uint = "0.9.5" x25519-dalek = { version = "2.0.0-rc.3", features = ["serde"] } diff --git a/zebra-chain/src/serialization/sha256d.rs b/zebra-chain/src/serialization/sha256d.rs index 484d921eb91..00eab58314b 100644 --- a/zebra-chain/src/serialization/sha256d.rs +++ b/zebra-chain/src/serialization/sha256d.rs @@ -14,7 +14,7 @@ impl Writer { /// Consume the Writer and produce the hash result. 
pub fn finish(self) -> [u8; 32] { let result1 = self.hash.finalize(); - let result2 = Sha256::digest(&result1); + let result2 = Sha256::digest(result1); let mut buffer = [0u8; 32]; buffer[0..32].copy_from_slice(&result2[0..32]); buffer @@ -39,7 +39,7 @@ pub struct Checksum(pub [u8; 4]); impl<'a> From<&'a [u8]> for Checksum { fn from(bytes: &'a [u8]) -> Self { let hash1 = Sha256::digest(bytes); - let hash2 = Sha256::digest(&hash1); + let hash2 = Sha256::digest(hash1); let mut checksum = [0u8; 4]; checksum[0..4].copy_from_slice(&hash2[0..4]); Self(checksum) diff --git a/zebra-chain/src/transparent/address.rs b/zebra-chain/src/transparent/address.rs index 01b78320cc2..0faeb9216fb 100644 --- a/zebra-chain/src/transparent/address.rs +++ b/zebra-chain/src/transparent/address.rs @@ -4,7 +4,6 @@ use std::{fmt, io}; use ripemd::{Digest, Ripemd160}; use secp256k1::PublicKey; -use sha2::Digest as Sha256Digest; use sha2::Sha256; use crate::{ From e6a35610ef67388abcec7ffb3f73694a12b9185a Mon Sep 17 00:00:00 2001 From: teor Date: Tue, 18 Jul 2023 17:14:06 +1000 Subject: [PATCH 225/265] When testnet is selected, disable jobs that don't work on it (#7198) Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com> --- .../workflows/continous-integration-docker.yml | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/.github/workflows/continous-integration-docker.yml b/.github/workflows/continous-integration-docker.yml index 1a5fbb27423..a88df245ab1 100644 --- a/.github/workflows/continous-integration-docker.yml +++ b/.github/workflows/continous-integration-docker.yml @@ -392,7 +392,7 @@ jobs: name: Zebra tip needs: [ build, get-available-disks ] uses: ./.github/workflows/deploy-gcp-tests.yml - if: ${{ github.event_name == 'schedule' || !fromJSON(needs.get-available-disks.outputs.zebra_tip_disk) || (github.event.inputs.run-full-sync == 'true' && github.event.inputs.network == 'Mainnet') }} + if: ${{ github.event_name == 'schedule' || 
!fromJSON(needs.get-available-disks.outputs.zebra_tip_disk) || (github.event.inputs.run-full-sync == 'true' && (inputs.network || vars.ZCASH_NETWORK) == 'Mainnet') }} with: app_name: zebrad test_id: full-sync-to-tip @@ -506,7 +506,7 @@ jobs: name: Zebra tip on testnet needs: [ build, get-available-disks-testnet ] uses: ./.github/workflows/deploy-gcp-tests.yml - if: ${{ (github.event_name == 'schedule' && vars.SCHEDULE_TESTNET_FULL_SYNC == 'true') || !fromJSON(needs.get-available-disks-testnet.outputs.zebra_tip_disk) || (github.event.inputs.run-full-sync == 'true' && github.event.inputs.network == 'Testnet') }} + if: ${{ (github.event_name == 'schedule' && vars.SCHEDULE_TESTNET_FULL_SYNC == 'true') || !fromJSON(needs.get-available-disks-testnet.outputs.zebra_tip_disk) || (github.event.inputs.run-full-sync == 'true' && (inputs.network || vars.ZCASH_NETWORK) == 'Testnet') }} with: app_name: zebrad test_id: full-sync-to-tip-testnet @@ -585,7 +585,8 @@ jobs: name: lightwalletd tip needs: [ test-full-sync, get-available-disks ] uses: ./.github/workflows/deploy-gcp-tests.yml - if: ${{ !cancelled() && !failure() && (fromJSON(needs.get-available-disks.outputs.zebra_tip_disk) || needs.test-full-sync.result == 'success') && (github.event_name == 'schedule' || !fromJSON(needs.get-available-disks.outputs.lwd_tip_disk) || github.event.inputs.run-lwd-sync == 'true' ) }} + # Currently the lightwalletd tests only work on Mainnet + if: ${{ !cancelled() && !failure() && (inputs.network || vars.ZCASH_NETWORK) == 'Mainnet' && (fromJSON(needs.get-available-disks.outputs.zebra_tip_disk) || needs.test-full-sync.result == 'success') && (github.event_name == 'schedule' || !fromJSON(needs.get-available-disks.outputs.lwd_tip_disk) || github.event.inputs.run-lwd-sync == 'true' ) }} with: app_name: lightwalletd test_id: lwd-full-sync @@ -625,7 +626,7 @@ jobs: name: lightwalletd tip update needs: [ lightwalletd-full-sync, get-available-disks ] uses: ./.github/workflows/deploy-gcp-tests.yml - if: 
${{ !cancelled() && !failure() && (fromJSON(needs.get-available-disks.outputs.lwd_tip_disk) || needs.lightwalletd-full-sync.result == 'success') && github.event.inputs.regenerate-disks != 'true' && github.event.inputs.run-full-sync != 'true' && github.event.inputs.run-lwd-sync != 'true' }} + if: ${{ !cancelled() && !failure() && (inputs.network || vars.ZCASH_NETWORK) == 'Mainnet' && (fromJSON(needs.get-available-disks.outputs.lwd_tip_disk) || needs.lightwalletd-full-sync.result == 'success') && github.event.inputs.regenerate-disks != 'true' && github.event.inputs.run-full-sync != 'true' && github.event.inputs.run-lwd-sync != 'true' }} with: app_name: lightwalletd test_id: lwd-update-sync @@ -658,7 +659,7 @@ jobs: name: Zebra tip JSON-RPC needs: [ test-full-sync, get-available-disks ] uses: ./.github/workflows/deploy-gcp-tests.yml - if: ${{ !cancelled() && !failure() && (fromJSON(needs.get-available-disks.outputs.zebra_tip_disk) || needs.test-full-sync.result == 'success') && github.event.inputs.regenerate-disks != 'true' && github.event.inputs.run-full-sync != 'true' && github.event.inputs.run-lwd-sync != 'true' }} + if: ${{ !cancelled() && !failure() && (inputs.network || vars.ZCASH_NETWORK) == 'Mainnet' && (fromJSON(needs.get-available-disks.outputs.zebra_tip_disk) || needs.test-full-sync.result == 'success') && github.event.inputs.regenerate-disks != 'true' && github.event.inputs.run-full-sync != 'true' && github.event.inputs.run-lwd-sync != 'true' }} with: app_name: lightwalletd test_id: fully-synced-rpc @@ -684,7 +685,7 @@ jobs: name: lightwalletd tip send needs: [ lightwalletd-full-sync, get-available-disks ] uses: ./.github/workflows/deploy-gcp-tests.yml - if: ${{ !cancelled() && !failure() && (fromJSON(needs.get-available-disks.outputs.lwd_tip_disk) || needs.lightwalletd-full-sync.result == 'success') && github.event.inputs.regenerate-disks != 'true' && github.event.inputs.run-full-sync != 'true' && github.event.inputs.run-lwd-sync != 'true' }} + if: ${{ 
!cancelled() && !failure() && (inputs.network || vars.ZCASH_NETWORK) == 'Mainnet' && (fromJSON(needs.get-available-disks.outputs.lwd_tip_disk) || needs.lightwalletd-full-sync.result == 'success') && github.event.inputs.regenerate-disks != 'true' && github.event.inputs.run-full-sync != 'true' && github.event.inputs.run-lwd-sync != 'true' }} with: app_name: lightwalletd test_id: lwd-send-transactions @@ -712,7 +713,7 @@ jobs: name: lightwalletd GRPC tests needs: [ lightwalletd-full-sync, get-available-disks ] uses: ./.github/workflows/deploy-gcp-tests.yml - if: ${{ !cancelled() && !failure() && (fromJSON(needs.get-available-disks.outputs.lwd_tip_disk) || needs.lightwalletd-full-sync.result == 'success') && github.event.inputs.regenerate-disks != 'true' && github.event.inputs.run-full-sync != 'true' && github.event.inputs.run-lwd-sync != 'true' }} + if: ${{ !cancelled() && !failure() && (inputs.network || vars.ZCASH_NETWORK) == 'Mainnet' && (fromJSON(needs.get-available-disks.outputs.lwd_tip_disk) || needs.lightwalletd-full-sync.result == 'success') && github.event.inputs.regenerate-disks != 'true' && github.event.inputs.run-full-sync != 'true' && github.event.inputs.run-lwd-sync != 'true' }} with: app_name: lightwalletd test_id: lwd-grpc-wallet From e6d3295b8a7add859e9d65fcbe165156880be3ca Mon Sep 17 00:00:00 2001 From: Gustavo Valverde Date: Tue, 18 Jul 2023 08:20:57 +0100 Subject: [PATCH 226/265] refactor(issues): Include DevOps template and improve others (#7232) * add(issues): add DevOps ticket template * imp(issues): add visual feedback and context * fix: use correct links * fix(issues): typo * fix: typo * fix(issues): wrong file type * fix(issues): yaml error --- .github/ISSUE_TEMPLATE/bug_report.yml | 2 +- .github/ISSUE_TEMPLATE/config.yml | 9 ++- .github/ISSUE_TEMPLATE/devops_report.yml | 68 +++++++++++++++++++ .github/ISSUE_TEMPLATE/feature_request.md | 2 +- .../ISSUE_TEMPLATE/private_security_issue.yml | 2 +- .github/ISSUE_TEMPLATE/release.md | 2 +- 
.../ISSUE_TEMPLATE/usability_testing_plan.md | 2 +- 7 files changed, 81 insertions(+), 6 deletions(-) create mode 100644 .github/ISSUE_TEMPLATE/devops_report.yml diff --git a/.github/ISSUE_TEMPLATE/bug_report.yml b/.github/ISSUE_TEMPLATE/bug_report.yml index f3bf463a17b..46b71d2367a 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.yml +++ b/.github/ISSUE_TEMPLATE/bug_report.yml @@ -1,4 +1,4 @@ -name: Bug report +name: ":bug: Bug report" description: Create a report to help us improve title: '[User reported bug]: ' labels: C-bug, S-needs-triage diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml index 0086358db1e..e1816836e3b 100644 --- a/.github/ISSUE_TEMPLATE/config.yml +++ b/.github/ISSUE_TEMPLATE/config.yml @@ -1 +1,8 @@ -blank_issues_enabled: true +blank_issues_enabled: false +contact_links: + - name: 💬 Zcash Community Support + url: https://forum.zcashcommunity.com/ + about: You're invited to ask questions about the ecosystem, community and Zebra + - name: ❓ General Questions about Zebra + url: https://github.com/ZcashFoundation/zebra/discussions/categories/q-a + about: Please ask and answer questions about Zebra as discussion threads diff --git a/.github/ISSUE_TEMPLATE/devops_report.yml b/.github/ISSUE_TEMPLATE/devops_report.yml new file mode 100644 index 00000000000..9f4ce614d8e --- /dev/null +++ b/.github/ISSUE_TEMPLATE/devops_report.yml @@ -0,0 +1,68 @@ +--- +name: ":octocat: DevOps Report" +description: Issues related to the Zebra build, test, or release process. +title: "(short issue description)" +labels: [A-devops, C-bug, S-needs-triage] +body: + +- type: markdown + attributes: + value: | + Thank you for taking the time to report a bug in Zebra! + + Please fill out the sections below to help us reproduce and fix the bug.
+ If you have a question, please ask on [Discord](https://discord.gg/fP2JGmhm) or [GitHub Discussions](https://github.com/ZcashFoundation/zebra/discussions) +- type: textarea + id: description + attributes: + label: Describe the issue or request + description: What is the problem? A clear and concise description of the bug. + validations: + required: true +- type: textarea + id: expected + attributes: + label: Expected Behavior + description: | + What did you expect to happen? + validations: + required: true +- type: textarea + id: current + attributes: + label: Current Behavior + description: | + What actually happened? + + Please include full errors, uncaught exceptions, stack traces, and relevant logs. + Links to the faulty logs in GitHub Actions or other places are also welcomed. + validations: + required: true +- type: textarea + id: solution + attributes: + label: Possible Solution + description: | + Suggest a fix/reason for the bug + validations: + required: false +- type: textarea + id: context + attributes: + label: Additional Information/Context + description: | + Anything else that might be relevant for troubleshooting this bug. Providing context helps us come up with a solution that is most useful for the community. + validations: + required: false +- type: input + id: on-prs + attributes: + label: Is this happening on PRs? + validations: + required: true +- type: input + id: on-main + attributes: + label: Is this happening on the main branch? 
+ validations: + required: true diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md index 9ac8226f988..2ecf68b68b4 100644 --- a/.github/ISSUE_TEMPLATE/feature_request.md +++ b/.github/ISSUE_TEMPLATE/feature_request.md @@ -1,5 +1,5 @@ --- -name: Change request +name: ":construction: Change request" about: Suggest a feature or change for this project title: '' labels: C-enhancement, S-needs-triage diff --git a/.github/ISSUE_TEMPLATE/private_security_issue.yml b/.github/ISSUE_TEMPLATE/private_security_issue.yml index ffd7e69154d..97e16ab49f0 100644 --- a/.github/ISSUE_TEMPLATE/private_security_issue.yml +++ b/.github/ISSUE_TEMPLATE/private_security_issue.yml @@ -1,5 +1,5 @@ --- -name: Private Security Issue +name: ":unlock: Private Security Issue" about: Zebra team use only title: 'Security Issue #NNN' labels: C-security, S-needs-triage diff --git a/.github/ISSUE_TEMPLATE/release.md b/.github/ISSUE_TEMPLATE/release.md index af1a68331c7..05da69d09b6 100644 --- a/.github/ISSUE_TEMPLATE/release.md +++ b/.github/ISSUE_TEMPLATE/release.md @@ -1,5 +1,5 @@ --- -name: 'Zebra Release' +name: ":rocket: Zebra Release" about: 'Zebra team use only' title: 'Publish next Zebra release: (version)' labels: 'A-release, C-trivial, P-Medium :zap:' diff --git a/.github/ISSUE_TEMPLATE/usability_testing_plan.md b/.github/ISSUE_TEMPLATE/usability_testing_plan.md index 16b333eb36a..4c25c394fc5 100644 --- a/.github/ISSUE_TEMPLATE/usability_testing_plan.md +++ b/.github/ISSUE_TEMPLATE/usability_testing_plan.md @@ -1,5 +1,5 @@ --- -name: Usability Testing Plan +name: ":clipboard: Usability Testing Plan" about: Create a Usability Testing Plan title: 'Usability Testing Plan' labels: C-research From c6f4e28bff2f453b11a75c05eb58754a762e1123 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 18 Jul 2023 09:59:41 +0000 Subject: [PATCH 227/265] build(deps): bump the cli group with 1 update 
(#7244) Bumps the cli group with 1 update: [clap](https://github.com/clap-rs/clap). - [Release notes](https://github.com/clap-rs/clap/releases) - [Changelog](https://github.com/clap-rs/clap/blob/master/CHANGELOG.md) - [Commits](https://github.com/clap-rs/clap/compare/v4.3.11...v4.3.15) --- updated-dependencies: - dependency-name: clap dependency-type: direct:production update-type: version-update:semver-patch dependency-group: cli ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 18 +++++++++--------- zebrad/Cargo.toml | 2 +- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index e52501272db..3803a03e5c1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -12,7 +12,7 @@ dependencies = [ "arc-swap", "backtrace", "canonical-path", - "clap 4.3.11", + "clap 4.3.15", "color-eyre", "fs-err", "once_cell", @@ -753,9 +753,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.3.11" +version = "4.3.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1640e5cc7fb47dbb8338fd471b105e7ed6c3cb2aeb00c2e067127ffd3764a05d" +checksum = "8f644d0dac522c8b05ddc39aaaccc5b136d5dc4ff216610c5641e3be5becf56c" dependencies = [ "clap_builder", "clap_derive", @@ -764,9 +764,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.3.11" +version = "4.3.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "98c59138d527eeaf9b53f35a77fcc1fad9d883116070c63d5de1c7dc7b00c72b" +checksum = "af410122b9778e024f9e0fb35682cc09cc3f85cad5e8d3ba8f47a9702df6e73d" dependencies = [ "anstream", "anstyle", @@ -777,9 +777,9 @@ dependencies = [ [[package]] name = "clap_derive" -version = "4.3.2" +version = "4.3.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8cd2b2a819ad6eec39e8f1d6b53001af1e5469f8c177579cdaeb313115b825f" +checksum = 
"54a9bb5758fc5dfe728d1019941681eccaf0cf8a4189b692a0ee2f2ecf90a050" dependencies = [ "heck 0.4.1", "proc-macro2 1.0.63", @@ -935,7 +935,7 @@ dependencies = [ "anes", "cast", "ciborium", - "clap 4.3.11", + "clap 4.3.15", "criterion-plot", "is-terminal", "itertools 0.10.5", @@ -5935,7 +5935,7 @@ dependencies = [ "abscissa_core", "atty", "chrono", - "clap 4.3.11", + "clap 4.3.15", "color-eyre", "console-subscriber", "dirs", diff --git a/zebrad/Cargo.toml b/zebrad/Cargo.toml index 3a3571eff81..381e9dfc66b 100644 --- a/zebrad/Cargo.toml +++ b/zebrad/Cargo.toml @@ -153,7 +153,7 @@ zebra-state = { path = "../zebra-state", version = "1.0.0-beta.27" } zebra-utils = { path = "../zebra-utils", version = "1.0.0-beta.27", optional = true } abscissa_core = "0.7.0" -clap = { version = "4.3.11", features = ["cargo"] } +clap = { version = "4.3.15", features = ["cargo"] } chrono = { version = "0.4.26", default-features = false, features = ["clock", "std"] } humantime-serde = "1.1.1" indexmap = "2.0.0" From 3e75cb50f66ac783a477eb0db92529e88df69115 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 18 Jul 2023 12:13:51 +0000 Subject: [PATCH 228/265] build(deps): bump the crypto group with 1 update (#7222) Bumps the crypto group with 1 update: [reddsa](https://github.com/ZcashFoundation/reddsa). - [Release notes](https://github.com/ZcashFoundation/reddsa/releases) - [Changelog](https://github.com/ZcashFoundation/reddsa/blob/main/CHANGELOG.md) - [Commits](https://github.com/ZcashFoundation/reddsa/compare/0.5.0...0.5.1) --- updated-dependencies: - dependency-name: reddsa dependency-type: direct:production update-type: version-update:semver-patch dependency-group: crypto ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- zebra-chain/Cargo.toml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 3803a03e5c1..3f689d29192 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3495,9 +3495,9 @@ dependencies = [ [[package]] name = "reddsa" -version = "0.5.0" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "54b34d2c0df43159d2ff79d3cf929c9f11415529127344edb8160ad2be499fcd" +checksum = "78a5191930e84973293aa5f532b513404460cd2216c1cfb76d08748c15b40b02" dependencies = [ "blake2b_simd", "byteorder", diff --git a/zebra-chain/Cargo.toml b/zebra-chain/Cargo.toml index 0003f8cc695..bd597eae51f 100644 --- a/zebra-chain/Cargo.toml +++ b/zebra-chain/Cargo.toml @@ -108,7 +108,7 @@ rayon = "1.7.0" # ZF deps ed25519-zebra = "4.0.1" redjubjub = "0.7.0" -reddsa = "0.5.0" +reddsa = "0.5.1" # Production feature json-conversion serde_json = { version = "1.0.100", optional = true } From 1db4f567f7348f83bef8d8a95c3d55d8681ceede Mon Sep 17 00:00:00 2001 From: Marek Date: Wed, 19 Jul 2023 01:04:11 +0200 Subject: [PATCH 229/265] change(state): Deduplicate note commitment trees in non-finalized state (#7239) * Remove duplicate asserts There are the same two asserts above the two removed ones. * Remove workarounds for inserting trees into NFS NFS = non finalized state * Use correct height for constructing new chain We were using the height of the last block instead of the initial block to construct a new chain. 
* Don't push the 0th block into a chain * Don't commit two blocks at the same height * Add helpers for heights * Support the retrieval of deduped Sprout trees * Dedup Sprout trees * Refactor docs for adding & removing Sprout trees * Support the retrieval of deduped Sapling trees * Dedup Sapling trees * Refactor docs for adding & removing Sapling trees * Support the retrieval of deduped Orchard trees * Dedup Orchard trees * Refactor docs for adding & removing Orchard trees * Make the docs for storing trees clearer --- zebra-chain/src/block/height.rs | 23 ++ .../src/service/non_finalized_state/chain.rs | 294 +++++++++++++----- 2 files changed, 245 insertions(+), 72 deletions(-) diff --git a/zebra-chain/src/block/height.rs b/zebra-chain/src/block/height.rs index 16f73815410..46569221c9c 100644 --- a/zebra-chain/src/block/height.rs +++ b/zebra-chain/src/block/height.rs @@ -65,6 +65,29 @@ impl Height { /// previous to Nu5 and in non-coinbase transactions from Nu5 activation /// height and above. pub const MAX_EXPIRY_HEIGHT: Height = Height(499_999_999); + + /// Returns the next [`Height`]. + /// + /// # Panics + /// + /// - If the current height is at its maximum. + pub fn next(self) -> Self { + (self + 1).expect("Height should not be at its maximum.") + } + + /// Returns the previous [`Height`]. + /// + /// # Panics + /// + /// - If the current height is at its minimum. + pub fn previous(self) -> Self { + (self - 1).expect("Height should not be at its minimum.") + } + + /// Returns `true` if the [`Height`] is at its minimum. + pub fn is_min(self) -> bool { + self == Self::MIN + } } /// A difference between two [`Height`]s, possibly negative. 
diff --git a/zebra-state/src/service/non_finalized_state/chain.rs b/zebra-state/src/service/non_finalized_state/chain.rs index 3d92b63d23e..8400ae0a222 100644 --- a/zebra-state/src/service/non_finalized_state/chain.rs +++ b/zebra-state/src/service/non_finalized_state/chain.rs @@ -500,14 +500,25 @@ impl Chain { let height = hash_or_height.height_or_else(|hash| self.height_by_hash.get(&hash).cloned())?; - self.sprout_trees_by_height.get(&height).cloned() + self.sprout_trees_by_height + .range(..=height) + .next_back() + .map(|(_height, tree)| tree.clone()) } - /// Add the Sprout `tree` to the tree and anchor indexes at `height`. + /// Adds the Sprout `tree` to the tree and anchor indexes at `height`. /// /// `height` can be either: + /// /// - the height of a new block that has just been added to the chain tip, or - /// - the finalized tip height: the height of the parent of the first block of a new chain. + /// - the finalized tip height—the height of the parent of the first block of a new chain. + /// + /// Stores only the first tree in each series of identical trees. + /// + /// # Panics + /// + /// - If there's a tree already stored at `height`. + /// - If there's an anchor already stored at `height`. fn add_sprout_tree_and_anchor( &mut self, height: Height, @@ -521,11 +532,20 @@ impl Chain { let anchor = tree.root(); trace!(?height, ?anchor, "adding sprout tree"); - assert_eq!( - self.sprout_trees_by_height.insert(height, tree.clone()), - None, - "incorrect overwrite of sprout tree: trees must be reverted then inserted", - ); + // Don't add a new tree unless it differs from the previous one or there's no previous tree. + if height.is_min() + || self + .sprout_tree(height.previous().into()) + .map_or(true, |prev_tree| prev_tree != tree) + { + assert_eq!( + self.sprout_trees_by_height.insert(height, tree.clone()), + None, + "incorrect overwrite of sprout tree: trees must be reverted then inserted", + ); + } + + // Store the root. 
assert_eq!( self.sprout_anchors_by_height.insert(height, anchor), None, @@ -538,24 +558,35 @@ impl Chain { self.sprout_trees_by_anchor.insert(anchor, tree); } - /// Remove the Sprout tree and anchor indexes at `height`. + /// Removes the Sprout tree and anchor indexes at `height`. /// /// `height` can be at two different [`RevertPosition`]s in the chain: - /// - a tip block above a chain fork: only that height is removed, or - /// - a root block: all trees and anchors below that height are removed, - /// including temporary finalized tip trees. + /// + /// - a tip block above a chain fork—only the tree and anchor at that height are removed, or + /// - a root block—all trees and anchors at and below that height are removed, including + /// temporary finalized tip trees. + /// + /// # Panics + /// + /// - If the anchor being removed is not present. + /// - If there is no tree at `height`. fn remove_sprout_tree_and_anchor(&mut self, position: RevertPosition, height: Height) { - let removed_heights: Vec = if position == RevertPosition::Root { - // Remove all trees and anchors at or below the removed block. - // This makes sure the temporary trees from finalized tip forks are removed. - self.sprout_anchors_by_height - .keys() - .cloned() - .filter(|index_height| *index_height <= height) - .collect() + let (removed_heights, highest_removed_tree) = if position == RevertPosition::Root { + ( + // Remove all trees and anchors at or below the removed block. + // This makes sure the temporary trees from finalized tip forks are removed. + self.sprout_anchors_by_height + .keys() + .cloned() + .filter(|index_height| *index_height <= height) + .collect(), + // Cache the highest (rightmost) tree before its removal. + self.sprout_tree(height.into()), + ) } else { // Just remove the reverted tip trees and anchors. - vec![height] + // We don't need to cache the highest (rightmost) tree. 
+ (vec![height], None) }; for height in &removed_heights { @@ -563,9 +594,8 @@ impl Chain { .sprout_anchors_by_height .remove(height) .expect("Sprout anchor must be present if block was added to chain"); - self.sprout_trees_by_height - .remove(height) - .expect("Sprout note commitment tree must be present if block was added to chain"); + + self.sprout_trees_by_height.remove(height); trace!(?height, ?position, ?anchor, "removing sprout tree"); @@ -579,6 +609,26 @@ impl Chain { self.sprout_trees_by_anchor.remove(&anchor); } } + + // # Invariant + // + // The height following after the removed heights in a non-empty non-finalized state must + // always have its tree. + // + // The loop above can violate the invariant, and if `position` is [`RevertPosition::Root`], + // it will always violate the invariant. We restore the invariant by storing the highest + // (rightmost) removed tree just above `height` if there is no tree at that height. + if !self.is_empty() && height < self.non_finalized_tip_height() { + let next_height = height.next(); + + if self.sprout_trees_by_height.get(&next_height).is_none() { + // TODO: Use `try_insert` once it stabilises. + self.sprout_trees_by_height.insert( + next_height, + highest_removed_tree.expect("There should be a cached removed tree."), + ); + } + } } /// Returns the Sapling note commitment tree of the tip of this [`Chain`], @@ -607,14 +657,25 @@ impl Chain { let height = hash_or_height.height_or_else(|hash| self.height_by_hash.get(&hash).cloned())?; - self.sapling_trees_by_height.get(&height).cloned() + self.sapling_trees_by_height + .range(..=height) + .next_back() + .map(|(_height, tree)| tree.clone()) } - /// Add the Sapling `tree` to the tree and anchor indexes at `height`. + /// Adds the Sapling `tree` to the tree and anchor indexes at `height`. 
/// /// `height` can be either: + /// /// - the height of a new block that has just been added to the chain tip, or - /// - the finalized tip height: the height of the parent of the first block of a new chain. + /// - the finalized tip height—the height of the parent of the first block of a new chain. + /// + /// Stores only the first tree in each series of identical trees. + /// + /// # Panics + /// + /// - If there's a tree already stored at `height`. + /// - If there's an anchor already stored at `height`. fn add_sapling_tree_and_anchor( &mut self, height: Height, @@ -623,11 +684,20 @@ impl Chain { let anchor = tree.root(); trace!(?height, ?anchor, "adding sapling tree"); - assert_eq!( - self.sapling_trees_by_height.insert(height, tree), - None, - "incorrect overwrite of sapling tree: trees must be reverted then inserted", - ); + // Don't add a new tree unless it differs from the previous one or there's no previous tree. + if height.is_min() + || self + .sapling_tree(height.previous().into()) + .map_or(true, |prev_tree| prev_tree != tree) + { + assert_eq!( + self.sapling_trees_by_height.insert(height, tree), + None, + "incorrect overwrite of sapling tree: trees must be reverted then inserted", + ); + } + + // Store the root. assert_eq!( self.sapling_anchors_by_height.insert(height, anchor), None, @@ -639,24 +709,35 @@ impl Chain { self.sapling_anchors.insert(anchor); } - /// Remove the Sapling tree and anchor indexes at `height`. + /// Removes the Sapling tree and anchor indexes at `height`. /// /// `height` can be at two different [`RevertPosition`]s in the chain: - /// - a tip block above a chain fork: only that height is removed, or - /// - a root block: all trees and anchors below that height are removed, - /// including temporary finalized tip trees. 
+ /// + /// - a tip block above a chain fork—only the tree and anchor at that height are removed, or + /// - a root block—all trees and anchors at and below that height are removed, including + /// temporary finalized tip trees. + /// + /// # Panics + /// + /// - If the anchor being removed is not present. + /// - If there is no tree at `height`. fn remove_sapling_tree_and_anchor(&mut self, position: RevertPosition, height: Height) { - let removed_heights: Vec = if position == RevertPosition::Root { - // Remove all trees and anchors at or below the removed block. - // This makes sure the temporary trees from finalized tip forks are removed. - self.sapling_anchors_by_height - .keys() - .cloned() - .filter(|index_height| *index_height <= height) - .collect() + let (removed_heights, highest_removed_tree) = if position == RevertPosition::Root { + ( + // Remove all trees and anchors at or below the removed block. + // This makes sure the temporary trees from finalized tip forks are removed. + self.sapling_anchors_by_height + .keys() + .cloned() + .filter(|index_height| *index_height <= height) + .collect(), + // Cache the highest (rightmost) tree before its removal. + self.sapling_tree(height.into()), + ) } else { // Just remove the reverted tip trees and anchors. - vec![height] + // We don't need to cache the highest (rightmost) tree. 
+ (vec![height], None) }; for height in &removed_heights { @@ -664,9 +745,8 @@ impl Chain { .sapling_anchors_by_height .remove(height) .expect("Sapling anchor must be present if block was added to chain"); - self.sapling_trees_by_height - .remove(height) - .expect("Sapling note commitment tree must be present if block was added to chain"); + + self.sapling_trees_by_height.remove(height); trace!(?height, ?position, ?anchor, "removing sapling tree"); @@ -677,6 +757,26 @@ impl Chain { "Sapling anchor must be present if block was added to chain" ); } + + // # Invariant + // + // The height following after the removed heights in a non-empty non-finalized state must + // always have its tree. + // + // The loop above can violate the invariant, and if `position` is [`RevertPosition::Root`], + // it will always violate the invariant. We restore the invariant by storing the highest + // (rightmost) removed tree just above `height` if there is no tree at that height. + if !self.is_empty() && height < self.non_finalized_tip_height() { + let next_height = height.next(); + + if self.sapling_trees_by_height.get(&next_height).is_none() { + // TODO: Use `try_insert` once it stabilises. + self.sapling_trees_by_height.insert( + next_height, + highest_removed_tree.expect("There should be a cached removed tree."), + ); + } + } } /// Returns the Orchard note commitment tree of the tip of this [`Chain`], @@ -706,14 +806,25 @@ impl Chain { let height = hash_or_height.height_or_else(|hash| self.height_by_hash.get(&hash).cloned())?; - self.orchard_trees_by_height.get(&height).cloned() + self.orchard_trees_by_height + .range(..=height) + .next_back() + .map(|(_height, tree)| tree.clone()) } - /// Add the Orchard `tree` to the tree and anchor indexes at `height`. + /// Adds the Orchard `tree` to the tree and anchor indexes at `height`. 
/// /// `height` can be either: + /// /// - the height of a new block that has just been added to the chain tip, or - /// - the finalized tip height: the height of the parent of the first block of a new chain. + /// - the finalized tip height—the height of the parent of the first block of a new chain. + /// + /// Stores only the first tree in each series of identical trees. + /// + /// # Panics + /// + /// - If there's a tree already stored at `height`. + /// - If there's an anchor already stored at `height`. fn add_orchard_tree_and_anchor( &mut self, height: Height, @@ -727,11 +838,20 @@ impl Chain { let anchor = tree.root(); trace!(?height, ?anchor, "adding orchard tree"); - assert_eq!( - self.orchard_trees_by_height.insert(height, tree), - None, - "incorrect overwrite of orchard tree: trees must be reverted then inserted", - ); + // Don't add a new tree unless it differs from the previous one or there's no previous tree. + if height.is_min() + || self + .orchard_tree(height.previous().into()) + .map_or(true, |prev_tree| prev_tree != tree) + { + assert_eq!( + self.orchard_trees_by_height.insert(height, tree), + None, + "incorrect overwrite of orchard tree: trees must be reverted then inserted", + ); + } + + // Store the root. assert_eq!( self.orchard_anchors_by_height.insert(height, anchor), None, @@ -743,24 +863,35 @@ impl Chain { self.orchard_anchors.insert(anchor); } - /// Remove the Orchard tree and anchor indexes at `height`. + /// Removes the Orchard tree and anchor indexes at `height`. /// /// `height` can be at two different [`RevertPosition`]s in the chain: - /// - a tip block above a chain fork: only that height is removed, or - /// - a root block: all trees and anchors below that height are removed, - /// including temporary finalized tip trees. 
+ /// + /// - a tip block above a chain fork—only the tree and anchor at that height are removed, or + /// - a root block—all trees and anchors at and below that height are removed, including + /// temporary finalized tip trees. + /// + /// # Panics + /// + /// - If the anchor being removed is not present. + /// - If there is no tree at `height`. fn remove_orchard_tree_and_anchor(&mut self, position: RevertPosition, height: Height) { - let removed_heights: Vec = if position == RevertPosition::Root { - // Remove all trees and anchors at or below the removed block. - // This makes sure the temporary trees from finalized tip forks are removed. - self.orchard_anchors_by_height - .keys() - .cloned() - .filter(|index_height| *index_height <= height) - .collect() + let (removed_heights, highest_removed_tree) = if position == RevertPosition::Root { + ( + // Remove all trees and anchors at or below the removed block. + // This makes sure the temporary trees from finalized tip forks are removed. + self.orchard_anchors_by_height + .keys() + .cloned() + .filter(|index_height| *index_height <= height) + .collect(), + // Cache the highest (rightmost) tree before its removal. + self.orchard_tree(height.into()), + ) } else { // Just remove the reverted tip trees and anchors. - vec![height] + // We don't need to cache the highest (rightmost) tree. 
+ (vec![height], None) }; for height in &removed_heights { @@ -768,9 +899,8 @@ impl Chain { .orchard_anchors_by_height .remove(height) .expect("Orchard anchor must be present if block was added to chain"); - self.orchard_trees_by_height - .remove(height) - .expect("Orchard note commitment tree must be present if block was added to chain"); + + self.orchard_trees_by_height.remove(height); trace!(?height, ?position, ?anchor, "removing orchard tree"); @@ -781,6 +911,26 @@ impl Chain { "Orchard anchor must be present if block was added to chain" ); } + + // # Invariant + // + // The height following after the removed heights in a non-empty non-finalized state must + // always have its tree. + // + // The loop above can violate the invariant, and if `position` is [`RevertPosition::Root`], + // it will always violate the invariant. We restore the invariant by storing the highest + // (rightmost) removed tree just above `height` if there is no tree at that height. + if !self.is_empty() && height < self.non_finalized_tip_height() { + let next_height = height.next(); + + if self.orchard_trees_by_height.get(&next_height).is_none() { + // TODO: Use `try_insert` once it stabilises. 
+ self.orchard_trees_by_height.insert( + next_height, + highest_removed_tree.expect("There should be a cached removed tree."), + ); + } + } } /// Returns the History tree of the tip of this [`Chain`], From 7f64ff35a46e8ca309fba3eff8f4009765898285 Mon Sep 17 00:00:00 2001 From: Pili Guerra Date: Wed, 19 Jul 2023 00:04:32 +0100 Subject: [PATCH 230/265] add(metrics): Track mempool actions and size bucketed by weight (copy of #6972, credit @str4d) (#7019) * metrics: Track mempool actions and size bucketed by weight * Fix tests * draft fix tests * fix `fix_arbitrary_generated_action_overflows` * add some docs * manually derive arbitrary * remove unused import --------- Co-authored-by: Jack Grigg Co-authored-by: Marek Co-authored-by: Alfredo Garcia --- zebra-chain/src/transaction/arbitrary.rs | 50 ++++++++- zebra-chain/src/transaction/unmined.rs | 10 +- zebra-chain/src/transaction/unmined/zip317.rs | 2 +- zebra-rpc/src/methods/tests/vectors.rs | 5 +- .../mempool/storage/verified_set.rs | 100 ++++++++++++++++++ 5 files changed, 163 insertions(+), 4 deletions(-) diff --git a/zebra-chain/src/transaction/arbitrary.rs b/zebra-chain/src/transaction/arbitrary.rs index c218ccb6238..43581e07a0e 100644 --- a/zebra-chain/src/transaction/arbitrary.rs +++ b/zebra-chain/src/transaction/arbitrary.rs @@ -30,7 +30,9 @@ use crate::{ use itertools::Itertools; -use super::{FieldNotPresent, JoinSplitData, LockTime, Memo, Transaction, UnminedTx}; +use super::{ + FieldNotPresent, JoinSplitData, LockTime, Memo, Transaction, UnminedTx, VerifiedUnminedTx, +}; /// The maximum number of arbitrary transactions, inputs, or outputs. 
/// @@ -783,6 +785,52 @@ impl Arbitrary for UnminedTx { type Strategy = BoxedStrategy; } +impl Arbitrary for VerifiedUnminedTx { + type Parameters = (); + + fn arbitrary_with(_args: Self::Parameters) -> Self::Strategy { + ( + any::(), + any::>(), + any::(), + any::<(u16, u16)>().prop_map(|(unpaid_actions, conventional_actions)| { + ( + unpaid_actions % conventional_actions.saturating_add(1), + conventional_actions, + ) + }), + any::(), + ) + .prop_map( + |( + transaction, + miner_fee, + legacy_sigop_count, + (conventional_actions, mut unpaid_actions), + fee_weight_ratio, + )| { + if unpaid_actions > conventional_actions { + unpaid_actions = conventional_actions; + } + + let conventional_actions = conventional_actions as u32; + let unpaid_actions = unpaid_actions as u32; + + Self { + transaction, + miner_fee, + legacy_sigop_count, + conventional_actions, + unpaid_actions, + fee_weight_ratio, + } + }, + ) + .boxed() + } + type Strategy = BoxedStrategy; +} + // Utility functions /// Convert `trans` into a fake v5 transaction, diff --git a/zebra-chain/src/transaction/unmined.rs b/zebra-chain/src/transaction/unmined.rs index 6b953966627..da716573e8b 100644 --- a/zebra-chain/src/transaction/unmined.rs +++ b/zebra-chain/src/transaction/unmined.rs @@ -325,7 +325,6 @@ impl From<&Arc> for UnminedTx { // // This struct can't be `Eq`, because it contains a `f32`. #[derive(Clone, PartialEq)] -#[cfg_attr(any(test, feature = "proptest-impl"), derive(Arbitrary))] pub struct VerifiedUnminedTx { /// The unmined transaction. pub transaction: UnminedTx, @@ -337,6 +336,13 @@ pub struct VerifiedUnminedTx { /// transparent inputs and outputs. pub legacy_sigop_count: u64, + /// The number of conventional actions for `transaction`, as defined by [ZIP-317]. + /// + /// The number of actions is limited by [`MAX_BLOCK_BYTES`], so it fits in a u32. 
+ /// + /// [ZIP-317]: https://zips.z.cash/zip-0317#block-production + pub conventional_actions: u32, + /// The number of unpaid actions for `transaction`, /// as defined by [ZIP-317] for block production. /// @@ -381,6 +387,7 @@ impl VerifiedUnminedTx { legacy_sigop_count: u64, ) -> Result { let fee_weight_ratio = zip317::conventional_fee_weight_ratio(&transaction, miner_fee); + let conventional_actions = zip317::conventional_actions(&transaction.transaction); let unpaid_actions = zip317::unpaid_actions(&transaction, miner_fee); zip317::mempool_checks(unpaid_actions, miner_fee, transaction.size)?; @@ -390,6 +397,7 @@ impl VerifiedUnminedTx { miner_fee, legacy_sigop_count, fee_weight_ratio, + conventional_actions, unpaid_actions, }) } diff --git a/zebra-chain/src/transaction/unmined/zip317.rs b/zebra-chain/src/transaction/unmined/zip317.rs index 44ef709aacd..e9f4a757e53 100644 --- a/zebra-chain/src/transaction/unmined/zip317.rs +++ b/zebra-chain/src/transaction/unmined/zip317.rs @@ -133,7 +133,7 @@ pub fn conventional_fee_weight_ratio( /// as defined by [ZIP-317]. 
/// /// [ZIP-317]: https://zips.z.cash/zip-0317#fee-calculation -fn conventional_actions(transaction: &Transaction) -> u32 { +pub fn conventional_actions(transaction: &Transaction) -> u32 { let tx_in_total_size: usize = transaction .inputs() .iter() diff --git a/zebra-rpc/src/methods/tests/vectors.rs b/zebra-rpc/src/methods/tests/vectors.rs index b5892a3583b..8cb49e40c2e 100644 --- a/zebra-rpc/src/methods/tests/vectors.rs +++ b/zebra-rpc/src/methods/tests/vectors.rs @@ -1185,7 +1185,7 @@ async fn rpc_getblocktemplate_mining_address(use_p2pkh: bool) { block::{Hash, MAX_BLOCK_BYTES, ZCASH_BLOCK_VERSION}, chain_sync_status::MockSyncStatus, serialization::DateTime32, - transaction::VerifiedUnminedTx, + transaction::{zip317, VerifiedUnminedTx}, work::difficulty::{CompactDifficulty, ExpandedDifficulty, U256}, }; use zebra_consensus::MAX_BLOCK_SIGOPS; @@ -1441,10 +1441,13 @@ async fn rpc_getblocktemplate_mining_address(use_p2pkh: bool) { conventional_fee: 0.try_into().unwrap(), }; + let conventional_actions = zip317::conventional_actions(&unmined_tx.transaction); + let verified_unmined_tx = VerifiedUnminedTx { transaction: unmined_tx, miner_fee: 0.try_into().unwrap(), legacy_sigop_count: 0, + conventional_actions, unpaid_actions: 0, fee_weight_ratio: 1.0, }; diff --git a/zebrad/src/components/mempool/storage/verified_set.rs b/zebrad/src/components/mempool/storage/verified_set.rs index e6f0dcbd3f1..1d1f835fb36 100644 --- a/zebrad/src/components/mempool/storage/verified_set.rs +++ b/zebrad/src/components/mempool/storage/verified_set.rs @@ -286,10 +286,110 @@ impl VerifiedSet { } fn update_metrics(&mut self) { + // Track the sum of unpaid actions within each transaction (as they are subject to the + // unpaid action limit). Transactions that have weight >= 1 have no unpaid actions by + // definition. 
+ let mut unpaid_actions_with_weight_lt20pct = 0; + let mut unpaid_actions_with_weight_lt40pct = 0; + let mut unpaid_actions_with_weight_lt60pct = 0; + let mut unpaid_actions_with_weight_lt80pct = 0; + let mut unpaid_actions_with_weight_lt1 = 0; + + // Track the total number of paid actions across all transactions in the mempool. This + // added to the bucketed unpaid actions above is equal to the total number of conventional + // actions in the mempool. + let mut paid_actions = 0; + + // Track the sum of transaction sizes (the metric by which they are mainly limited) across + // several buckets. + let mut size_with_weight_lt1 = 0; + let mut size_with_weight_eq1 = 0; + let mut size_with_weight_gt1 = 0; + let mut size_with_weight_gt2 = 0; + let mut size_with_weight_gt3 = 0; + + for entry in self.full_transactions() { + paid_actions += entry.conventional_actions - entry.unpaid_actions; + + if entry.fee_weight_ratio > 3.0 { + size_with_weight_gt3 += entry.transaction.size; + } else if entry.fee_weight_ratio > 2.0 { + size_with_weight_gt2 += entry.transaction.size; + } else if entry.fee_weight_ratio > 1.0 { + size_with_weight_gt1 += entry.transaction.size; + } else if entry.fee_weight_ratio == 1.0 { + size_with_weight_eq1 += entry.transaction.size; + } else { + size_with_weight_lt1 += entry.transaction.size; + if entry.fee_weight_ratio < 0.2 { + unpaid_actions_with_weight_lt20pct += entry.unpaid_actions; + } else if entry.fee_weight_ratio < 0.4 { + unpaid_actions_with_weight_lt40pct += entry.unpaid_actions; + } else if entry.fee_weight_ratio < 0.6 { + unpaid_actions_with_weight_lt60pct += entry.unpaid_actions; + } else if entry.fee_weight_ratio < 0.8 { + unpaid_actions_with_weight_lt80pct += entry.unpaid_actions; + } else { + unpaid_actions_with_weight_lt1 += entry.unpaid_actions; + } + } + } + + metrics::gauge!( + "zcash.mempool.actions.unpaid", + unpaid_actions_with_weight_lt20pct as f64, + "bk" => "< 0.2", + ); + metrics::gauge!( + "zcash.mempool.actions.unpaid", + 
unpaid_actions_with_weight_lt40pct as f64, + "bk" => "< 0.4", + ); + metrics::gauge!( + "zcash.mempool.actions.unpaid", + unpaid_actions_with_weight_lt60pct as f64, + "bk" => "< 0.6", + ); + metrics::gauge!( + "zcash.mempool.actions.unpaid", + unpaid_actions_with_weight_lt80pct as f64, + "bk" => "< 0.8", + ); + metrics::gauge!( + "zcash.mempool.actions.unpaid", + unpaid_actions_with_weight_lt1 as f64, + "bk" => "< 1", + ); + metrics::gauge!("zcash.mempool.actions.paid", paid_actions as f64); metrics::gauge!( "zcash.mempool.size.transactions", self.transaction_count() as f64, ); + metrics::gauge!( + "zcash.mempool.size.weighted", + size_with_weight_lt1 as f64, + "bk" => "< 1", + ); + metrics::gauge!( + "zcash.mempool.size.weighted", + size_with_weight_eq1 as f64, + "bk" => "1", + ); + metrics::gauge!( + "zcash.mempool.size.weighted", + size_with_weight_gt1 as f64, + "bk" => "> 1", + ); + metrics::gauge!( + "zcash.mempool.size.weighted", + size_with_weight_gt2 as f64, + "bk" => "> 2", + ); + metrics::gauge!( + "zcash.mempool.size.weighted", + size_with_weight_gt3 as f64, + "bk" => "> 3", + ); metrics::gauge!( "zcash.mempool.size.bytes", self.transactions_serialized_size as f64, From 3d02d54f7fb277086d47608220dc122ae0106fb9 Mon Sep 17 00:00:00 2001 From: Gustavo Valverde Date: Wed, 19 Jul 2023 00:41:36 +0100 Subject: [PATCH 231/265] ref(issues): use newer template where applicable (#7254) * ref(issues): use newer template where applicable This also fixes emojis, indenting and issues with GitHub YAML schema for issues template * fix(issues): do not require testing or related work * fix(issues): textarea required * fix(issues): do not be strict on issues structure --- .github/ISSUE_TEMPLATE/bug_report.yml | 32 ++--- .github/ISSUE_TEMPLATE/devops_report.yml | 123 +++++++++--------- .github/ISSUE_TEMPLATE/feature_request.md | 43 ------ .github/ISSUE_TEMPLATE/feature_request.yml | 57 ++++++++ .../ISSUE_TEMPLATE/private_security_issue.yml | 29 +++-- 
.github/ISSUE_TEMPLATE/release.md | 2 +- .../ISSUE_TEMPLATE/usability_testing_plan.md | 2 +- 7 files changed, 154 insertions(+), 134 deletions(-) delete mode 100644 .github/ISSUE_TEMPLATE/feature_request.md create mode 100644 .github/ISSUE_TEMPLATE/feature_request.yml diff --git a/.github/ISSUE_TEMPLATE/bug_report.yml b/.github/ISSUE_TEMPLATE/bug_report.yml index 46b71d2367a..dfb6ef72758 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.yml +++ b/.github/ISSUE_TEMPLATE/bug_report.yml @@ -1,31 +1,34 @@ -name: ":bug: Bug report" +name: '🐛 Bug report' description: Create a report to help us improve -title: '[User reported bug]: ' -labels: C-bug, S-needs-triage +title: 'bug: ' +labels: [C-bug, S-needs-triage] body: - type: markdown attributes: value: | - Thanks for taking the time to fill out this bug report! + Thank you for taking the time to report a bug in Zebra! + + Please fill out the sections below to help us reproduce and fix the bug. + If you have a question, please ask on [Discord](https://discord.gg/fP2JGmhm) or [GitHub Discussions](https://github.com/ZcashFoundation/zebra/discussions) - type: textarea id: what-happened attributes: label: What happened? description: Also tell us, what did you expect to happen? - value: "I expected to see this happen: - + value: ' + I expected to see this happen: - Instead, this happened: - " + Instead, this happened: + ' validations: required: true - type: textarea id: reproduce attributes: - label: What were you doing when the issue happened? + label: What were you doing when the issue happened? description: Copy and paste the exact commands or code here. - placeholder: "Behavior or code sample that causes the bug" + placeholder: 'Behavior or code sample that causes the bug' validations: required: false - type: textarea @@ -33,15 +36,15 @@ body: attributes: label: Zebra logs description: Copy and paste the last 100 Zebra log lines or upload the full logs to https://gist.github.com/ and add a link to them here. 
- placeholder: "Copy and paste the logs here" + placeholder: 'Copy and paste the logs here' validations: required: false - type: input id: zebrad-version attributes: label: Zebra Version - description: "For bugs in `zebrad`, run `zebrad --version`." - placeholder: "zebrad 1.0.0-placeholder" + description: 'For bugs in `zebrad`, run `zebrad --version`.' + placeholder: 'zebrad 1.0.0-placeholder' validations: required: false - type: checkboxes @@ -58,8 +61,7 @@ body: id: os-details attributes: label: OS details - description: "Linux, macOS, BSD: the output of `uname -a`; Windows: version and 32-bit or 64-bit; Other OS: name and version" - placeholder: + description: 'Linux, macOS, BSD: the output of `uname -a`; Windows: version and 32-bit or 64-bit; Other OS: name and version' validations: required: false - type: textarea diff --git a/.github/ISSUE_TEMPLATE/devops_report.yml b/.github/ISSUE_TEMPLATE/devops_report.yml index 9f4ce614d8e..3e5d56fe10b 100644 --- a/.github/ISSUE_TEMPLATE/devops_report.yml +++ b/.github/ISSUE_TEMPLATE/devops_report.yml @@ -1,68 +1,67 @@ --- -name: ":octocat: DevOps Report" +name: '🚦 DevOps Report' description: Issues related to the Zebra build, test, or release process. -title: "(short issue description)" +title: 'devops: ' labels: [A-devops, C-bug, S-needs-triage] body: + - type: markdown + attributes: + value: | + Thank you for taking the time to report a bug in Zebra! -- type: markdown - attributes: - value: | - Thank you for taking the time to report a bug in Zebra! + Please fill out the sections below to help us reproduce and fix the bug. + If you have a question, please ask on [Discord](https://discord.gg/fP2JGmhm) or [GitHub Discussions](https://github.com/ZcashFoundation/zebra/discussions) + - type: textarea + id: description + attributes: + label: Describe the issue or request + description: What is the problem? A clear and concise description of the bug. 
+ validations: + required: true + - type: textarea + id: expected + attributes: + label: Expected Behavior + description: | + What did you expect to happen? + validations: + required: false + - type: textarea + id: current + attributes: + label: Current Behavior + description: | + What actually happened? - Please fill out the sections below to help us reproduce and fix the bug. - If you have a question, please ask on [Discord](https://discord.gg/fP2JGmhm) or [GitHub Discussions](https://github.com/ZcashFoundation/zebra/discussions) -- type: textarea - id: description - attributes: - label: Describe the issue or request - description: What is the problem? A clear and concise description of the bug. - validations: - required: true -- type: textarea - id: expected - attributes: - label: Expected Behavior - description: | - What did you expect to happen? - validations: - required: true -- type: textarea - id: current - attributes: - label: Current Behavior - description: | - What actually happened? - - Please include full errors, uncaught exceptions, stack traces, and relevant logs. - Links to the faulty logs in GitHub Actions or other places are also welcomed. - validations: - required: true -- type: textarea - id: solution - attributes: - label: Possible Solution - description: | - Suggest a fix/reason for the bug - validations: - required: false -- type: textarea - id: context - attributes: - label: Additional Information/Context - description: | - Anything else that might be relevant for troubleshooting this bug. Providing context helps us come up with a solution that is most useful for the community. - validations: - required: false -- type: input - id: on-prs - attributes: - label: Is this happening on PRs? - validations: - required: true -- type: input - id: on-main - attributes: - label: Is this happening on the main branch? - validations: - required: true + Please include full errors, uncaught exceptions, stack traces, and relevant logs. 
+ Links to the faulty logs in GitHub Actions or other places are also welcomed. + validations: + required: false + - type: textarea + id: solution + attributes: + label: Possible Solution + description: | + Suggest a fix/reason for the bug + validations: + required: false + - type: textarea + id: context + attributes: + label: Additional Information/Context + description: | + Anything else that might be relevant for troubleshooting this bug. Providing context helps us come up with a solution that is most useful for the community. + validations: + required: false + - type: input + id: on-prs + attributes: + label: Is this happening on PRs? + validations: + required: false + - type: input + id: on-main + attributes: + label: Is this happening on the main branch? + validations: + required: false diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md deleted file mode 100644 index 2ecf68b68b4..00000000000 --- a/.github/ISSUE_TEMPLATE/feature_request.md +++ /dev/null @@ -1,43 +0,0 @@ ---- -name: ":construction: Change request" -about: Suggest a feature or change for this project -title: '' -labels: C-enhancement, S-needs-triage -assignees: '' - ---- - -## Motivation - - - -### Specifications - - - -### Complex Code or Requirements - - - -### Testing - - - -## Related Work - - diff --git a/.github/ISSUE_TEMPLATE/feature_request.yml b/.github/ISSUE_TEMPLATE/feature_request.yml new file mode 100644 index 00000000000..5d40057589b --- /dev/null +++ b/.github/ISSUE_TEMPLATE/feature_request.yml @@ -0,0 +1,57 @@ +--- +name: "🚧 Change request" +description: Suggest a feature or change for this project +title: 'feature: ' +labels: [C-enhancement, S-needs-triage] +body: + - type: markdown + attributes: + value: | + Thank you for taking the time to suggest a feature or change for Zebra! + + Please fill out the sections below to help us understand your request. 
+ If you have a question, please ask on [Discord](https://discord.gg/fP2JGmhm) or [GitHub Discussions](https://github.com/ZcashFoundation/zebra/discussions) + - type: textarea + id: motivation + attributes: + label: Motivation + description: | + Is your feature request related to a problem? + How does this change improve Zebra? + validations: + required: true + - type: textarea + id: specs + attributes: + label: Specifications + description: | + If this change is based on consensus rules, quote them, and link to the Zcash spec or ZIP: https://zips.z.cash/#nu5-zips + If this changes network behaviour, quote and link to the Bitcoin network reference: https://developer.bitcoin.org/reference/p2p_networking.html + validations: + required: false + - type: textarea + id: complexity + attributes: + label: Complex Code or Requirements + description: | + Does this PR change concurrency, unsafe code, or complex consensus rules? + If it does, explain how we will implement, review, and test it. + validations: + required: false + - type: textarea + id: tests + attributes: + label: Testing + description: | + How can we check that this change does what we want it to do? + validations: + required: false + - type: textarea + id: related + attributes: + label: Related Work + description: | + Is this change related to other features or tickets? 
+ validations: + required: false + diff --git a/.github/ISSUE_TEMPLATE/private_security_issue.yml b/.github/ISSUE_TEMPLATE/private_security_issue.yml index 97e16ab49f0..82e6e88dce4 100644 --- a/.github/ISSUE_TEMPLATE/private_security_issue.yml +++ b/.github/ISSUE_TEMPLATE/private_security_issue.yml @@ -1,15 +1,20 @@ --- -name: ":unlock: Private Security Issue" -about: Zebra team use only +name: '🔓 Private Security Issue' +description: Zebra team use only title: 'Security Issue #NNN' -labels: C-security, S-needs-triage -assignees: '' +labels: [C-security, S-needs-triage] +body: + - type: markdown + attributes: + value: | + This ticket is a public placeholder for a security issue that the Zebra team is fixing privately. + The issue number is chosen by our internal tracker, it is not meaningful. ---- - -## Motivation - -This ticket is a public placeholder for a security issue that the Zebra team is fixing privately. -The issue number is chosen by our internal tracker, it is not meaningful. - -Zebra developers must discuss the details of this issue using secure channels. + Zebra developers must discuss the details of this issue using secure channels. + Please do not discuss this issue in public. 
+ - type: textarea + id: issue + attributes: + label: Description + description: | + Any relevant information about the issue diff --git a/.github/ISSUE_TEMPLATE/release.md b/.github/ISSUE_TEMPLATE/release.md index 05da69d09b6..fab41907d82 100644 --- a/.github/ISSUE_TEMPLATE/release.md +++ b/.github/ISSUE_TEMPLATE/release.md @@ -1,5 +1,5 @@ --- -name: ":rocket: Zebra Release" +name: "🚀 Zebra Release" about: 'Zebra team use only' title: 'Publish next Zebra release: (version)' labels: 'A-release, C-trivial, P-Medium :zap:' diff --git a/.github/ISSUE_TEMPLATE/usability_testing_plan.md b/.github/ISSUE_TEMPLATE/usability_testing_plan.md index 4c25c394fc5..c93f413b605 100644 --- a/.github/ISSUE_TEMPLATE/usability_testing_plan.md +++ b/.github/ISSUE_TEMPLATE/usability_testing_plan.md @@ -1,5 +1,5 @@ --- -name: ":clipboard: Usability Testing Plan" +name: "📋 Usability Testing Plan" about: Create a Usability Testing Plan title: 'Usability Testing Plan' labels: C-research From 1fa9d61c7c18a37ed7ec5f33d3fcdc666eefbd78 Mon Sep 17 00:00:00 2001 From: Arya Date: Wed, 19 Jul 2023 00:40:32 -0400 Subject: [PATCH 232/265] build(deps): Update dependencies for release (#7255) * Updates dependencies * Dedups bindgen, removes windows-sys from deny.toml --- Cargo.lock | 709 ++++++++++++++++++++++++++--------------------------- deny.toml | 14 +- 2 files changed, 358 insertions(+), 365 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 3f689d29192..cb7297935eb 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -12,7 +12,7 @@ dependencies = [ "arc-swap", "backtrace", "canonical-path", - "clap 4.3.15", + "clap 4.3.16", "color-eyre", "fs-err", "once_cell", @@ -35,17 +35,17 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "55bfb86e57d13c06e482c570826ddcddcc8f07fab916760e8911141d4fda8b62" dependencies = [ "ident_case", - "proc-macro2 1.0.63", - "quote 1.0.29", + "proc-macro2 1.0.66", + "quote 1.0.31", "syn 1.0.109", "synstructure", ] [[package]] name = "addr2line" 
-version = "0.19.0" +version = "0.20.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a76fd60b23679b7d19bd066031410fb7e458ccc5e958eb5c325888ce4baedc97" +checksum = "f4fa78e18c64fce05e902adecd7a5eed15a5e0a3439f7b0e169f0252214865e3" dependencies = [ "gimli", ] @@ -68,9 +68,9 @@ dependencies = [ [[package]] name = "aes" -version = "0.8.2" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "433cfd6710c9986c576a25ca913c39d66a6474107b406f34f91d4a8923395241" +checksum = "ac1f845298e95f983ff1944b728ae08b8cebab80d684f0a832ed0fc74dfa27e2" dependencies = [ "cfg-if 1.0.0", "cipher", @@ -89,15 +89,6 @@ dependencies = [ "version_check", ] -[[package]] -name = "aho-corasick" -version = "0.7.20" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc936419f96fa211c1b9166887b38e5e40b19958e5b895be7c1f93adec7071ac" -dependencies = [ - "memchr", -] - [[package]] name = "aho-corasick" version = "1.0.2" @@ -109,9 +100,9 @@ dependencies = [ [[package]] name = "allocator-api2" -version = "0.2.14" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4f263788a35611fba42eb41ff811c5d0360c58b97402570312a350736e2542e" +checksum = "0942ffc6dcaadf03badf6e6a2d0228460359d5e34b57ccdc720b7382dfbd5ec5" [[package]] name = "android-tzdata" @@ -160,15 +151,15 @@ dependencies = [ [[package]] name = "anstyle" -version = "1.0.0" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41ed9a86bf92ae6580e0a31281f65a1b1d867c0cc68d5346e2ae128dddfa6a7d" +checksum = "3a30da5c5f2d5e72842e00bcb57657162cdabef0931f40e2deb9b4140440cecd" [[package]] name = "anstyle-parse" -version = "0.2.0" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e765fd216e48e067936442276d1d57399e37bce53c264d6fefbe298080cb57ee" +checksum = "938874ff5980b03a87c5524b3ae5b59cf99b1d6bc836848df7bc5ada9643c333" 
dependencies = [ "utf8parse", ] @@ -194,9 +185,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.71" +version = "1.0.72" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c7d0618f0e0b7e8ff11427422b64564d5fb0be1940354bfe2e0529b18a9d9b8" +checksum = "3b13c32d80ecc7ab747b80c3784bce54ee8a7a0cc4fbda9bf4cda2cf6fe90854" [[package]] name = "arc-swap" @@ -212,15 +203,15 @@ checksum = "6b4930d2cb77ce62f89ee5d5289b4ac049559b1c45539271f5ed4fdc7db34545" [[package]] name = "arrayvec" -version = "0.7.2" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8da52d66c7071e2e3fa2a1e5c6d088fec47b593032b254f5e980de8ea54454d6" +checksum = "96d30a06541fbafbc7f82ed10c06164cfbd2c401138f6addd8404629c4b16711" [[package]] name = "async-compression" -version = "0.4.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b0122885821398cc923ece939e24d1056a2384ee719432397fa9db87230ff11" +checksum = "62b74f44609f0f91493e3082d3734d98497e094777144380ea4db9f9905dd5b6" dependencies = [ "flate2", "futures-core", @@ -246,20 +237,20 @@ version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" dependencies = [ - "proc-macro2 1.0.63", - "quote 1.0.29", - "syn 2.0.23", + "proc-macro2 1.0.66", + "quote 1.0.31", + "syn 2.0.26", ] [[package]] name = "async-trait" -version = "0.1.68" +version = "0.1.71" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9ccdd8f2a161be9bd5c023df56f1b2a0bd1d83872ae53b71a84a12c9bf6e842" +checksum = "a564d521dd56509c4c47480d00b80ee55f7e385ae48db5744c67ad50c92d2ebf" dependencies = [ - "proc-macro2 1.0.63", - "quote 1.0.29", - "syn 2.0.23", + "proc-macro2 1.0.66", + "quote 1.0.31", + "syn 2.0.26", ] [[package]] @@ -281,9 +272,9 @@ checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" [[package]] name 
= "axum" -version = "0.6.18" +version = "0.6.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8175979259124331c1d7bf6586ee7e0da434155e4b2d48ec2c8386281d8df39" +checksum = "a6a1de45611fdb535bfde7b7de4fd54f4fd2b17b1737c0a59b69bf9b92074b8c" dependencies = [ "async-trait", "axum-core", @@ -326,15 +317,15 @@ dependencies = [ [[package]] name = "backtrace" -version = "0.3.67" +version = "0.3.68" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "233d376d6d185f2a3093e58f283f60f880315b6c60075b01f36b3b85154564ca" +checksum = "4319208da049c43661739c5fade2ba182f09d1dc2299b32298d3a31692b17e12" dependencies = [ "addr2line", "cc", "cfg-if 1.0.0", "libc", - "miniz_oxide 0.6.2", + "miniz_oxide", "object", "rustc-demangle", ] @@ -412,13 +403,13 @@ dependencies = [ "lazycell", "log", "peeking_take_while", - "prettyplease 0.2.6", - "proc-macro2 1.0.63", - "quote 1.0.29", + "prettyplease 0.2.10", + "proc-macro2 1.0.66", + "quote 1.0.31", "regex", "rustc-hash", "shlex", - "syn 2.0.23", + "syn 2.0.26", "which", ] @@ -550,9 +541,9 @@ dependencies = [ [[package]] name = "bstr" -version = "1.5.0" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a246e68bb43f6cd9db24bea052a53e40405417c5fb372e3d1a8a7f770a564ef5" +checksum = "6798148dccfbff0fae41c7574d2fa8f1ef3492fba0face179de5d8d447d67b05" dependencies = [ "memchr", "serde", @@ -753,9 +744,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.3.15" +version = "4.3.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f644d0dac522c8b05ddc39aaaccc5b136d5dc4ff216610c5641e3be5becf56c" +checksum = "74bb1b4028935821b2d6b439bba2e970bdcf740832732437ead910c632e30d7d" dependencies = [ "clap_builder", "clap_derive", @@ -764,9 +755,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.3.15" +version = "4.3.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"af410122b9778e024f9e0fb35682cc09cc3f85cad5e8d3ba8f47a9702df6e73d" +checksum = "5ae467cbb0111869b765e13882a1dbbd6cb52f58203d8b80c44f667d4dd19843" dependencies = [ "anstream", "anstyle", @@ -782,9 +773,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "54a9bb5758fc5dfe728d1019941681eccaf0cf8a4189b692a0ee2f2ecf90a050" dependencies = [ "heck 0.4.1", - "proc-macro2 1.0.63", - "quote 1.0.29", - "syn 2.0.23", + "proc-macro2 1.0.66", + "quote 1.0.31", + "syn 2.0.26", ] [[package]] @@ -888,9 +879,9 @@ dependencies = [ [[package]] name = "constant_time_eq" -version = "0.2.5" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13418e745008f7349ec7e449155f419a61b92b58a99cc3616942b926825ec76b" +checksum = "21a53c0a4d288377e7415b53dcfc3c04da5cdc2cc95c8d5ac178b58f0b861ad6" [[package]] name = "core-foundation" @@ -910,9 +901,9 @@ checksum = "e496a50fda8aacccc86d7529e2c1e0892dbd0f898a6b5645b5561b89c3210efa" [[package]] name = "cpufeatures" -version = "0.2.7" +version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e4c1eaa2012c47becbbad2ab175484c2a84d1185b566fb2cc5b8707343dfe58" +checksum = "a17b76ff3a4162b0b27f354a0c87015ddad39d35f9c0c36607a3bdd175dde1f1" dependencies = [ "libc", ] @@ -935,7 +926,7 @@ dependencies = [ "anes", "cast", "ciborium", - "clap 4.3.15", + "clap 4.3.16", "criterion-plot", "is-terminal", "itertools 0.10.5", @@ -985,9 +976,9 @@ dependencies = [ [[package]] name = "crossbeam-epoch" -version = "0.9.14" +version = "0.9.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46bd5f3f85273295a9d14aedfb86f6aadbff6d8f5295c4a9edb08e819dcf5695" +checksum = "ae211234986c545741a7dc064309f67ee1e5ad243d0e48335adc0484d960bcc7" dependencies = [ "autocfg", "cfg-if 1.0.0", @@ -998,9 +989,9 @@ dependencies = [ [[package]] name = "crossbeam-utils" -version = "0.8.15" +version = "0.8.16" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c063cd8cc95f5c377ed0d4b49a4b21f632396ff690e8470c29b3359b346984b" +checksum = "5a22b2d63d4d1dc0b7f1b6b2747dd0088008a9be28b6ddf0b1e7d335e3037294" dependencies = [ "cfg-if 1.0.0", ] @@ -1045,9 +1036,9 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "83fdaf97f4804dcebfa5862639bc9ce4121e82140bec2a987ac5140294865b5b" dependencies = [ - "proc-macro2 1.0.63", - "quote 1.0.29", - "syn 2.0.23", + "proc-macro2 1.0.66", + "quote 1.0.31", + "syn 2.0.26", ] [[package]] @@ -1064,14 +1055,14 @@ dependencies = [ [[package]] name = "cxx-gen" -version = "0.7.95" +version = "0.7.101" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b677bcf759c79656defee3b0374aeff759122d3fc80edb0b77eeb0fd06e8fd20" +checksum = "400bb5c322e41b40e0014270ed5759b377eab9cb5c8754d82342548c6a719483" dependencies = [ "codespan-reporting", - "proc-macro2 1.0.63", - "quote 1.0.29", - "syn 2.0.23", + "proc-macro2 1.0.66", + "quote 1.0.31", + "syn 2.0.26", ] [[package]] @@ -1086,9 +1077,9 @@ version = "1.0.95" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4a076022ece33e7686fb76513518e219cca4fce5750a8ae6d1ce6c0f48fd1af9" dependencies = [ - "proc-macro2 1.0.63", - "quote 1.0.29", - "syn 2.0.23", + "proc-macro2 1.0.66", + "quote 1.0.31", + "syn 2.0.26", ] [[package]] @@ -1103,12 +1094,12 @@ dependencies = [ [[package]] name = "darling" -version = "0.20.1" +version = "0.20.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0558d22a7b463ed0241e993f76f09f30b126687447751a8638587b864e4b3944" +checksum = "0209d94da627ab5605dcccf08bb18afa5009cfbef48d8a8b7d7bdbc79be25c5e" dependencies = [ - "darling_core 0.20.1", - "darling_macro 0.20.1", + "darling_core 0.20.3", + "darling_macro 0.20.3", ] [[package]] @@ -1119,24 +1110,24 @@ checksum = "859d65a907b6852c9361e3185c862aae7fafd2887876799fa55f5f99dc40d610" dependencies = [ 
"fnv", "ident_case", - "proc-macro2 1.0.63", - "quote 1.0.29", + "proc-macro2 1.0.66", + "quote 1.0.31", "strsim 0.10.0", "syn 1.0.109", ] [[package]] name = "darling_core" -version = "0.20.1" +version = "0.20.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab8bfa2e259f8ee1ce5e97824a3c55ec4404a0d772ca7fa96bf19f0752a046eb" +checksum = "177e3443818124b357d8e76f53be906d60937f0d3a90773a664fa63fa253e621" dependencies = [ "fnv", "ident_case", - "proc-macro2 1.0.63", - "quote 1.0.29", + "proc-macro2 1.0.66", + "quote 1.0.31", "strsim 0.10.0", - "syn 2.0.23", + "syn 2.0.26", ] [[package]] @@ -1146,19 +1137,19 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c972679f83bdf9c42bd905396b6c3588a843a17f0f16dfcfa3e2c5d57441835" dependencies = [ "darling_core 0.13.4", - "quote 1.0.29", + "quote 1.0.31", "syn 1.0.109", ] [[package]] name = "darling_macro" -version = "0.20.1" +version = "0.20.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "29a358ff9f12ec09c3e61fef9b5a9902623a695a46a917b07f269bff1445611a" +checksum = "836a9bbc7ad63342d6d6e7b815ccab164bc77a2d95d84bc3117a8c0d5c98e2d5" dependencies = [ - "darling_core 0.20.1", - "quote 1.0.29", - "syn 2.0.23", + "darling_core 0.20.3", + "quote 1.0.31", + "syn 2.0.26", ] [[package]] @@ -1209,16 +1200,16 @@ version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "487585f4d0c6655fe74905e2504d8ad6908e4db67f744eb140876906c2f3175d" dependencies = [ - "proc-macro2 1.0.63", - "quote 1.0.29", - "syn 2.0.23", + "proc-macro2 1.0.66", + "quote 1.0.31", + "syn 2.0.26", ] [[package]] name = "dyn-clone" -version = "1.0.11" +version = "1.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68b0cf012f1230e43cd00ebb729c6bb58707ecfa8ad08b52ef3a4ccd2697fc30" +checksum = "304e6508efa593091e97a9abbc10f90aa7ca635b6d2784feff3c89d41dd12272" [[package]] name = "ed25519" @@ -1309,9 +1300,9 @@ 
dependencies = [ [[package]] name = "equivalent" -version = "1.0.0" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "88bffebc5d80432c9b140ee17875ff173a8ab62faad5b257da912bd2f6c1c0a1" +checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" [[package]] name = "errno" @@ -1404,7 +1395,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3b9429470923de8e8cbd4d2dc513535400b4b3fef0319fb5c4e1f520a7bef743" dependencies = [ "crc32fast", - "miniz_oxide 0.7.1", + "miniz_oxide", ] [[package]] @@ -1530,9 +1521,9 @@ version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72" dependencies = [ - "proc-macro2 1.0.63", - "quote 1.0.29", - "syn 2.0.23", + "proc-macro2 1.0.66", + "quote 1.0.31", + "syn 2.0.26", ] [[package]] @@ -1601,9 +1592,9 @@ dependencies = [ [[package]] name = "gimli" -version = "0.27.2" +version = "0.27.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad0a93d233ebf96623465aad4046a8d3aa4da22d4f4beba5388838c8a434bbb4" +checksum = "b6c80984affa11d98d1b88b66ac8853f143217b399d3c74116778ff8fdb4ed2e" [[package]] name = "git2" @@ -1626,11 +1617,11 @@ checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b" [[package]] name = "globset" -version = "0.4.10" +version = "0.4.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "029d74589adefde59de1a0c4f4732695c32805624aec7b68d91503d4dba79afc" +checksum = "1391ab1f92ffcc08911957149833e682aa3fe252b9f45f966d2ef972274c97df" dependencies = [ - "aho-corasick 0.7.20", + "aho-corasick", "bstr", "fnv", "log", @@ -1651,9 +1642,9 @@ dependencies = [ [[package]] name = "h2" -version = "0.3.19" +version = "0.3.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d357c7ae988e7d2182f7d7871d0b963962420b0678b0997ce7de72001aeab782" 
+checksum = "97ec8491ebaf99c8eaa73058b045fe58073cd6be7f596ac993ced0b0a0c01049" dependencies = [ "bytes", "fnv", @@ -1722,9 +1713,9 @@ checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" [[package]] name = "hashbrown" -version = "0.13.2" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43a3c133739dddd0d2990f9a4bdf8eb4b21ef50e4851ca85ab661199821d510e" +checksum = "33ff8ae62cd3a9102e5637afc8452c55acf3844001bd5374e0b0bd7b6616c038" dependencies = [ "ahash", ] @@ -1791,9 +1782,9 @@ dependencies = [ [[package]] name = "hermit-abi" -version = "0.3.1" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fed44880c466736ef9a5c5b5facefb5ed0785676d0c02d612db14e54f0d84286" +checksum = "443144c8cdadd93ebf52ddb4056d257f5b52c04d3c804e657d19eb73fc33668b" [[package]] name = "hex" @@ -1920,13 +1911,14 @@ dependencies = [ [[package]] name = "hyper-rustls" -version = "0.24.0" +version = "0.24.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0646026eb1b3eea4cd9ba47912ea5ce9cc07713d105b1a14698f4e6433d348b7" +checksum = "8d78e1e73ec14cf7375674f74d7dde185c8206fd9dea6fb6295e8a98098aaa97" dependencies = [ + "futures-util", "http", "hyper", - "rustls 0.21.2", + "rustls 0.21.5", "tokio", "tokio-rustls", ] @@ -2010,8 +2002,8 @@ version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "11d7a9f6330b71fea57921c9b61c47ee6e84f72d394754eff6163ae67e7395eb" dependencies = [ - "proc-macro2 1.0.63", - "quote 1.0.29", + "proc-macro2 1.0.66", + "quote 1.0.31", "syn 1.0.109", ] @@ -2123,26 +2115,25 @@ version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eae7b9aee968036d54dce06cebaefd919e4472e753296daccd6d344e3e2df0c2" dependencies = [ - "hermit-abi 0.3.1", + "hermit-abi 0.3.2", "libc", "windows-sys 0.48.0", ] [[package]] name = "ipnet" -version = "2.7.2" +version = "2.8.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "12b6ee2129af8d4fb011108c73d99a1b83a85977f23b82460c0ae2e25bb4b57f" +checksum = "28b29a3cd74f0f4598934efe3aeba42bae0eb4680554128851ebbecb02af14e6" [[package]] name = "is-terminal" -version = "0.4.7" +version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "adcf93614601c8129ddf72e2d5633df827ba6551541c6d8c59520a371475be1f" +checksum = "cb0889898416213fab133e1d33a0e5858a48177452750691bde3666d0fdbaf8b" dependencies = [ - "hermit-abi 0.3.1", - "io-lifetimes", - "rustix", + "hermit-abi 0.3.2", + "rustix 0.38.4", "windows-sys 0.48.0", ] @@ -2166,9 +2157,9 @@ dependencies = [ [[package]] name = "itoa" -version = "1.0.6" +version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "453ad9f582a441959e5f0d088b02ce04cfe8d51a8eaf077f12ac6d3e94164ca6" +checksum = "af150ab688ff2122fcef229be89cb50dd66af9e01a4ff320cc137eecc9bacc38" [[package]] name = "jobserver" @@ -2181,9 +2172,9 @@ dependencies = [ [[package]] name = "js-sys" -version = "0.3.63" +version = "0.3.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f37a4a5928311ac501dee68b3c7613a1037d0edb30c8e5427bd832d55d1b790" +checksum = "c5f195fe497f702db0f318b07fdd68edb16955aed830df8363d837542f8f935a" dependencies = [ "wasm-bindgen", ] @@ -2210,8 +2201,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5b939a78fa820cdfcb7ee7484466746a7377760970f6f9c6fe19f9edcc8a38d2" dependencies = [ "proc-macro-crate 0.1.5", - "proc-macro2 1.0.63", - "quote 1.0.29", + "proc-macro2 1.0.66", + "quote 1.0.31", "syn 1.0.109", ] @@ -2350,9 +2341,9 @@ dependencies = [ [[package]] name = "link-cplusplus" -version = "1.0.8" +version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ecd207c9c713c34f95a097a5b029ac2ce6010530c7b49d7fea24d977dede04f5" +checksum = 
"9d240c6f7e1ba3a28b0249f774e6a9dd0175054b52dfbb61b16eb8505c3785c9" dependencies = [ "cc", ] @@ -2369,6 +2360,12 @@ version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ef53942eb7bf7ff43a617b3e2c1c4a5ecf5944a7c1bc12d7ee39bbb15e5c1519" +[[package]] +name = "linux-raw-sys" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09fc20d2ca12cb9f044c93e3bd6d32d523e6e2ec3db4f7b2939cd99026ecd3f0" + [[package]] name = "lock_api" version = "0.4.10" @@ -2443,9 +2440,9 @@ checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d" [[package]] name = "memoffset" -version = "0.8.0" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d61c719bcfbcf5d62b3a09efa6088de8c54bc0bfcd3ea7ae39fcc186108b8de1" +checksum = "5a634b1c61a95585bd15607c6ab0c4e5b226e695ff2800ba0cdccddf208c406c" dependencies = [ "autocfg", ] @@ -2493,20 +2490,20 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ddece26afd34c31585c74a4db0630c376df271c285d682d1e55012197830b6df" dependencies = [ - "proc-macro2 1.0.63", - "quote 1.0.29", - "syn 2.0.23", + "proc-macro2 1.0.66", + "quote 1.0.31", + "syn 2.0.26", ] [[package]] name = "metrics-util" -version = "0.15.0" +version = "0.15.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "111cb375987443c3de8d503580b536f77dc8416d32db62d9456db5d93bd7ac47" +checksum = "4de2ed6e491ed114b40b732e4d1659a9d53992ebd87490c44a6ffe23739d973e" dependencies = [ "crossbeam-epoch", "crossbeam-utils", - "hashbrown 0.13.2", + "hashbrown 0.13.1", "metrics", "num_cpus", "quanta", @@ -2525,15 +2522,6 @@ version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" -[[package]] -name = "miniz_oxide" -version = "0.6.2" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "b275950c28b37e794e8c55d88aeb5e139d0ce23fdbbeda68f8d7174abdf9e8fa" -dependencies = [ - "adler", -] - [[package]] name = "miniz_oxide" version = "0.7.1" @@ -2608,9 +2596,9 @@ dependencies = [ [[package]] name = "net2" -version = "0.2.38" +version = "0.2.39" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "74d0df99cfcd2530b2e694f6e17e7f37b8e26bb23983ac530c0c97408837c631" +checksum = "b13b648036a2339d06de780866fbdfda0dde886de7b3af2ddeba8b14f4ee34ac" dependencies = [ "cfg-if 0.1.10", "libc", @@ -2690,7 +2678,7 @@ version = "1.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4161fcb6d602d4d2081af7c3a45852d875a03dd337a6bfdd6e06407b61342a43" dependencies = [ - "hermit-abi 0.3.1", + "hermit-abi 0.3.2", "libc", ] @@ -2711,9 +2699,9 @@ checksum = "830b246a0e5f20af87141b25c173cd1b609bd7779a4617d6ec582abaf90870f3" [[package]] name = "object" -version = "0.30.4" +version = "0.31.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03b4680b86d9cfafba8fc491dc9b6df26b68cf40e9e6cd73909194759a63c385" +checksum = "8bda667d9f2b5051b8833f59f3bf748b28ef54f850f4fcb389a252aa383866d1" dependencies = [ "memchr", ] @@ -2738,9 +2726,9 @@ checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" [[package]] name = "openssl" -version = "0.10.54" +version = "0.10.55" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69b3f656a17a6cbc115b5c7a40c616947d213ba182135b014d6051b73ab6f019" +checksum = "345df152bc43501c5eb9e4654ff05f794effb78d4efe3d53abc158baddc0703d" dependencies = [ "bitflags 1.3.2", "cfg-if 1.0.0", @@ -2757,9 +2745,9 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ - "proc-macro2 1.0.63", - "quote 1.0.29", - "syn 2.0.23", + "proc-macro2 1.0.66", + 
"quote 1.0.31", + "syn 2.0.26", ] [[package]] @@ -2770,9 +2758,9 @@ checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" [[package]] name = "openssl-sys" -version = "0.9.88" +version = "0.9.90" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c2ce0f250f34a308dcfdbb351f511359857d4ed2134ba715a4eadd46e1ffd617" +checksum = "374533b0e45f3a7ced10fcaeccca020e66656bc03dac384f852e4e5a7a8104a6" dependencies = [ "cc", "libc", @@ -2858,9 +2846,9 @@ dependencies = [ [[package]] name = "parity-scale-codec" -version = "3.5.0" +version = "3.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ddb756ca205bd108aee3c62c6d3c994e1df84a59b9d6d4a5ea42ee1fd5a9a28" +checksum = "dd8e946cc0cc711189c0b0249fb8b599cbeeab9784d83c415719368bb8d4ac64" dependencies = [ "arrayvec", "bitvec", @@ -2872,13 +2860,13 @@ dependencies = [ [[package]] name = "parity-scale-codec-derive" -version = "3.1.4" +version = "3.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86b26a931f824dd4eca30b3e43bb4f31cd5f0d3a403c5f5ff27106b805bfde7b" +checksum = "2a296c3079b5fefbc499e1de58dc26c09b1b9a5952d26694ee89f04a43ebbb3e" dependencies = [ "proc-macro-crate 1.3.1", - "proc-macro2 1.0.63", - "quote 1.0.29", + "proc-macro2 1.0.66", + "quote 1.0.31", "syn 1.0.109", ] @@ -2927,7 +2915,7 @@ dependencies = [ "libc", "redox_syscall 0.3.5", "smallvec", - "windows-targets 0.48.0", + "windows-targets 0.48.1", ] [[package]] @@ -2980,9 +2968,9 @@ checksum = "9b2a4787296e9989611394c33f193f676704af1686e70b8f8033ab5ba9a35a94" [[package]] name = "pest" -version = "2.6.0" +version = "2.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e68e84bfb01f0507134eac1e9b410a12ba379d064eab48c50ba4ce329a527b70" +checksum = "0d2d1d55045829d65aad9d389139882ad623b33b904e7c9f1b10c5b8927298e5" dependencies = [ "thiserror", "ucd-trie", @@ -2990,9 +2978,9 @@ dependencies = [ [[package]] name = "pest_derive" 
-version = "2.6.0" +version = "2.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b79d4c71c865a25a4322296122e3924d30bc8ee0834c8bfc8b95f7f054afbfb" +checksum = "5f94bca7e7a599d89dea5dfa309e217e7906c3c007fb9c3299c40b10d6a315d3" dependencies = [ "pest", "pest_generator", @@ -3000,22 +2988,22 @@ dependencies = [ [[package]] name = "pest_generator" -version = "2.6.0" +version = "2.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c435bf1076437b851ebc8edc3a18442796b30f1728ffea6262d59bbe28b077e" +checksum = "99d490fe7e8556575ff6911e45567ab95e71617f43781e5c05490dc8d75c965c" dependencies = [ "pest", "pest_meta", - "proc-macro2 1.0.63", - "quote 1.0.29", - "syn 2.0.23", + "proc-macro2 1.0.66", + "quote 1.0.31", + "syn 2.0.26", ] [[package]] name = "pest_meta" -version = "2.6.0" +version = "2.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "745a452f8eb71e39ffd8ee32b3c5f51d03845f99786fa9b68db6ff509c505411" +checksum = "2674c66ebb4b4d9036012091b537aae5878970d6999f81a265034d85b136b341" dependencies = [ "once_cell", "pest", @@ -3047,16 +3035,16 @@ version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec2e072ecce94ec471b13398d5402c188e76ac03cf74dd1a975161b23a3f6d9c" dependencies = [ - "proc-macro2 1.0.63", - "quote 1.0.29", - "syn 2.0.23", + "proc-macro2 1.0.66", + "quote 1.0.31", + "syn 2.0.26", ] [[package]] name = "pin-project-lite" -version = "0.2.9" +version = "0.2.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e0a7ae3ac2f1173085d398531c705756c94a4c56843785df85a60c1a0afac116" +checksum = "4c40d25201921e5ff0c862a505c6557ea88568a4e3ace775ab55e93f2f4f9d57" [[package]] name = "pin-utils" @@ -3078,9 +3066,9 @@ checksum = "e3d7ddaed09e0eb771a79ab0fd64609ba0afb0a8366421957936ad14cbd13630" [[package]] name = "plotters" -version = "0.3.4" +version = "0.3.5" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "2538b639e642295546c50fcd545198c9d64ee2a38620a628724a3b266d5fbf97" +checksum = "d2c224ba00d7cadd4d5c660deaf2098e5e80e07846537c51f9cfa4be50c1fd45" dependencies = [ "num-traits", "plotters-backend", @@ -3091,15 +3079,15 @@ dependencies = [ [[package]] name = "plotters-backend" -version = "0.3.4" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "193228616381fecdc1224c62e96946dfbc73ff4384fba576e052ff8c1bea8142" +checksum = "9e76628b4d3a7581389a35d5b6e2139607ad7c75b17aed325f210aa91f4a9609" [[package]] name = "plotters-svg" -version = "0.3.3" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9a81d2759aae1dae668f783c308bc5c8ebd191ff4184aaa1b37f65a6ae5a56f" +checksum = "38f6d39893cca0701371e3c27294f09797214b86f1fb951b89ade8ec04e2abab" dependencies = [ "plotters-backend", ] @@ -3117,9 +3105,9 @@ dependencies = [ [[package]] name = "portable-atomic" -version = "1.3.3" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "767eb9f07d4a5ebcb39bbf2d452058a93c011373abf6832e24194a1c3f004794" +checksum = "edc55135a600d700580e406b4de0d59cb9ad25e344a3a091a97ded2622ec4ec6" [[package]] name = "ppv-lite86" @@ -3133,18 +3121,18 @@ version = "0.1.25" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6c8646e95016a7a6c4adea95bafa8a16baab64b583356217f2c85db4a39d9a86" dependencies = [ - "proc-macro2 1.0.63", + "proc-macro2 1.0.66", "syn 1.0.109", ] [[package]] name = "prettyplease" -version = "0.2.6" +version = "0.2.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b69d39aab54d069e7f2fe8cb970493e7834601ca2d8c65fd7bbd183578080d1" +checksum = "92139198957b410250d43fad93e630d956499a625c527eda65175c8680f83387" dependencies = [ - "proc-macro2 1.0.63", - "syn 2.0.23", + "proc-macro2 1.0.66", + "syn 2.0.26", ] [[package]] @@ -3184,8 
+3172,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" dependencies = [ "proc-macro-error-attr", - "proc-macro2 1.0.63", - "quote 1.0.29", + "proc-macro2 1.0.66", + "quote 1.0.31", "syn 1.0.109", "version_check", ] @@ -3196,8 +3184,8 @@ version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" dependencies = [ - "proc-macro2 1.0.63", - "quote 1.0.29", + "proc-macro2 1.0.66", + "quote 1.0.31", "version_check", ] @@ -3212,9 +3200,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.63" +version = "1.0.66" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b368fba921b0dce7e60f5e04ec15e565b3303972b42bcfde1d0713b881959eb" +checksum = "18fb31db3f9bddb2ea821cde30a9f70117e3f119938b5ee630b7403aa6e2ead9" dependencies = [ "unicode-ident", ] @@ -3290,8 +3278,8 @@ checksum = "e5d2d8d10f3c6ded6da8b05b5fb3b8a5082514344d56c9f871412d29b4e075b4" dependencies = [ "anyhow", "itertools 0.10.5", - "proc-macro2 1.0.63", - "quote 1.0.29", + "proc-macro2 1.0.66", + "quote 1.0.31", "syn 1.0.109", ] @@ -3353,8 +3341,8 @@ version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "608c156fd8e97febc07dc9c2e2c80bf74cfc6ef26893eae3daf8bc2bc94a4b7f" dependencies = [ - "proc-macro2 1.0.63", - "quote 1.0.29", + "proc-macro2 1.0.66", + "quote 1.0.31", "syn 1.0.109", ] @@ -3369,11 +3357,11 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.29" +version = "1.0.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "573015e8ab27661678357f27dc26460738fd2b6c86e46f386fde94cb5d913105" +checksum = "5fe8a65d69dd0808184ebb5f836ab526bb259db23c657efa38711b1072ee47f0" dependencies = [ - "proc-macro2 1.0.63", + "proc-macro2 1.0.66", ] [[package]] @@ -3559,10 +3547,10 @@ version = "1.9.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "b2eae68fc220f7cf2532e4494aded17545fce192d59cd996e0fe7887f4ceb575" dependencies = [ - "aho-corasick 1.0.2", + "aho-corasick", "memchr", - "regex-automata 0.3.2", - "regex-syntax 0.7.3", + "regex-automata 0.3.3", + "regex-syntax 0.7.4", ] [[package]] @@ -3576,13 +3564,13 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.3.2" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "83d3daa6976cffb758ec878f108ba0e062a45b2d6ca3a2cca965338855476caf" +checksum = "39354c10dd07468c2e73926b23bb9c2caca74c5501e38a35da70406f1d923310" dependencies = [ - "aho-corasick 1.0.2", + "aho-corasick", "memchr", - "regex-syntax 0.7.3", + "regex-syntax 0.7.4", ] [[package]] @@ -3593,9 +3581,9 @@ checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" [[package]] name = "regex-syntax" -version = "0.7.3" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ab07dc67230e4a4718e70fd5c20055a4334b121f1f9db8fe63ef39ce9b8c846" +checksum = "e5ea92a5b6195c6ef2a0295ea818b312502c6fc94dde986c5553242e18fd4ce2" [[package]] name = "reqwest" @@ -3623,7 +3611,7 @@ dependencies = [ "once_cell", "percent-encoding", "pin-project-lite", - "rustls 0.21.2", + "rustls 0.21.5", "rustls-pemfile", "serde", "serde_json", @@ -3742,15 +3730,28 @@ dependencies = [ [[package]] name = "rustix" -version = "0.37.19" +version = "0.37.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "acf8729d8542766f1b2cf77eb034d52f40d375bb8b615d0b147089946e16613d" +checksum = "4d69718bf81c6127a49dc64e44a742e8bb9213c0ff8869a22c308f84c1d4ab06" dependencies = [ "bitflags 1.3.2", "errno", "io-lifetimes", "libc", - "linux-raw-sys", + "linux-raw-sys 0.3.8", + "windows-sys 0.48.0", +] + +[[package]] +name = "rustix" +version = "0.38.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"0a962918ea88d644592894bc6dc55acc6c0956488adcebbfb6e273506b7fd6e5" +dependencies = [ + "bitflags 2.3.3", + "errno", + "libc", + "linux-raw-sys 0.4.3", "windows-sys 0.48.0", ] @@ -3768,21 +3769,21 @@ dependencies = [ [[package]] name = "rustls" -version = "0.21.2" +version = "0.21.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e32ca28af694bc1bbf399c33a516dbdf1c90090b8ab23c2bc24f834aa2247f5f" +checksum = "79ea77c539259495ce8ca47f53e66ae0330a8819f67e23ac96ca02f50e7b7d36" dependencies = [ "log", "ring", - "rustls-webpki", + "rustls-webpki 0.101.1", "sct", ] [[package]] name = "rustls-pemfile" -version = "1.0.2" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d194b56d58803a43635bdc398cd17e383d6f71f9182b9a192c127ca42494a59b" +checksum = "2d3987094b1d07b653b7dfdc3f70ce9a1da9c51ac18c1b06b662e4f9a0e9f4b2" dependencies = [ "base64 0.21.2", ] @@ -3797,11 +3798,21 @@ dependencies = [ "untrusted", ] +[[package]] +name = "rustls-webpki" +version = "0.101.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "15f36a6828982f422756984e47912a7a51dcbc2a197aa791158f8ca61cd8204e" +dependencies = [ + "ring", + "untrusted", +] + [[package]] name = "rustversion" -version = "1.0.13" +version = "1.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc31bd9b61a32c31f9650d18add92aa83a49ba979c143eefd27fe7177b05bd5f" +checksum = "7ffc183a10b4478d04cbbbfc96d0873219d962dd5accaff2ffbd4ceb7df837f4" [[package]] name = "rusty-fork" @@ -3817,9 +3828,9 @@ dependencies = [ [[package]] name = "ryu" -version = "1.0.13" +version = "1.0.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f91339c0467de62360649f8d3e185ca8de4224ff281f66000de5eb2a77a79041" +checksum = "1ad4cc8da4ef723ed60bced201181d83791ad433213d8c24efffda1eec85d741" [[package]] name = "same-file" @@ -3832,18 +3843,18 @@ dependencies = [ [[package]] name = "schannel" -version 
= "0.1.21" +version = "0.1.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "713cfb06c7059f3588fb8044c0fad1d09e3c01d225e25b9220dbfdcf16dbb1b3" +checksum = "0c3733bf4cf7ea0880754e19cb5a462007c4a8c1914bff372ccc95b464f1df88" dependencies = [ - "windows-sys 0.42.0", + "windows-sys 0.48.0", ] [[package]] name = "scopeguard" -version = "1.1.0" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" +checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" [[package]] name = "sct" @@ -3939,7 +3950,7 @@ checksum = "01b0ad16faa5d12372f914ed40d00bda21a6d1bdcc99264c5e5e1c9495cf3654" dependencies = [ "httpdate", "reqwest", - "rustls 0.21.2", + "rustls 0.21.5", "sentry-backtrace", "sentry-contexts", "sentry-core", @@ -4019,9 +4030,9 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.168" +version = "1.0.171" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d614f89548720367ded108b3c843be93f3a341e22d5674ca0dd5cd57f34926af" +checksum = "30e27d1e4fd7659406c492fd6cfaf2066ba8773de45ca75e855590f856dc34a9" dependencies = [ "serde_derive", ] @@ -4037,20 +4048,20 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.168" +version = "1.0.171" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d4fe589678c688e44177da4f27152ee2d190757271dc7f1d5b6b9f68d869d641" +checksum = "389894603bd18c46fa56231694f8d827779c0951a667087194cf9de94ed24682" dependencies = [ - "proc-macro2 1.0.63", - "quote 1.0.29", - "syn 2.0.23", + "proc-macro2 1.0.66", + "quote 1.0.31", + "syn 2.0.26", ] [[package]] name = "serde_json" -version = "1.0.100" +version = "1.0.103" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f1e14e89be7aa4c4b78bdbdc9eb5bf8517829a600ae8eaa39a6e1d960b5185c" +checksum = 
"d03b412469450d4404fe8499a268edd7f8b79fecb074b0d812ad64ca21f4031b" dependencies = [ "indexmap 2.0.0", "itoa", @@ -4091,9 +4102,9 @@ dependencies = [ [[package]] name = "serde_with" -version = "3.0.0" +version = "3.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f02d8aa6e3c385bf084924f660ce2a3a6bd333ba55b35e8590b321f35d88513" +checksum = "21e47d95bc83ed33b2ecf84f4187ad1ab9685d18ff28db000c99deac8ce180e3" dependencies = [ "base64 0.21.2", "chrono", @@ -4101,7 +4112,7 @@ dependencies = [ "indexmap 1.9.3", "serde", "serde_json", - "serde_with_macros 3.0.0", + "serde_with_macros 3.1.0", "time", ] @@ -4112,21 +4123,21 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e182d6ec6f05393cc0e5ed1bf81ad6db3a8feedf8ee515ecdd369809bcce8082" dependencies = [ "darling 0.13.4", - "proc-macro2 1.0.63", - "quote 1.0.29", + "proc-macro2 1.0.66", + "quote 1.0.31", "syn 1.0.109", ] [[package]] name = "serde_with_macros" -version = "3.0.0" +version = "3.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "edc7d5d3932fb12ce722ee5e64dd38c504efba37567f0c402f6ca728c3b8b070" +checksum = "ea3cee93715c2e266b9338b7544da68a9f24e227722ba482bd1c024367c77c65" dependencies = [ - "darling 0.20.1", - "proc-macro2 1.0.63", - "quote 1.0.29", - "syn 2.0.23", + "darling 0.20.3", + "proc-macro2 1.0.66", + "quote 1.0.31", + "syn 2.0.26", ] [[package]] @@ -4193,9 +4204,9 @@ dependencies = [ [[package]] name = "smallvec" -version = "1.10.0" +version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a507befe795404456341dfab10cef66ead4c041f62b8b11bbb92bffe5d0953e0" +checksum = "62bb4feee49fdd9f707ef802e22365a35de4b7b299de4763d44bfea899442ff9" [[package]] name = "socket2" @@ -4224,8 +4235,8 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5bdfb59103e43a0f99a346b57860d50f2138a7008d08acd964e9ac0fef3ae9a5" dependencies = [ - "proc-macro2 
1.0.63", - "quote 1.0.29", + "proc-macro2 1.0.66", + "quote 1.0.31", "syn 1.0.109", ] @@ -4287,8 +4298,8 @@ checksum = "dcb5ae327f9cc13b68763b5749770cb9e048a99bd9dfdfa58d0cf05d5f64afe0" dependencies = [ "heck 0.3.3", "proc-macro-error", - "proc-macro2 1.0.63", - "quote 1.0.29", + "proc-macro2 1.0.66", + "quote 1.0.31", "syn 1.0.109", ] @@ -4315,19 +4326,19 @@ version = "1.0.109" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" dependencies = [ - "proc-macro2 1.0.63", - "quote 1.0.29", + "proc-macro2 1.0.66", + "quote 1.0.31", "unicode-ident", ] [[package]] name = "syn" -version = "2.0.23" +version = "2.0.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59fb7d6d8281a51045d62b8eb3a7d1ce347b76f312af50cd3dc0af39c87c1737" +checksum = "45c3457aacde3c65315de5031ec191ce46604304d2446e803d71ade03308d970" dependencies = [ - "proc-macro2 1.0.63", - "quote 1.0.29", + "proc-macro2 1.0.66", + "quote 1.0.31", "unicode-ident", ] @@ -4343,8 +4354,8 @@ version = "0.12.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f36bdaa60a83aca3921b5259d5400cbf5e90fc51931376a9bd4a0eb79aa7210f" dependencies = [ - "proc-macro2 1.0.63", - "quote 1.0.29", + "proc-macro2 1.0.66", + "quote 1.0.31", "syn 1.0.109", "unicode-xid 0.2.4", ] @@ -4365,7 +4376,7 @@ dependencies = [ "cfg-if 1.0.0", "fastrand", "redox_syscall 0.3.5", - "rustix", + "rustix 0.37.23", "windows-sys 0.48.0", ] @@ -4402,9 +4413,9 @@ version = "1.0.43" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "463fe12d7993d3b327787537ce8dd4dfa058de32fc2b195ef3cde03dc4771e8f" dependencies = [ - "proc-macro2 1.0.63", - "quote 1.0.29", - "syn 2.0.23", + "proc-macro2 1.0.66", + "quote 1.0.31", + "syn 2.0.26", ] [[package]] @@ -4419,9 +4430,9 @@ dependencies = [ [[package]] name = "time" -version = "0.3.22" +version = "0.3.23" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea9e1b3cf1243ae005d9e74085d4d542f3125458f3a81af210d901dcd7411efd" +checksum = "59e399c068f43a5d116fedaf73b203fa4f9c519f17e2b34f63221d3792f81446" dependencies = [ "itoa", "libc", @@ -4439,9 +4450,9 @@ checksum = "7300fbefb4dadc1af235a9cef3737cea692a9d97e1b9cbcd4ebdae6f8868e6fb" [[package]] name = "time-macros" -version = "0.2.9" +version = "0.2.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "372950940a5f07bf38dbe211d7283c9e6d7327df53794992d293e534c733d09b" +checksum = "96ba15a897f3c86766b757e5ac7221554c6750054d74d5b28844fce5fb36a6c4" dependencies = [ "time-core", ] @@ -4508,9 +4519,9 @@ version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" dependencies = [ - "proc-macro2 1.0.63", - "quote 1.0.29", - "syn 2.0.23", + "proc-macro2 1.0.66", + "quote 1.0.31", + "syn 2.0.26", ] [[package]] @@ -4529,7 +4540,7 @@ version = "0.24.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c28327cf380ac148141087fbfb9de9d7bd4e84ab5d2c28fbc911d753de8a7081" dependencies = [ - "rustls 0.21.2", + "rustls 0.21.5", "tokio", ] @@ -4618,9 +4629,9 @@ dependencies = [ [[package]] name = "toml_edit" -version = "0.19.12" +version = "0.19.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c500344a19072298cd05a7224b3c0c629348b78692bf48466c5238656e315a78" +checksum = "f8123f27e969974a3dfba720fdb560be359f57b44302d280ba72e76a74480e8a" dependencies = [ "indexmap 2.0.0", "serde", @@ -4664,9 +4675,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a6fdaae4c2c638bb70fe42803a26fbd6fc6ac8c72f5c59f67ecc2a2dcabf4b07" dependencies = [ "prettyplease 0.1.25", - "proc-macro2 1.0.63", + "proc-macro2 1.0.66", "prost-build", - "quote 1.0.29", + "quote 1.0.31", "syn 1.0.109", ] @@ -4779,13 +4790,13 @@ dependencies = [ 
[[package]] name = "tracing-attributes" -version = "0.1.24" +version = "0.1.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f57e3ca2a01450b1a921183a9c9cbfda207fd822cef4ccb00a65402cbba7a74" +checksum = "5f4f31f56159e98206da9efd823404b79b6ef3143b4a7ab76e67b1751b25a4ab" dependencies = [ - "proc-macro2 1.0.63", - "quote 1.0.29", - "syn 2.0.23", + "proc-macro2 1.0.66", + "quote 1.0.31", + "syn 2.0.26", ] [[package]] @@ -4888,7 +4899,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "258bc1c4f8e2e73a977812ab339d503e6feeb92700f6d07a6de4d321522d5c08" dependencies = [ "lazy_static", - "quote 1.0.29", + "quote 1.0.31", "syn 1.0.109", ] @@ -4906,9 +4917,9 @@ checksum = "497961ef93d974e23eb6f433eb5fe1b7930b659f06d12dec6fc44a8f554c0bba" [[package]] name = "ucd-trie" -version = "0.1.5" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e79c4d996edb816c91e4308506774452e55e95c3c9de07b6729e17e15a5ef81" +checksum = "ed646292ffc8188ef8ea4d1e0e0150fb15a5c2e12ad9b8fc191ae7a8a7f3c4b9" [[package]] name = "uint" @@ -4954,9 +4965,9 @@ checksum = "92888ba5573ff080736b3648696b70cafad7d250551175acbaa4e0385b3e1460" [[package]] name = "unicode-ident" -version = "1.0.9" +version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b15811caf2415fb889178633e7724bad2509101cde276048e013b9def5e51fa0" +checksum = "301abaae475aa91687eb82514b328ab47a211a533026cb25fc3e519b86adfc3c" [[package]] name = "unicode-normalization" @@ -5009,15 +5020,15 @@ checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a" [[package]] name = "ureq" -version = "2.7.0" +version = "2.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d4b45063f47caea744e48f5baa99169bd8bd9b882d80a99941141327bbb00f99" +checksum = "0b11c96ac7ee530603dcdf68ed1557050f374ce55a5a07193ebf8cbc9f8927e9" dependencies = [ "base64 0.21.2", "log", "once_cell", 
- "rustls 0.21.2", - "rustls-webpki", + "rustls 0.21.5", + "rustls-webpki 0.100.1", "url", "webpki-roots 0.23.1", ] @@ -5042,9 +5053,9 @@ checksum = "711b9620af191e0cdc7468a8d14e709c3dcdb115b36f838e601583af800a370a" [[package]] name = "uuid" -version = "1.3.3" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "345444e32442451b267fc254ae85a209c64be56d2890e601a0c37ff0c3c5ecd2" +checksum = "79daa5ed5740825c40b389c5e50312b9c86df53fccd33f281df655642b43869d" dependencies = [ "getrandom 0.2.10", "serde", @@ -5114,11 +5125,10 @@ dependencies = [ [[package]] name = "want" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ce8a968cb1cd110d136ff8b819a556d6fb6d919363c61534f6860c7eb172ba0" +checksum = "bfa7760aed19e106de2c7c0b581b509f2f25d3dacaf737cb82ac61bc6d760b0e" dependencies = [ - "log", "try-lock", ] @@ -5136,9 +5146,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.86" +version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5bba0e8cb82ba49ff4e229459ff22a191bbe9a1cb3a341610c9c33efc27ddf73" +checksum = "7706a72ab36d8cb1f80ffbf0e071533974a60d0a308d01a5d0375bf60499a342" dependencies = [ "cfg-if 1.0.0", "wasm-bindgen-macro", @@ -5146,24 +5156,24 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.86" +version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19b04bc93f9d6bdee709f6bd2118f57dd6679cf1176a1af464fca3ab0d66d8fb" +checksum = "5ef2b6d3c510e9625e5fe6f509ab07d66a760f0885d858736483c32ed7809abd" dependencies = [ "bumpalo", "log", "once_cell", - "proc-macro2 1.0.63", - "quote 1.0.29", - "syn 2.0.23", + "proc-macro2 1.0.66", + "quote 1.0.31", + "syn 2.0.26", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-futures" -version = "0.4.36" +version = "0.4.37" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d1985d03709c53167ce907ff394f5316aa22cb4e12761295c5dc57dacb6297e" +checksum = "c02dbc21516f9f1f04f187958890d7e6026df8d16540b7ad9492bc34a67cea03" dependencies = [ "cfg-if 1.0.0", "js-sys", @@ -5173,38 +5183,38 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.86" +version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14d6b024f1a526bb0234f52840389927257beb670610081360e5a03c5df9c258" +checksum = "dee495e55982a3bd48105a7b947fd2a9b4a8ae3010041b9e0faab3f9cd028f1d" dependencies = [ - "quote 1.0.29", + "quote 1.0.31", "wasm-bindgen-macro-support", ] [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.86" +version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e128beba882dd1eb6200e1dc92ae6c5dbaa4311aa7bb211ca035779e5efc39f8" +checksum = "54681b18a46765f095758388f2d0cf16eb8d4169b639ab575a8f5693af210c7b" dependencies = [ - "proc-macro2 1.0.63", - "quote 1.0.29", - "syn 2.0.23", + "proc-macro2 1.0.66", + "quote 1.0.31", + "syn 2.0.26", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.86" +version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed9d5b4305409d1fc9482fee2d7f9bcbf24b3972bf59817ef757e23982242a93" +checksum = "ca6ad05a4870b2bf5fe995117d3728437bd27d7cd5f06f13c17443ef369775a1" [[package]] name = "web-sys" -version = "0.3.63" +version = "0.3.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3bdd9ef4e984da1187bf8110c5cf5b845fbc87a23602cdf912386a76fcd3a7c2" +checksum = "9b85cbef8c220a6abc02aefd892dfc0fc23afb1c6a426316ec33253a3877249b" dependencies = [ "js-sys", "wasm-bindgen", @@ -5235,7 +5245,7 @@ version = "0.23.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"b03058f88386e5ff5310d9111d53f48b17d732b401aeb83a8d5190f2ac459338" dependencies = [ - "rustls-webpki", + "rustls-webpki 0.100.1", ] [[package]] @@ -5286,22 +5296,7 @@ version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e686886bc078bc1b0b600cac0147aadb815089b6e4da64016cbd754b6342700f" dependencies = [ - "windows-targets 0.48.0", -] - -[[package]] -name = "windows-sys" -version = "0.42.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a3e1820f08b8513f676f7ab6c1f99ff312fb97b553d30ff4dd86f9f15728aa7" -dependencies = [ - "windows_aarch64_gnullvm 0.42.2", - "windows_aarch64_msvc 0.42.2", - "windows_i686_gnu 0.42.2", - "windows_i686_msvc 0.42.2", - "windows_x86_64_gnu 0.42.2", - "windows_x86_64_gnullvm 0.42.2", - "windows_x86_64_msvc 0.42.2", + "windows-targets 0.48.1", ] [[package]] @@ -5319,7 +5314,7 @@ version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" dependencies = [ - "windows-targets 0.48.0", + "windows-targets 0.48.1", ] [[package]] @@ -5339,9 +5334,9 @@ dependencies = [ [[package]] name = "windows-targets" -version = "0.48.0" +version = "0.48.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b1eb6f0cd7c80c79759c929114ef071b87354ce476d9d94271031c0497adfd5" +checksum = "05d4b17490f70499f20b9e791dcf6a299785ce8af4d709018206dc5b4953e95f" dependencies = [ "windows_aarch64_gnullvm 0.48.0", "windows_aarch64_msvc 0.48.0", @@ -5438,9 +5433,9 @@ checksum = "1a515f5799fe4961cb532f983ce2b23082366b898e52ffbce459c86f67c8378a" [[package]] name = "winnow" -version = "0.4.6" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61de7bac303dc551fe038e2b3cef0f571087a47571ea6e79a87692ac99b99699" +checksum = "81fac9742fd1ad1bd9643b991319f72dd031016d44b77039a26977eb667141e7" dependencies = [ "memchr", ] @@ -5677,7 +5672,7 @@ 
dependencies = [ "serde", "serde-big-array", "serde_json", - "serde_with 3.0.0", + "serde_with 3.1.0", "sha2", "spandoc", "static_assertions", @@ -5935,7 +5930,7 @@ dependencies = [ "abscissa_core", "atty", "chrono", - "clap 4.3.15", + "clap 4.3.16", "color-eyre", "console-subscriber", "dirs", @@ -6008,7 +6003,7 @@ version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ - "proc-macro2 1.0.63", - "quote 1.0.29", - "syn 2.0.23", + "proc-macro2 1.0.66", + "quote 1.0.31", + "syn 2.0.26", ] diff --git a/deny.toml b/deny.toml index 59002a66a92..5f91c7d266f 100644 --- a/deny.toml +++ b/deny.toml @@ -60,12 +60,15 @@ skip-tree = [ # wait for criterion to upgrade { name = "itertools", version = "=0.10.5" }, - # wait for backtrace and multiple dependents to upgrade - { name = "miniz_oxide", version = "=0.6.2" }, - # wait for h2 and tower to upgrade { name = "indexmap", version = "=1.9.3" }, + # wait for rocksdb to upgrade + { name = "bindgen", version = "=0.65.1" }, + + # wait for tempfile to upgrade + { name = "rustix", version = "=0.37.23" }, + # ZF crates # wait for indexmap, toml_edit, serde_json, tower to upgrade @@ -106,11 +109,6 @@ skip-tree = [ # wait for elasticsearch to update base64, darling, rustc_version, serde_with { name = "elasticsearch", version = "=8.5.0-alpha.1" }, - - # Unused dependencies - - # we don't support Windows at the moment (#3801) - { name = "windows-sys", version = "=0.42.0" }, ] # This section is considered when running `cargo deny check sources`. 
From 9ebd56092bcdfc1a09062e15a0574c94af37f389 Mon Sep 17 00:00:00 2001 From: Marek Date: Wed, 19 Jul 2023 23:20:37 +0200 Subject: [PATCH 233/265] change(test): Refactor the tests of non-finalized state (#7262) * Allow generating blocks with only coinbase tx * Fix the `forked_equals_pushed_genesis` test --- zebra-chain/src/block/arbitrary.rs | 4 +++- .../service/non_finalized_state/tests/prop.rs | 18 +++++++++--------- 2 files changed, 12 insertions(+), 10 deletions(-) diff --git a/zebra-chain/src/block/arbitrary.rs b/zebra-chain/src/block/arbitrary.rs index 9d6eb1867fb..36734c86b24 100644 --- a/zebra-chain/src/block/arbitrary.rs +++ b/zebra-chain/src/block/arbitrary.rs @@ -349,7 +349,9 @@ impl Arbitrary for Block { fn arbitrary_with(ledger_state: Self::Parameters) -> Self::Strategy { let transactions_strategy = - (1..MAX_ARBITRARY_ITEMS).prop_flat_map(move |transaction_count| { + // Generate a random number transactions. A coinbase tx is always generated, so if + // `transaction_count` is zero, the block will contain only the coinbase tx. + (0..MAX_ARBITRARY_ITEMS).prop_flat_map(move |transaction_count| { Transaction::vec_strategy(ledger_state, transaction_count) }); diff --git a/zebra-state/src/service/non_finalized_state/tests/prop.rs b/zebra-state/src/service/non_finalized_state/tests/prop.rs index 56d103cb2d5..ea72609bcc4 100644 --- a/zebra-state/src/service/non_finalized_state/tests/prop.rs +++ b/zebra-state/src/service/non_finalized_state/tests/prop.rs @@ -170,14 +170,12 @@ fn forked_equals_pushed_genesis() -> Result<()> { empty_tree, ValueBalance::zero(), ); - for block in chain.iter().skip(1).cloned() { + + for block in chain.iter().cloned() { let block = ContextuallyVerifiedBlock::with_block_and_spent_utxos(block, full_chain.unspent_utxos())?; - full_chain = full_chain - .push(block.clone()) - .expect("full chain push is valid"); - // Check some other properties of generated chains. 
+ // Check some properties of the genesis block and don't push it to the chain. if block.height == block::Height(0) { prop_assert_eq!( block @@ -188,11 +186,13 @@ fn forked_equals_pushed_genesis() -> Result<()> { .filter_map(|i| i.outpoint()) .count(), 0, - "unexpected transparent prevout input at height {:?}: \ - genesis transparent outputs must be ignored, \ - so there can not be any spends in the genesis block", - block.height, + "Unexpected transparent prevout input at height 0. Genesis transparent outputs \ + must be ignored, so there can not be any spends in the genesis block.", ); + } else { + full_chain = full_chain + .push(block) + .expect("full chain push is valid"); } } From 512dd9bc5d69228b400e8aa7d35ea9bbdad709e9 Mon Sep 17 00:00:00 2001 From: teor Date: Thu, 20 Jul 2023 11:50:25 +1000 Subject: [PATCH 234/265] change(doc): Document how to upgrade the database format (#7261) * Move the state format into a new doc * Add upgrade instructions * Link to the format upgrade docs from the upgrade code * Fix typo Co-authored-by: Marek --------- Co-authored-by: Marek --- book/src/SUMMARY.md | 13 +- book/src/dev/rfcs/0005-state-updates.md | 300 +-------------- book/src/dev/state-db-upgrades.md | 356 ++++++++++++++++++ .../finalized_state/disk_format/upgrade.rs | 5 +- 4 files changed, 367 insertions(+), 307 deletions(-) create mode 100644 book/src/dev/state-db-upgrades.md diff --git a/book/src/SUMMARY.md b/book/src/SUMMARY.md index 77a8a8e354d..e0c32275940 100644 --- a/book/src/SUMMARY.md +++ b/book/src/SUMMARY.md @@ -20,7 +20,14 @@ - [Developer Documentation](dev.md) - [Contribution Guide](CONTRIBUTING.md) - [Design Overview](dev/overview.md) + - [Diagrams](dev/diagrams.md) + - [Network Architecture](dev/diagrams/zebra-network.md) + - [Upgrading the State Database](dev/state-db-upgrades.md) - [Zebra versioning and releases](dev/release-process.md) + - [Continuous Integration](dev/continuous-integration.md) + - [Continuous Delivery](dev/continuous-delivery.md) + - 
[Generating Zebra Checkpoints](dev/zebra-checkpoints.md) + - [Doing Mass Renames](dev/mass-renames.md) - [Zebra RFCs](dev/rfcs.md) - [Pipelinable Block Lookup](dev/rfcs/0001-pipelinable-block-lookup.md) - [Parallel Verification](dev/rfcs/0002-parallel-verification.md) @@ -32,10 +39,4 @@ - [V5 Transaction](dev/rfcs/0010-v5-transaction.md) - [Async Rust in Zebra](dev/rfcs/0011-async-rust-in-zebra.md) - [Value Pools](dev/rfcs/0012-value-pools.md) - - [Diagrams](dev/diagrams.md) - - [Network Architecture](dev/diagrams/zebra-network.md) - - [Continuous Integration](dev/continuous-integration.md) - - [Continuous Delivery](dev/continuous-delivery.md) - - [Generating Zebra Checkpoints](dev/zebra-checkpoints.md) - - [Doing Mass Renames](dev/mass-renames.md) - [API Reference](api.md) diff --git a/book/src/dev/rfcs/0005-state-updates.md b/book/src/dev/rfcs/0005-state-updates.md index e47245ad175..7767975fd11 100644 --- a/book/src/dev/rfcs/0005-state-updates.md +++ b/book/src/dev/rfcs/0005-state-updates.md @@ -663,305 +663,7 @@ New `non-finalized` blocks are committed as follows: ## rocksdb data structures [rocksdb]: #rocksdb -rocksdb provides a persistent, thread-safe `BTreeMap<&[u8], &[u8]>`. Each map is -a distinct "tree". Keys are sorted using lex order on byte strings, so -integer values should be stored using big-endian encoding (so that the lex -order on byte strings is the numeric ordering). - -Note that the lex order storage allows creating 1-to-many maps using keys only. -For example, the `tx_loc_by_transparent_addr_loc` allows mapping each address -to all transactions related to it, by simply storing each transaction prefixed -with the address as the key, leaving the value empty. Since rocksdb allows -listing all keys with a given prefix, it will allow listing all transactions -related to a given address. 
- -We use the following rocksdb column families: - -| Column Family | Keys | Values | Changes | -| ---------------------------------- | ---------------------- | ----------------------------- | ------- | -| *Blocks* | | | | -| `hash_by_height` | `block::Height` | `block::Hash` | Create | -| `height_by_hash` | `block::Hash` | `block::Height` | Create | -| `block_header_by_height` | `block::Height` | `block::Header` | Create | -| *Transactions* | | | | -| `tx_by_loc` | `TransactionLocation` | `Transaction` | Create | -| `hash_by_tx_loc` | `TransactionLocation` | `transaction::Hash` | Create | -| `tx_loc_by_hash` | `transaction::Hash` | `TransactionLocation` | Create | -| *Transparent* | | | | -| `balance_by_transparent_addr` | `transparent::Address` | `Amount \|\| AddressLocation` | Update | -| `tx_loc_by_transparent_addr_loc` | `AddressTransaction` | `()` | Create | -| `utxo_by_out_loc` | `OutputLocation` | `transparent::Output` | Delete | -| `utxo_loc_by_transparent_addr_loc` | `AddressUnspentOutput` | `()` | Delete | -| *Sprout* | | | | -| `sprout_nullifiers` | `sprout::Nullifier` | `()` | Create | -| `sprout_anchors` | `sprout::tree::Root` | `sprout::NoteCommitmentTree` | Create | -| `sprout_note_commitment_tree` | `block::Height` | `sprout::NoteCommitmentTree` | Delete | -| *Sapling* | | | | -| `sapling_nullifiers` | `sapling::Nullifier` | `()` | Create | -| `sapling_anchors` | `sapling::tree::Root` | `()` | Create | -| `sapling_note_commitment_tree` | `block::Height` | `sapling::NoteCommitmentTree` | Create | -| *Orchard* | | | | -| `orchard_nullifiers` | `orchard::Nullifier` | `()` | Create | -| `orchard_anchors` | `orchard::tree::Root` | `()` | Create | -| `orchard_note_commitment_tree` | `block::Height` | `orchard::NoteCommitmentTree` | Create | -| *Chain* | | | | -| `history_tree` | `block::Height` | `NonEmptyHistoryTree` | Delete | -| `tip_chain_value_pool` | `()` | `ValueBalance` | Update | - -Zcash structures are encoded using 
`ZcashSerialize`/`ZcashDeserialize`. -Other structures are encoded using `IntoDisk`/`FromDisk`. - -Block and Transaction Data: -- `Height`: 24 bits, big-endian, unsigned (allows for ~30 years worth of blocks) -- `TransactionIndex`: 16 bits, big-endian, unsigned (max ~23,000 transactions in the 2 MB block limit) -- `TransactionCount`: same as `TransactionIndex` -- `TransactionLocation`: `Height \|\| TransactionIndex` -- `OutputIndex`: 24 bits, big-endian, unsigned (max ~223,000 transfers in the 2 MB block limit) -- transparent and shielded input indexes, and shielded output indexes: 16 bits, big-endian, unsigned (max ~49,000 transfers in the 2 MB block limit) -- `OutputLocation`: `TransactionLocation \|\| OutputIndex` -- `AddressLocation`: the first `OutputLocation` used by a `transparent::Address`. - Always has the same value for each address, even if the first output is spent. -- `Utxo`: `Output`, derives extra fields from the `OutputLocation` key -- `AddressUnspentOutput`: `AddressLocation \|\| OutputLocation`, - used instead of a `BTreeSet` value, to improve database performance -- `AddressTransaction`: `AddressLocation \|\| TransactionLocation` - used instead of a `BTreeSet` value, to improve database performance - -We use big-endian encoding for keys, to allow database index prefix searches. - -Amounts: -- `Amount`: 64 bits, little-endian, signed -- `ValueBalance`: `[Amount; 4]` - -Derived Formats: -- `*::NoteCommitmentTree`: `bincode` using `serde` -- `NonEmptyHistoryTree`: `bincode` using `serde`, using `zcash_history`'s `serde` implementation - - -The following figure helps visualizing the address index, which is the most complicated part. -Numbers in brackets are array sizes; bold arrows are compositions (i.e. `TransactionLocation` is the -concatenation of `Height` and `TransactionIndex`); dashed arrows are compositions that are also 1-to-many -maps (i.e. 
`AddressTransaction` is the concatenation of `AddressLocation` and `TransactionLocation`, -but also is used to map each `AddressLocation` to multiple `TransactionLocation`s). - -```mermaid -graph TD; - Address -->|"balance_by_transparent_addr
"| AddressBalance; - AddressBalance ==> Amount; - AddressBalance ==> AddressLocation; - AddressLocation ==> FirstOutputLocation; - AddressLocation -.->|"tx_loc_by_transparent_addr_loc
(AddressTransaction[13])"| TransactionLocation; - TransactionLocation ==> Height; - TransactionLocation ==> TransactionIndex; - OutputLocation -->|utxo_by_out_loc| Output; - OutputLocation ==> TransactionLocation; - OutputLocation ==> OutputIndex; - AddressLocation -.->|"utxo_loc_by_transparent_addr_loc
(AddressUnspentOutput[16])"| OutputLocation; - - AddressBalance["AddressBalance[16]"]; - Amount["Amount[8]"]; - Height["Height[3]"]; - Address["Address[21]"]; - TransactionIndex["TransactionIndex[2]"]; - TransactionLocation["TransactionLocation[5]"]; - OutputIndex["OutputIndex[3]"]; - OutputLocation["OutputLocation[8]"]; - FirstOutputLocation["First OutputLocation[8]"]; - AddressLocation["AddressLocation[8]"]; -``` - -### Implementing consensus rules using rocksdb -[rocksdb-consensus-rules]: #rocksdb-consensus-rules - -Each column family handles updates differently, based on its specific consensus rules: -- Create: - - Each key-value entry is created once. - - Keys are never deleted, values are never updated. -- Delete: - - Each key-value entry is created once. - - Keys can be deleted, but values are never updated. - - Code called by ReadStateService must ignore deleted keys, or use a read lock. - - TODO: should we prevent re-inserts of keys that have been deleted? -- Update: - - Each key-value entry is created once. - - Keys are never deleted, but values can be updated. - - Code called by ReadStateService must handle old or new values, or use a read lock. - -We can't do some kinds of value updates, because they cause RocksDB performance issues: -- Append: - - Keys are never deleted. - - Existing values are never updated. - - Sets of values have additional items appended to the end of the set. - - Code called by ReadStateService must handle shorter or longer sets, or use a read lock. -- Up/Del: - - Keys can be deleted. - - Sets of values have items added or deleted (in any position). - - Code called by ReadStateService must ignore deleted keys and values, - accept shorter or longer sets, and accept old or new values. - Or it should use a read lock. - -Avoid using large sets of values as RocksDB keys or values. 
- -### RocksDB read locks -[rocksdb-read-locks]: #rocksdb-read-locks - -The read-only ReadStateService needs to handle concurrent writes and deletes of the finalized -column families it reads. It must also handle overlaps between the cached non-finalized `Chain`, -and the current finalized state database. - -The StateService uses RocksDB transactions for each block write. -So ReadStateService queries that only access a single key or value will always see -a consistent view of the database. - -If a ReadStateService query only uses column families that have keys and values appended -(`Never` in the Updates table above), it should ignore extra appended values. -Most queries do this by default. - -For more complex queries, there are several options: - -Reading across multiple column families: -1. Ignore deleted values using custom Rust code -2. Take a database snapshot - https://docs.rs/rocksdb/latest/rocksdb/struct.DBWithThreadMode.html#method.snapshot - -Reading a single column family: -3. multi_get - https://docs.rs/rocksdb/latest/rocksdb/struct.DBWithThreadMode.html#method.multi_get_cf -4. iterator - https://docs.rs/rocksdb/latest/rocksdb/struct.DBWithThreadMode.html#method.iterator_cf - -RocksDB also has read transactions, but they don't seem to be exposed in the Rust crate. - -### Low-Level Implementation Details -[rocksdb-low-level]: #rocksdb-low-level - -RocksDB ignores duplicate puts and deletes, preserving the latest values. -If rejecting duplicate puts or deletes is consensus-critical, -check [`db.get_cf(cf, key)?`](https://docs.rs/rocksdb/0.16.0/rocksdb/struct.DBWithThreadMode.html#method.get_cf) -before putting or deleting any values in a batch. - -Currently, these restrictions should be enforced by code review: -- multiple `zs_insert`s are only allowed on Update column families, and -- [`delete_cf`](https://docs.rs/rocksdb/0.16.0/rocksdb/struct.WriteBatch.html#method.delete_cf) - is only allowed on Delete column families. 
- -In future, we could enforce these restrictions by: -- creating traits for Never, Delete, and Update -- doing different checks in `zs_insert` depending on the trait -- wrapping `delete_cf` in a trait, and only implementing that trait for types that use Delete column families. - -As of June 2021, the Rust `rocksdb` crate [ignores the delete callback](https://docs.rs/rocksdb/0.16.0/src/rocksdb/merge_operator.rs.html#83-94), -and merge operators are unreliable (or have undocumented behaviour). -So they should not be used for consensus-critical checks. - -### Notes on rocksdb column families -[rocksdb-column-families]: #rocksdb-column-families - -- The `hash_by_height` and `height_by_hash` column families provide a bijection between - block heights and block hashes. (Since the rocksdb state only stores finalized - state, they are actually a bijection). - -- Similarly, the `tx_loc_by_hash` and `hash_by_tx_loc` column families provide a bijection between - transaction locations and transaction hashes. - -- The `block_header_by_height` column family provides a bijection between block - heights and block header data. There is no corresponding `height_by_block` column - family: instead, hash the block header, and use the hash from `height_by_hash`. - (Since the rocksdb state only stores finalized state, they are actually a bijection). - Similarly, there are no column families that go from transaction data - to transaction locations: hash the transaction and use `tx_loc_by_hash`. - -- Block headers and transactions are stored separately in the database, - so that individual transactions can be accessed efficiently. 
- Blocks can be re-created on request using the following process: - - Look up `height` in `height_by_hash` - - Get the block header for `height` from `block_header_by_height` - - Iterate from `TransactionIndex` 0, - to get each transaction with `height` from `tx_by_loc`, - stopping when there are no more transactions in the block - -- Block headers are stored by height, not by hash. This has the downside that looking - up a block by hash requires an extra level of indirection. The upside is - that blocks with adjacent heights are adjacent in the database, and many - common access patterns, such as helping a client sync the chain or doing - analysis, access blocks in (potentially sparse) height order. In addition, - the fact that we commit blocks in order means we're writing only to the end - of the rocksdb column family, which may help save space. - -- Similarly, transaction data is stored in chain order in `tx_by_loc` and `utxo_by_out_loc`, - and chain order within each vector in `utxo_loc_by_transparent_addr_loc` and - `tx_loc_by_transparent_addr_loc`. - -- `TransactionLocation`s are stored as a `(height, index)` pair referencing the - height of the transaction's parent block and the transaction's index in that - block. This would more traditionally be a `(hash, index)` pair, but because - we store blocks by height, storing the height saves one level of indirection. - Transaction hashes can be looked up using `hash_by_tx_loc`. - -- Similarly, UTXOs are stored in `utxo_by_out_loc` by `OutputLocation`, - rather than `OutPoint`. `OutPoint`s can be looked up using `tx_loc_by_hash`, - and reconstructed using `hash_by_tx_loc`. - -- The `Utxo` type can be constructed from the `OutputLocation` and `Output` data, - `height: OutputLocation.height`, and - `is_coinbase: OutputLocation.transaction_index == 0` - (coinbase transactions are always the first transaction in a block). 
- -- `balance_by_transparent_addr` is the sum of all `utxo_loc_by_transparent_addr_loc`s - that are still in `utxo_by_out_loc`. It is cached to improve performance for - addresses with large UTXO sets. It also stores the `AddressLocation` for each - address, which allows for efficient lookups. - -- `utxo_loc_by_transparent_addr_loc` stores unspent transparent output locations - by address. The address location and UTXO location are stored as a RocksDB key, - so they are in chain order, and get good database performance. - This column family includes also includes the original address location UTXO, - if it has not been spent. - -- When a block write deletes a UTXO from `utxo_by_out_loc`, - that UTXO location should be deleted from `utxo_loc_by_transparent_addr_loc`. - The deleted UTXO can be removed efficiently, because the UTXO location is part of the key. - This is an index optimisation, which does not affect query results. - -- `tx_loc_by_transparent_addr_loc` stores transaction locations by address. - This list includes transactions containing spent UTXOs. - The address location and transaction location are stored as a RocksDB key, - so they are in chain order, and get good database performance. - This column family also includes the `TransactionLocation` - of the transaction for the `AddressLocation`. - -- The `sprout_note_commitment_tree` stores the note commitment tree state - at the tip of the finalized state, for the specific pool. There is always - a single entry. Each tree is stored - as a "Merkle tree frontier" which is basically a (logarithmic) subset of - the Merkle tree nodes as required to insert new items. - For each block committed, the old tree is deleted and a new one is inserted - by its new height. - **TODO:** store the sprout note commitment tree by `()`, - to avoid ReadStateService concurrent write issues. - -- The `{sapling, orchard}_note_commitment_tree` stores the note commitment tree - state for every height, for the specific pool. 
Each tree is stored - as a "Merkle tree frontier" which is basically a (logarithmic) subset of - the Merkle tree nodes as required to insert new items. - -- `history_tree` stores the ZIP-221 history tree state at the tip of the finalized - state. There is always a single entry for it. The tree is stored as the set of "peaks" - of the "Merkle mountain range" tree structure, which is what is required to - insert new items. - **TODO:** store the history tree by `()`, to avoid ReadStateService concurrent write issues. - -- Each `*_anchors` stores the anchor (the root of a Merkle tree) of the note commitment - tree of a certain block. We only use the keys since we just need the set of anchors, - regardless of where they come from. The exception is `sprout_anchors` which also maps - the anchor to the matching note commitment tree. This is required to support interstitial - treestates, which are unique to Sprout. - **TODO:** store the `Root` hash in `sprout_note_commitment_tree`, and use it to look up the - note commitment tree. This de-duplicates tree state data. But we currently only store one sprout tree by height. - -- The value pools are only stored for the finalized tip. - -- We do not store the cumulative work for the finalized chain, - because the finalized work is equal for all non-finalized chains. - So the additional non-finalized work can be used to calculate the relative chain order, - and choose the best chain. +The current database format is documented in [Upgrading the State Database](../state-db-upgrades.md). ## Committing finalized blocks diff --git a/book/src/dev/state-db-upgrades.md b/book/src/dev/state-db-upgrades.md new file mode 100644 index 00000000000..2174aba24ec --- /dev/null +++ b/book/src/dev/state-db-upgrades.md @@ -0,0 +1,356 @@ +# Zebra Cached State Database Implementation + +## Upgrading the State Database + +For most state upgrades, we want to modify the database format of the existing database. 
If we +change the major database version, every user needs to re-download and re-verify all the blocks, +which can take days. + +### In-Place Upgrade Goals + +- avoid a full download and rebuild of the state +- the previous state format must be able to be loaded by the new state + - this is checked the first time CI runs on a PR with a new state version. + After the first CI run, the cached state is marked as upgraded, so the upgrade doesn't run + again. If CI fails on the first run, any cached states with that version should be deleted. +- previous zebra versions should be able to load the new format + - this is checked by other PRs running using the upgraded cached state, but only if a Rust PR + runs after the new PR's CI finishes, but before it merges +- best-effort loading of older supported states by newer Zebra versions +- best-effort compatibility between newer states and older supported Zebra versions + +### Design Constraints +[design]: #design + +Upgrades run concurrently with state verification and RPC requests. 
+ +This means that: +- the state must be able to read the old and new formats + - it can't panic if the data is missing + - it can't give incorrect results, because that can affect verification or wallets + - it can return an error + - it can only return an `Option` if the caller handles it correctly +- multiple upgrades must produce a valid state format + - if Zebra is restarted, the format upgrade will run multiple times + - if an older Zebra version opens the state, data can be written in an older format +- the format must be valid before and after each database transaction or API call, because an upgrade can be cancelled at any time + - multi-column family changes should be made in database transactions + - if you are building a new column family, disable state queries, then enable them once it's done + - if each database API call produces a valid format, transactions aren't needed + +If there is an upgrade failure, it can panic and tell the user to delete their cached state and re-launch Zebra. + +### Implementation Steps + +- [ ] update the [database format](https://github.com/ZcashFoundation/zebra/blob/main/book/src/dev/state-db-upgrades.md#current) in the Zebra docs +- [ ] increment the state minor version +- [ ] write the new format in the block write task +- [ ] update older formats in the format upgrade task +- [ ] test that the new format works when creating a new state, and updating an older state + +See the [upgrade design docs](https://github.com/ZcashFoundation/zebra/blob/main/book/src/dev/state-db-upgrades.md#design) for more details. + +These steps can be copied into tickets. + +## Current State Database Format +[current]: #current + +rocksdb provides a persistent, thread-safe `BTreeMap<&[u8], &[u8]>`. Each map is +a distinct "tree". Keys are sorted using lexicographic order (`[u8].sorted()`) on byte strings, so +integer values should be stored using big-endian encoding (so that the lex +order on byte strings is the numeric ordering). 
+ +Note that the lex order storage allows creating 1-to-many maps using keys only. +For example, the `tx_loc_by_transparent_addr_loc` allows mapping each address +to all transactions related to it, by simply storing each transaction prefixed +with the address as the key, leaving the value empty. Since rocksdb allows +listing all keys with a given prefix, it will allow listing all transactions +related to a given address. + +We use the following rocksdb column families: + +| Column Family | Keys | Values | Changes | +| ---------------------------------- | ---------------------- | ----------------------------- | ------- | +| *Blocks* | | | | +| `hash_by_height` | `block::Height` | `block::Hash` | Create | +| `height_by_hash` | `block::Hash` | `block::Height` | Create | +| `block_header_by_height` | `block::Height` | `block::Header` | Create | +| *Transactions* | | | | +| `tx_by_loc` | `TransactionLocation` | `Transaction` | Create | +| `hash_by_tx_loc` | `TransactionLocation` | `transaction::Hash` | Create | +| `tx_loc_by_hash` | `transaction::Hash` | `TransactionLocation` | Create | +| *Transparent* | | | | +| `balance_by_transparent_addr` | `transparent::Address` | `Amount \|\| AddressLocation` | Update | +| `tx_loc_by_transparent_addr_loc` | `AddressTransaction` | `()` | Create | +| `utxo_by_out_loc` | `OutputLocation` | `transparent::Output` | Delete | +| `utxo_loc_by_transparent_addr_loc` | `AddressUnspentOutput` | `()` | Delete | +| *Sprout* | | | | +| `sprout_nullifiers` | `sprout::Nullifier` | `()` | Create | +| `sprout_anchors` | `sprout::tree::Root` | `sprout::NoteCommitmentTree` | Create | +| `sprout_note_commitment_tree` | `block::Height` | `sprout::NoteCommitmentTree` | Delete | +| *Sapling* | | | | +| `sapling_nullifiers` | `sapling::Nullifier` | `()` | Create | +| `sapling_anchors` | `sapling::tree::Root` | `()` | Create | +| `sapling_note_commitment_tree` | `block::Height` | `sapling::NoteCommitmentTree` | Create | +| *Orchard* | | | | +| 
`orchard_nullifiers` | `orchard::Nullifier` | `()` | Create | +| `orchard_anchors` | `orchard::tree::Root` | `()` | Create | +| `orchard_note_commitment_tree` | `block::Height` | `orchard::NoteCommitmentTree` | Create | +| *Chain* | | | | +| `history_tree` | `block::Height` | `NonEmptyHistoryTree` | Delete | +| `tip_chain_value_pool` | `()` | `ValueBalance` | Update | + +Zcash structures are encoded using `ZcashSerialize`/`ZcashDeserialize`. +Other structures are encoded using `IntoDisk`/`FromDisk`. + +Block and Transaction Data: +- `Height`: 24 bits, big-endian, unsigned (allows for ~30 years worth of blocks) +- `TransactionIndex`: 16 bits, big-endian, unsigned (max ~23,000 transactions in the 2 MB block limit) +- `TransactionCount`: same as `TransactionIndex` +- `TransactionLocation`: `Height \|\| TransactionIndex` +- `OutputIndex`: 24 bits, big-endian, unsigned (max ~223,000 transfers in the 2 MB block limit) +- transparent and shielded input indexes, and shielded output indexes: 16 bits, big-endian, unsigned (max ~49,000 transfers in the 2 MB block limit) +- `OutputLocation`: `TransactionLocation \|\| OutputIndex` +- `AddressLocation`: the first `OutputLocation` used by a `transparent::Address`. + Always has the same value for each address, even if the first output is spent. +- `Utxo`: `Output`, derives extra fields from the `OutputLocation` key +- `AddressUnspentOutput`: `AddressLocation \|\| OutputLocation`, + used instead of a `BTreeSet` value, to improve database performance +- `AddressTransaction`: `AddressLocation \|\| TransactionLocation` + used instead of a `BTreeSet` value, to improve database performance + +We use big-endian encoding for keys, to allow database index prefix searches. 
+ +Amounts: +- `Amount`: 64 bits, little-endian, signed +- `ValueBalance`: `[Amount; 4]` + +Derived Formats: +- `*::NoteCommitmentTree`: `bincode` using `serde` +- `NonEmptyHistoryTree`: `bincode` using `serde`, using `zcash_history`'s `serde` implementation + + +The following figure helps visualizing the address index, which is the most complicated part. +Numbers in brackets are array sizes; bold arrows are compositions (i.e. `TransactionLocation` is the +concatenation of `Height` and `TransactionIndex`); dashed arrows are compositions that are also 1-to-many +maps (i.e. `AddressTransaction` is the concatenation of `AddressLocation` and `TransactionLocation`, +but also is used to map each `AddressLocation` to multiple `TransactionLocation`s). + +```mermaid +graph TD; + Address -->|"balance_by_transparent_addr
"| AddressBalance; + AddressBalance ==> Amount; + AddressBalance ==> AddressLocation; + AddressLocation ==> FirstOutputLocation; + AddressLocation -.->|"tx_loc_by_transparent_addr_loc
(AddressTransaction[13])"| TransactionLocation; + TransactionLocation ==> Height; + TransactionLocation ==> TransactionIndex; + OutputLocation -->|utxo_by_out_loc| Output; + OutputLocation ==> TransactionLocation; + OutputLocation ==> OutputIndex; + AddressLocation -.->|"utxo_loc_by_transparent_addr_loc
(AddressUnspentOutput[16])"| OutputLocation; + + AddressBalance["AddressBalance[16]"]; + Amount["Amount[8]"]; + Height["Height[3]"]; + Address["Address[21]"]; + TransactionIndex["TransactionIndex[2]"]; + TransactionLocation["TransactionLocation[5]"]; + OutputIndex["OutputIndex[3]"]; + OutputLocation["OutputLocation[8]"]; + FirstOutputLocation["First OutputLocation[8]"]; + AddressLocation["AddressLocation[8]"]; +``` + +### Implementing consensus rules using rocksdb +[rocksdb-consensus-rules]: #rocksdb-consensus-rules + +Each column family handles updates differently, based on its specific consensus rules: +- Create: + - Each key-value entry is created once. + - Keys are never deleted, values are never updated. +- Delete: + - Each key-value entry is created once. + - Keys can be deleted, but values are never updated. + - Code called by ReadStateService must ignore deleted keys, or use a read lock. + - TODO: should we prevent re-inserts of keys that have been deleted? +- Update: + - Each key-value entry is created once. + - Keys are never deleted, but values can be updated. + - Code called by ReadStateService must handle old or new values, or use a read lock. + +We can't do some kinds of value updates, because they cause RocksDB performance issues: +- Append: + - Keys are never deleted. + - Existing values are never updated. + - Sets of values have additional items appended to the end of the set. + - Code called by ReadStateService must handle shorter or longer sets, or use a read lock. +- Up/Del: + - Keys can be deleted. + - Sets of values have items added or deleted (in any position). + - Code called by ReadStateService must ignore deleted keys and values, + accept shorter or longer sets, and accept old or new values. + Or it should use a read lock. + +Avoid using large sets of values as RocksDB keys or values. 
+ +### RocksDB read locks +[rocksdb-read-locks]: #rocksdb-read-locks + +The read-only ReadStateService needs to handle concurrent writes and deletes of the finalized +column families it reads. It must also handle overlaps between the cached non-finalized `Chain`, +and the current finalized state database. + +The StateService uses RocksDB transactions for each block write. +So ReadStateService queries that only access a single key or value will always see +a consistent view of the database. + +If a ReadStateService query only uses column families that have keys and values appended +(`Never` in the Updates table above), it should ignore extra appended values. +Most queries do this by default. + +For more complex queries, there are several options: + +Reading across multiple column families: +1. Ignore deleted values using custom Rust code +2. Take a database snapshot - https://docs.rs/rocksdb/latest/rocksdb/struct.DBWithThreadMode.html#method.snapshot + +Reading a single column family: +3. multi_get - https://docs.rs/rocksdb/latest/rocksdb/struct.DBWithThreadMode.html#method.multi_get_cf +4. iterator - https://docs.rs/rocksdb/latest/rocksdb/struct.DBWithThreadMode.html#method.iterator_cf + +RocksDB also has read transactions, but they don't seem to be exposed in the Rust crate. + +### Low-Level Implementation Details +[rocksdb-low-level]: #rocksdb-low-level + +RocksDB ignores duplicate puts and deletes, preserving the latest values. +If rejecting duplicate puts or deletes is consensus-critical, +check [`db.get_cf(cf, key)?`](https://docs.rs/rocksdb/0.16.0/rocksdb/struct.DBWithThreadMode.html#method.get_cf) +before putting or deleting any values in a batch. + +Currently, these restrictions should be enforced by code review: +- multiple `zs_insert`s are only allowed on Update column families, and +- [`delete_cf`](https://docs.rs/rocksdb/0.16.0/rocksdb/struct.WriteBatch.html#method.delete_cf) + is only allowed on Delete column families. 
+ +In future, we could enforce these restrictions by: +- creating traits for Never, Delete, and Update +- doing different checks in `zs_insert` depending on the trait +- wrapping `delete_cf` in a trait, and only implementing that trait for types that use Delete column families. + +As of June 2021, the Rust `rocksdb` crate [ignores the delete callback](https://docs.rs/rocksdb/0.16.0/src/rocksdb/merge_operator.rs.html#83-94), +and merge operators are unreliable (or have undocumented behaviour). +So they should not be used for consensus-critical checks. + +### Notes on rocksdb column families +[rocksdb-column-families]: #rocksdb-column-families + +- The `hash_by_height` and `height_by_hash` column families provide a bijection between + block heights and block hashes. (Since the rocksdb state only stores finalized + state, they are actually a bijection). + +- Similarly, the `tx_loc_by_hash` and `hash_by_tx_loc` column families provide a bijection between + transaction locations and transaction hashes. + +- The `block_header_by_height` column family provides a bijection between block + heights and block header data. There is no corresponding `height_by_block` column + family: instead, hash the block header, and use the hash from `height_by_hash`. + (Since the rocksdb state only stores finalized state, they are actually a bijection). + Similarly, there are no column families that go from transaction data + to transaction locations: hash the transaction and use `tx_loc_by_hash`. + +- Block headers and transactions are stored separately in the database, + so that individual transactions can be accessed efficiently. 
+ Blocks can be re-created on request using the following process: + - Look up `height` in `height_by_hash` + - Get the block header for `height` from `block_header_by_height` + - Iterate from `TransactionIndex` 0, + to get each transaction with `height` from `tx_by_loc`, + stopping when there are no more transactions in the block + +- Block headers are stored by height, not by hash. This has the downside that looking + up a block by hash requires an extra level of indirection. The upside is + that blocks with adjacent heights are adjacent in the database, and many + common access patterns, such as helping a client sync the chain or doing + analysis, access blocks in (potentially sparse) height order. In addition, + the fact that we commit blocks in order means we're writing only to the end + of the rocksdb column family, which may help save space. + +- Similarly, transaction data is stored in chain order in `tx_by_loc` and `utxo_by_out_loc`, + and chain order within each vector in `utxo_loc_by_transparent_addr_loc` and + `tx_loc_by_transparent_addr_loc`. + +- `TransactionLocation`s are stored as a `(height, index)` pair referencing the + height of the transaction's parent block and the transaction's index in that + block. This would more traditionally be a `(hash, index)` pair, but because + we store blocks by height, storing the height saves one level of indirection. + Transaction hashes can be looked up using `hash_by_tx_loc`. + +- Similarly, UTXOs are stored in `utxo_by_out_loc` by `OutputLocation`, + rather than `OutPoint`. `OutPoint`s can be looked up using `tx_loc_by_hash`, + and reconstructed using `hash_by_tx_loc`. + +- The `Utxo` type can be constructed from the `OutputLocation` and `Output` data, + `height: OutputLocation.height`, and + `is_coinbase: OutputLocation.transaction_index == 0` + (coinbase transactions are always the first transaction in a block). 
+ +- `balance_by_transparent_addr` is the sum of all `utxo_loc_by_transparent_addr_loc`s + that are still in `utxo_by_out_loc`. It is cached to improve performance for + addresses with large UTXO sets. It also stores the `AddressLocation` for each + address, which allows for efficient lookups. + +- `utxo_loc_by_transparent_addr_loc` stores unspent transparent output locations + by address. The address location and UTXO location are stored as a RocksDB key, + so they are in chain order, and get good database performance. + This column family also includes the original address location UTXO, + if it has not been spent. + +- When a block write deletes a UTXO from `utxo_by_out_loc`, + that UTXO location should be deleted from `utxo_loc_by_transparent_addr_loc`. + The deleted UTXO can be removed efficiently, because the UTXO location is part of the key. + This is an index optimisation, which does not affect query results. + +- `tx_loc_by_transparent_addr_loc` stores transaction locations by address. + This list includes transactions containing spent UTXOs. + The address location and transaction location are stored as a RocksDB key, + so they are in chain order, and get good database performance. + This column family also includes the `TransactionLocation` + of the transaction for the `AddressLocation`. + +- The `sprout_note_commitment_tree` stores the note commitment tree state + at the tip of the finalized state, for the specific pool. There is always + a single entry. Each tree is stored + as a "Merkle tree frontier" which is basically a (logarithmic) subset of + the Merkle tree nodes as required to insert new items. + For each block committed, the old tree is deleted and a new one is inserted + by its new height. + **TODO:** store the sprout note commitment tree by `()`, + to avoid ReadStateService concurrent write issues. + +- The `{sapling, orchard}_note_commitment_tree` stores the note commitment tree + state for every height, for the specific pool. 
Each tree is stored + as a "Merkle tree frontier" which is basically a (logarithmic) subset of + the Merkle tree nodes as required to insert new items. + +- `history_tree` stores the ZIP-221 history tree state at the tip of the finalized + state. There is always a single entry for it. The tree is stored as the set of "peaks" + of the "Merkle mountain range" tree structure, which is what is required to + insert new items. + **TODO:** store the history tree by `()`, to avoid ReadStateService concurrent write issues. + +- Each `*_anchors` stores the anchor (the root of a Merkle tree) of the note commitment + tree of a certain block. We only use the keys since we just need the set of anchors, + regardless of where they come from. The exception is `sprout_anchors` which also maps + the anchor to the matching note commitment tree. This is required to support interstitial + treestates, which are unique to Sprout. + **TODO:** store the `Root` hash in `sprout_note_commitment_tree`, and use it to look up the + note commitment tree. This de-duplicates tree state data. But we currently only store one sprout tree by height. + +- The value pools are only stored for the finalized tip. + +- We do not store the cumulative work for the finalized chain, + because the finalized work is equal for all non-finalized chains. + So the additional non-finalized work can be used to calculate the relative chain order, + and choose the best chain. diff --git a/zebra-state/src/service/finalized_state/disk_format/upgrade.rs b/zebra-state/src/service/finalized_state/disk_format/upgrade.rs index 15c1e003776..9f61855c12c 100644 --- a/zebra-state/src/service/finalized_state/disk_format/upgrade.rs +++ b/zebra-state/src/service/finalized_state/disk_format/upgrade.rs @@ -218,6 +218,9 @@ impl DbFormatChange { /// /// If `cancel_receiver` gets a message, or its sender is dropped, /// the format change stops running early. 
+ /// + /// See the format upgrade design docs for more details: + /// // // New format upgrades must be added to the *end* of this method. fn apply_format_upgrade( @@ -259,8 +262,6 @@ impl DbFormatChange { }; // Example format change. - // - // TODO: link to format upgrade instructions doc here // Check if we need to do this upgrade. let database_format_add_format_change_task = From a61d464d4dd56916adb51bf83ed32aef4dc3052f Mon Sep 17 00:00:00 2001 From: teor Date: Thu, 20 Jul 2023 21:50:13 +1000 Subject: [PATCH 235/265] Fix Docker git commit handling (#7265) --- docker/Dockerfile | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/docker/Dockerfile b/docker/Dockerfile index 4c2c8e98262..22a673ca327 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -80,7 +80,9 @@ ARG COLORBT_SHOW_HIDDEN ENV COLORBT_SHOW_HIDDEN=${COLORBT_SHOW_HIDDEN:-1} ARG SHORT_SHA -ENV SHORT_SHA=${SHORT_SHA:-unknown} +# If this is not set, it must be the empty string, so Zebra can try an alternative git commit source: +# https://github.com/ZcashFoundation/zebra/blob/9ebd56092bcdfc1a09062e15a0574c94af37f389/zebrad/src/application.rs#L179-L182 +ENV SHORT_SHA=${SHORT_SHA:-} ENV CARGO_HOME="/opt/zebrad/.cargo/" From a78112ae392ea7616ce90558bdb7b5bb9d3ca1be Mon Sep 17 00:00:00 2001 From: teor Date: Fri, 21 Jul 2023 10:33:41 +1000 Subject: [PATCH 236/265] change(deps): Remove OpenSSL dependency from dev tools (#7257) * Remove OpenSSL dependency from dev tools * Update Cargo.lock --- Cargo.lock | 145 ----------------------------------------- zebra-utils/Cargo.toml | 3 +- 2 files changed, 2 insertions(+), 146 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index cb7297935eb..4d02a6fcd83 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -883,16 +883,6 @@ version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "21a53c0a4d288377e7415b53dcfc3c04da5cdc2cc95c8d5ac178b58f0b861ad6" -[[package]] -name = "core-foundation" -version = "0.9.3" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "194a7a9e6de53fa55116934067c844d9d749312f75c6f6d0980e8c252f8c2146" -dependencies = [ - "core-foundation-sys", - "libc", -] - [[package]] name = "core-foundation-sys" version = "0.8.4" @@ -1417,21 +1407,6 @@ version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" -[[package]] -name = "foreign-types" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1" -dependencies = [ - "foreign-types-shared", -] - -[[package]] -name = "foreign-types-shared" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" - [[package]] name = "form_urlencoded" version = "1.2.0" @@ -1935,19 +1910,6 @@ dependencies = [ "tokio-io-timeout", ] -[[package]] -name = "hyper-tls" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d6183ddfa99b85da61a140bea0efc93fdf56ceaa041b37d553518030827f9905" -dependencies = [ - "bytes", - "hyper", - "native-tls", - "tokio", - "tokio-native-tls", -] - [[package]] name = "iana-time-zone" version = "0.1.57" @@ -2576,24 +2538,6 @@ dependencies = [ "getrandom 0.2.10", ] -[[package]] -name = "native-tls" -version = "0.2.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07226173c32f2926027b63cce4bcd8076c3552846cbe7925f3aaffeac0a3b92e" -dependencies = [ - "lazy_static", - "libc", - "log", - "openssl", - "openssl-probe", - "openssl-sys", - "schannel", - "security-framework", - "security-framework-sys", - "tempfile", -] - [[package]] name = "net2" version = "0.2.39" @@ -2724,50 +2668,6 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" -[[package]] -name = "openssl" -version = "0.10.55" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "345df152bc43501c5eb9e4654ff05f794effb78d4efe3d53abc158baddc0703d" -dependencies = [ - "bitflags 1.3.2", - "cfg-if 1.0.0", - "foreign-types", - "libc", - "once_cell", - "openssl-macros", - "openssl-sys", -] - -[[package]] -name = "openssl-macros" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" -dependencies = [ - "proc-macro2 1.0.66", - "quote 1.0.31", - "syn 2.0.26", -] - -[[package]] -name = "openssl-probe" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" - -[[package]] -name = "openssl-sys" -version = "0.9.90" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "374533b0e45f3a7ced10fcaeccca020e66656bc03dac384f852e4e5a7a8104a6" -dependencies = [ - "cc", - "libc", - "pkg-config", - "vcpkg", -] - [[package]] name = "option-ext" version = "0.2.0" @@ -3602,12 +3502,10 @@ dependencies = [ "http-body", "hyper", "hyper-rustls", - "hyper-tls", "ipnet", "js-sys", "log", "mime", - "native-tls", "once_cell", "percent-encoding", "pin-project-lite", @@ -3617,7 +3515,6 @@ dependencies = [ "serde_json", "serde_urlencoded", "tokio", - "tokio-native-tls", "tokio-rustls", "tokio-util 0.7.8", "tower-service", @@ -3841,15 +3738,6 @@ dependencies = [ "winapi-util", ] -[[package]] -name = "schannel" -version = "0.1.22" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c3733bf4cf7ea0880754e19cb5a462007c4a8c1914bff372ccc95b464f1df88" -dependencies = [ - "windows-sys 0.48.0", -] - [[package]] name = "scopeguard" version = "1.2.0" @@ -3895,29 +3783,6 @@ dependencies = [ "zeroize", ] -[[package]] -name = 
"security-framework" -version = "2.9.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1fc758eb7bffce5b308734e9b0c1468893cae9ff70ebf13e7090be8dcbcc83a8" -dependencies = [ - "bitflags 1.3.2", - "core-foundation", - "core-foundation-sys", - "libc", - "security-framework-sys", -] - -[[package]] -name = "security-framework-sys" -version = "2.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f51d0c0d83bec45f16480d0ce0058397a69e48fcdc52d1dc8855fb68acbd31a7" -dependencies = [ - "core-foundation-sys", - "libc", -] - [[package]] name = "semver" version = "0.9.0" @@ -4524,16 +4389,6 @@ dependencies = [ "syn 2.0.26", ] -[[package]] -name = "tokio-native-tls" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbae76ab933c85776efabc971569dd6119c580d8f5d448769dec1764bf796ef2" -dependencies = [ - "native-tls", - "tokio", -] - [[package]] name = "tokio-rustls" version = "0.24.1" diff --git a/zebra-utils/Cargo.toml b/zebra-utils/Cargo.toml index 194e543d151..5060fe81dc0 100644 --- a/zebra-utils/Cargo.toml +++ b/zebra-utils/Cargo.toml @@ -85,7 +85,8 @@ itertools = { version = "0.11.0", optional = true } # These crates are needed for the search-issue-refs binary regex = { version = "1.9.1", optional = true } -reqwest = { version = "0.11.18", optional = true } +# Avoid default openssl dependency to reduce the dependency tree and security alerts. 
+reqwest = { version = "0.11.18", default-features = false, features = ["rustls-tls"], optional = true } # These crates are needed for the zebra-checkpoints and search-issue-refs binaries tokio = { version = "1.29.1", features = ["full"], optional = true } From c3e7d749ede971e72665834bd5f4427c53248a3b Mon Sep 17 00:00:00 2001 From: teor Date: Sat, 22 Jul 2023 06:35:56 +1000 Subject: [PATCH 237/265] fix(log): Limit verbose RPC responses in logs (#7258) * Truncate verbose RPC responses in test logs * Downgrade verbose block template state log to debug --- zebra-chain/src/transaction/serialize.rs | 11 ++++- .../types/get_block_template.rs | 41 ++++++++++++++++++- zebra-state/src/service.rs | 2 +- .../get_block_template.rs | 10 ++++- 4 files changed, 60 insertions(+), 4 deletions(-) diff --git a/zebra-chain/src/transaction/serialize.rs b/zebra-chain/src/transaction/serialize.rs index e083921b77c..dd8a8c2e4c0 100644 --- a/zebra-chain/src/transaction/serialize.rs +++ b/zebra-chain/src/transaction/serialize.rs @@ -1023,8 +1023,17 @@ impl fmt::Display for SerializedTransaction { impl fmt::Debug for SerializedTransaction { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + // A transaction with a lot of transfers can be extremely long in logs. + let mut data_truncated = hex::encode(&self.bytes); + if data_truncated.len() > 1003 { + let end = data_truncated.len() - 500; + // Replace the middle bytes with "...", but leave 500 bytes on either side. + // The data is hex, so this replacement won't panic. 
+ data_truncated.replace_range(500..=end, "..."); + } + f.debug_tuple("SerializedTransaction") - .field(&hex::encode(&self.bytes)) + .field(&data_truncated) .finish() } } diff --git a/zebra-rpc/src/methods/get_block_template_rpcs/types/get_block_template.rs b/zebra-rpc/src/methods/get_block_template_rpcs/types/get_block_template.rs index df85939e4ff..617b80080c2 100644 --- a/zebra-rpc/src/methods/get_block_template_rpcs/types/get_block_template.rs +++ b/zebra-rpc/src/methods/get_block_template_rpcs/types/get_block_template.rs @@ -1,6 +1,8 @@ //! The `GetBlockTempate` type is the output of the `getblocktemplate` RPC method in the //! default 'template' mode. See [`ProposalResponse`] for the output in 'proposal' mode. +use std::fmt; + use zebra_chain::{ amount, block::{ChainHistoryBlockTxAuthCommitmentHash, MAX_BLOCK_BYTES, ZCASH_BLOCK_VERSION}, @@ -34,7 +36,7 @@ pub use parameters::{GetBlockTemplateCapability, GetBlockTemplateRequestMode, Js pub use proposal::{proposal_block_from_template, ProposalResponse}; /// A serialized `getblocktemplate` RPC response in template mode. -#[derive(Clone, Debug, Eq, PartialEq, serde::Serialize, serde::Deserialize)] +#[derive(Clone, Eq, PartialEq, serde::Serialize, serde::Deserialize)] pub struct GetBlockTemplate { /// The getblocktemplate RPC capabilities supported by Zebra. /// @@ -167,6 +169,43 @@ pub struct GetBlockTemplate { pub submit_old: Option, } +impl fmt::Debug for GetBlockTemplate { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + // A block with a lot of transactions can be extremely long in logs. 
+ let mut transactions_truncated = self.transactions.clone(); + if self.transactions.len() > 4 { + // Remove transaction 3 onwards, but leave the last transaction + let end = self.transactions.len() - 2; + transactions_truncated.splice(3..=end, Vec::new()); + } + + f.debug_struct("GetBlockTemplate") + .field("capabilities", &self.capabilities) + .field("version", &self.version) + .field("previous_block_hash", &self.previous_block_hash) + .field("block_commitments_hash", &self.block_commitments_hash) + .field("light_client_root_hash", &self.light_client_root_hash) + .field("final_sapling_root_hash", &self.final_sapling_root_hash) + .field("default_roots", &self.default_roots) + .field("transaction_count", &self.transactions.len()) + .field("transactions", &transactions_truncated) + .field("coinbase_txn", &self.coinbase_txn) + .field("long_poll_id", &self.long_poll_id) + .field("target", &self.target) + .field("min_time", &self.min_time) + .field("mutable", &self.mutable) + .field("nonce_range", &self.nonce_range) + .field("sigop_limit", &self.sigop_limit) + .field("size_limit", &self.size_limit) + .field("cur_time", &self.cur_time) + .field("bits", &self.bits) + .field("height", &self.height) + .field("max_time", &self.max_time) + .field("submit_old", &self.submit_old) + .finish() + } +} + impl GetBlockTemplate { /// Returns a `Vec` of capabilities supported by the `getblocktemplate` RPC pub fn capabilities() -> Vec { diff --git a/zebra-state/src/service.rs b/zebra-state/src/service.rs index d2a8eb237a0..5340ef14bc0 100644 --- a/zebra-state/src/service.rs +++ b/zebra-state/src/service.rs @@ -1751,7 +1751,7 @@ impl Service for ReadStateService { tokio::task::spawn_blocking(move || { span.in_scope(move || { - tracing::info!("attempting to validate and commit block proposal onto a cloned non-finalized state"); + tracing::debug!("attempting to validate and commit block proposal onto a cloned non-finalized state"); let mut latest_non_finalized_state = 
state.latest_non_finalized_state(); // The previous block of a valid proposal must be on the best chain tip. diff --git a/zebrad/tests/common/get_block_template_rpcs/get_block_template.rs b/zebrad/tests/common/get_block_template_rpcs/get_block_template.rs index 81a8ad4946c..e6bd3d3d9c4 100644 --- a/zebrad/tests/common/get_block_template_rpcs/get_block_template.rs +++ b/zebrad/tests/common/get_block_template_rpcs/get_block_template.rs @@ -106,7 +106,15 @@ pub(crate) async fn run() -> Result<()> { .await?; let is_response_success = getblocktemplate_response.status().is_success(); - let response_text = getblocktemplate_response.text().await?; + + let mut response_text = getblocktemplate_response.text().await?; + // This string can be extremely long in logs. + if response_text.len() > 1003 { + let end = response_text.len() - 500; + // Replace the middle bytes with "...", but leave 500 bytes on either side. + // The response text is ascii, so this replacement won't panic. + response_text.replace_range(500..=end, "..."); + } tracing::info!( response_text, From 7ce5b8b1150e6d060951686a6a17a78d7f9dd93b Mon Sep 17 00:00:00 2001 From: Alfredo Garcia Date: Fri, 21 Jul 2023 19:34:43 -0300 Subject: [PATCH 238/265] fix(feature): elasticsearch for the Mainnet (#7268) * differenciate testnet vs mainnet bulks * reduce the number of bulk blocks to 24 in mainnet --- zebra-state/src/service/finalized_state.rs | 29 ++++++++++++++++++---- 1 file changed, 24 insertions(+), 5 deletions(-) diff --git a/zebra-state/src/service/finalized_state.rs b/zebra-state/src/service/finalized_state.rs index 74ae8dd54ba..1bd53054f69 100644 --- a/zebra-state/src/service/finalized_state.rs +++ b/zebra-state/src/service/finalized_state.rs @@ -373,12 +373,31 @@ impl FinalizedState { let block_time = block.header.time.timestamp(); let local_time = chrono::Utc::now().timestamp(); - const AWAY_FROM_TIP_BULK_SIZE: usize = 800; + // Mainnet bulk size is small enough to avoid the elasticsearch 100mb content + // 
length limitation. MAX_BLOCK_BYTES = 2MB but each block use around 4.1 MB of JSON. + // Each block count as 2 as we send them with a operation/header line. A value of 48 + // is 24 blocks. + const MAINNET_AWAY_FROM_TIP_BULK_SIZE: usize = 48; + + // Testnet bulk size is larger as blocks are generally smaller in the testnet. + // A value of 800 is 400 blocks as we are not counting the operation line. + const TESTNET_AWAY_FROM_TIP_BULK_SIZE: usize = 800; + + // The number of blocks the bulk will have when we are in sync. + // A value of 2 means only 1 block as we want to insert them as soon as we get + // them for a real time experience. This is the same for mainnet and testnet. const CLOSE_TO_TIP_BULK_SIZE: usize = 2; + + // We consider in sync when the local time and the blockchain time difference is + // less than this number of seconds. const CLOSE_TO_TIP_SECONDS: i64 = 14400; // 4 hours - // If we are close to the tip index one block per bulk call. - let mut blocks_size_to_dump = AWAY_FROM_TIP_BULK_SIZE; + let mut blocks_size_to_dump = match self.network { + Network::Mainnet => MAINNET_AWAY_FROM_TIP_BULK_SIZE, + Network::Testnet => TESTNET_AWAY_FROM_TIP_BULK_SIZE, + }; + + // If we are close to the tip, index one block per bulk call. if local_time - block_time < CLOSE_TO_TIP_SECONDS { blocks_size_to_dump = CLOSE_TO_TIP_BULK_SIZE; } @@ -419,12 +438,12 @@ impl FinalizedState { let response_body = response .json::() .await - .expect("ES response parsing to a json_body should never fail"); + .expect("ES response parsing error. Maybe we are sending more than 100 mb of data (`http.max_content_length`)"); let errors = response_body["errors"].as_bool().unwrap_or(true); assert!(!errors, "{}", format!("ES error: {response_body}")); }); - // clean the block storage. + // Clean the block storage. 
self.elastic_blocks.clear(); } } From a6bdb480bb70c83501db246833ce165095a5d637 Mon Sep 17 00:00:00 2001 From: Arya Date: Fri, 21 Jul 2023 21:18:09 -0400 Subject: [PATCH 239/265] Release Zebra v1.1.0 (#7259) * Updates dependencies * Dedups bindgen, removes windows-sys from deny.toml * Updates checkpoints * chore: Release * Adds changelog draft * updates estimated_release_height * Updates changelog * Apply suggestions from code review Co-authored-by: teor --------- Co-authored-by: teor --- CHANGELOG.md | 35 ++++++++++- Cargo.lock | 24 +++---- book/src/user/docker.md | 2 +- book/src/user/install.md | 4 +- tower-batch-control/Cargo.toml | 2 +- tower-fallback/Cargo.toml | 2 +- zebra-chain/Cargo.toml | 4 +- zebra-consensus/Cargo.toml | 14 ++--- .../src/checkpoint/main-checkpoints.txt | 61 ++++++++++++++++++ .../src/checkpoint/test-checkpoints.txt | 63 +++++++++++++++++++ zebra-network/Cargo.toml | 4 +- zebra-node-services/Cargo.toml | 4 +- zebra-rpc/Cargo.toml | 14 ++--- zebra-script/Cargo.toml | 4 +- zebra-state/Cargo.toml | 6 +- zebra-test/Cargo.toml | 2 +- zebra-utils/Cargo.toml | 8 +-- zebrad/Cargo.toml | 16 ++--- zebrad/src/components/sync/end_of_support.rs | 2 +- 19 files changed, 214 insertions(+), 57 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 708809ac43f..16bf41b2694 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,13 +5,46 @@ All notable changes to Zebra are documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org). +## [Zebra 1.1.0](https://github.com/ZcashFoundation/zebra/releases/tag/v1.1.0) - 2023-07-18 -## [Zebra 1.0.1](https://github.com/ZcashFoundation/zebra/releases/tag/v1.0.1) - 2023-XX-XX + +This release adds new mempool metrics, fixes panics when cancelling tasks on shutdown, detects subcommand name typos on the command-line, and improves the usability of Zebra's Docker images (particularly for mining). 
### Breaking Changes - Zebra now detects subcommand name typos on the command-line. If you want to give Zebra a list of tracing filters, use `zebrad start --filters debug,...` ([#7056](https://github.com/ZcashFoundation/zebra/pull/7056)) +### Security + +- Avoid initiating outbound handshakes with IPs for which Zebra already has an active peer ([#7029](https://github.com/ZcashFoundation/zebra/pull/7029)) +- Rate-limit inbound connections per IP ([#7041](https://github.com/ZcashFoundation/zebra/pull/7041)) + +### Added + +- Metrics tracking mempool actions and size bucketed by weight ([#7019](https://github.com/ZcashFoundation/zebra/pull/7019)) by @str4d +- Legacy state format compatibility layer and version bumps for ECC dependencies to match `zcashd` 5.6.0 ([#7053](https://github.com/ZcashFoundation/zebra/pull/7053)) +- Framework for upcoming in-place database format upgrades ([#7031](https://github.com/ZcashFoundation/zebra/pull/7031)) + + +### Changed + +- Deduplicate note commitment trees in non-finalized state ([#7218](https://github.com/ZcashFoundation/zebra/pull/7218), [#7239](https://github.com/ZcashFoundation/zebra/pull/7239)) + +### Fixed + +- Enable miners running Zebra with Docker to set their address for mining rewards ([#7178](https://github.com/ZcashFoundation/zebra/pull/7178)) +- Use default RPC port when running Zebra with Docker ([#7177](https://github.com/ZcashFoundation/zebra/pull/7177), [#7162](https://github.com/ZcashFoundation/zebra/pull/7162)) +- Stop panicking on async task cancellation on shutdown in network and state futures ([#7219](https://github.com/ZcashFoundation/zebra/pull/7219)) +- Remove redundant startup logs, fix progress bar number, order, and wording ([#7087](https://github.com/ZcashFoundation/zebra/pull/7087)) +- Organize Docker `ENV` and `ARG` values based on their usage ([#7200](https://github.com/ZcashFoundation/zebra/pull/7200)) +- Avoid blocking threads by awaiting proof verification results from rayon in async context 
([#6887](https://github.com/ZcashFoundation/zebra/pull/6887)) + + +### Contributors + +Thank you to everyone who contributed to this release, we couldn't make Zebra without you: +@arya2, @gustavovalverde, @mpguerra, @oxarbitrage, @str4d, @teor2345 and @upbqdn + ## [Zebra 1.0.1](https://github.com/ZcashFoundation/zebra/releases/tag/v1.0.1) - 2023-07-03 diff --git a/Cargo.lock b/Cargo.lock index 4d02a6fcd83..cad36d5fee0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4559,7 +4559,7 @@ dependencies = [ [[package]] name = "tower-batch-control" -version = "0.2.41-beta.3" +version = "0.2.41-beta.4" dependencies = [ "color-eyre", "ed25519-zebra", @@ -4583,7 +4583,7 @@ dependencies = [ [[package]] name = "tower-fallback" -version = "0.2.41-beta.3" +version = "0.2.41-beta.4" dependencies = [ "futures-core", "pin-project", @@ -5486,7 +5486,7 @@ dependencies = [ [[package]] name = "zebra-chain" -version = "1.0.0-beta.27" +version = "1.0.0-beta.28" dependencies = [ "bitflags 2.3.3", "bitflags-serde-legacy", @@ -5547,7 +5547,7 @@ dependencies = [ [[package]] name = "zebra-consensus" -version = "1.0.0-beta.27" +version = "1.0.0-beta.28" dependencies = [ "bellman", "blake2b_simd", @@ -5592,7 +5592,7 @@ dependencies = [ [[package]] name = "zebra-network" -version = "1.0.0-beta.27" +version = "1.0.0-beta.28" dependencies = [ "bitflags 2.3.3", "byteorder", @@ -5633,7 +5633,7 @@ dependencies = [ [[package]] name = "zebra-node-services" -version = "1.0.0-beta.27" +version = "1.0.0-beta.28" dependencies = [ "color-eyre", "jsonrpc-core", @@ -5645,7 +5645,7 @@ dependencies = [ [[package]] name = "zebra-rpc" -version = "1.0.0-beta.27" +version = "1.0.0-beta.28" dependencies = [ "chrono", "futures", @@ -5677,7 +5677,7 @@ dependencies = [ [[package]] name = "zebra-script" -version = "1.0.0-beta.27" +version = "1.0.0-beta.28" dependencies = [ "displaydoc", "hex", @@ -5690,7 +5690,7 @@ dependencies = [ [[package]] name = "zebra-state" -version = "1.0.0-beta.27" +version = "1.0.0-beta.28" 
dependencies = [ "bincode", "chrono", @@ -5732,7 +5732,7 @@ dependencies = [ [[package]] name = "zebra-test" -version = "1.0.0-beta.27" +version = "1.0.0-beta.28" dependencies = [ "color-eyre", "futures", @@ -5759,7 +5759,7 @@ dependencies = [ [[package]] name = "zebra-utils" -version = "1.0.0-beta.27" +version = "1.0.0-beta.28" dependencies = [ "color-eyre", "hex", @@ -5780,7 +5780,7 @@ dependencies = [ [[package]] name = "zebrad" -version = "1.0.1" +version = "1.1.0" dependencies = [ "abscissa_core", "atty", diff --git a/book/src/user/docker.md b/book/src/user/docker.md index 462d5745d8a..525c6a12d27 100644 --- a/book/src/user/docker.md +++ b/book/src/user/docker.md @@ -17,7 +17,7 @@ docker run --detach zfnd/zebra:latest ### Build it locally ```shell -git clone --depth 1 --branch v1.0.1 https://github.com/ZcashFoundation/zebra.git +git clone --depth 1 --branch v1.1.0 https://github.com/ZcashFoundation/zebra.git docker build --file docker/Dockerfile --target runtime --tag zebra:local . docker run --detach zebra:local ``` diff --git a/book/src/user/install.md b/book/src/user/install.md index b70eed17f2b..6bd65ad7a7e 100644 --- a/book/src/user/install.md +++ b/book/src/user/install.md @@ -20,7 +20,7 @@ To compile Zebra directly from GitHub, or from a GitHub release source archive: ```sh git clone https://github.com/ZcashFoundation/zebra.git cd zebra -git checkout v1.0.1 +git checkout v1.1.0 ``` 3. 
Build and Run `zebrad` @@ -33,7 +33,7 @@ target/release/zebrad start ### Compiling from git using cargo install ```sh -cargo install --git https://github.com/ZcashFoundation/zebra --tag v1.0.1 zebrad +cargo install --git https://github.com/ZcashFoundation/zebra --tag v1.1.0 zebrad ``` ### Compiling on ARM diff --git a/tower-batch-control/Cargo.toml b/tower-batch-control/Cargo.toml index 1f9e89df2c9..ad29bbf4819 100644 --- a/tower-batch-control/Cargo.toml +++ b/tower-batch-control/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "tower-batch-control" -version = "0.2.41-beta.3" +version = "0.2.41-beta.4" authors = ["Zcash Foundation ", "Tower Maintainers "] description = "Tower middleware for batch request processing" # # Legal diff --git a/tower-fallback/Cargo.toml b/tower-fallback/Cargo.toml index e7d9a58c87e..7fcc4387dcd 100644 --- a/tower-fallback/Cargo.toml +++ b/tower-fallback/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "tower-fallback" -version = "0.2.41-beta.3" +version = "0.2.41-beta.4" authors = ["Zcash Foundation "] description = "A Tower service combinator that sends requests to a first service, then retries processing on a second fallback service if the first service errors." 
license = "MIT OR Apache-2.0" diff --git a/zebra-chain/Cargo.toml b/zebra-chain/Cargo.toml index bd597eae51f..46208f5eb4b 100644 --- a/zebra-chain/Cargo.toml +++ b/zebra-chain/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "zebra-chain" -version = "1.0.0-beta.27" +version = "1.0.0-beta.28" authors = ["Zcash Foundation "] description = "Core Zcash data structures" license = "MIT OR Apache-2.0" @@ -126,7 +126,7 @@ proptest-derive = { version = "0.3.0", optional = true } rand = { version = "0.8.5", optional = true } rand_chacha = { version = "0.3.1", optional = true } -zebra-test = { path = "../zebra-test/", version = "1.0.0-beta.27", optional = true } +zebra-test = { path = "../zebra-test/", version = "1.0.0-beta.28", optional = true } [dev-dependencies] # Benchmarks diff --git a/zebra-consensus/Cargo.toml b/zebra-consensus/Cargo.toml index 35fabab2f1b..901e88058ad 100644 --- a/zebra-consensus/Cargo.toml +++ b/zebra-consensus/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "zebra-consensus" -version = "1.0.0-beta.27" +version = "1.0.0-beta.28" authors = ["Zcash Foundation "] description = "Implementation of Zcash consensus checks" license = "MIT OR Apache-2.0" @@ -62,13 +62,13 @@ orchard = "0.5.0" zcash_proofs = { version = "0.12.1", features = ["local-prover", "multicore", "download-params"] } -tower-fallback = { path = "../tower-fallback/", version = "0.2.41-beta.3" } -tower-batch-control = { path = "../tower-batch-control/", version = "0.2.41-beta.3" } +tower-fallback = { path = "../tower-fallback/", version = "0.2.41-beta.4" } +tower-batch-control = { path = "../tower-batch-control/", version = "0.2.41-beta.4" } -zebra-script = { path = "../zebra-script", version = "1.0.0-beta.27" } -zebra-state = { path = "../zebra-state", version = "1.0.0-beta.27" } -zebra-node-services = { path = "../zebra-node-services", version = "1.0.0-beta.27" } -zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.27" } +zebra-script = { path = "../zebra-script", version = 
"1.0.0-beta.28" } +zebra-state = { path = "../zebra-state", version = "1.0.0-beta.28" } +zebra-node-services = { path = "../zebra-node-services", version = "1.0.0-beta.28" } +zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.28" } # prod feature progress-bar howudoin = { version = "0.1.2", optional = true } diff --git a/zebra-consensus/src/checkpoint/main-checkpoints.txt b/zebra-consensus/src/checkpoint/main-checkpoints.txt index 89a9f940623..dd8e688feb3 100644 --- a/zebra-consensus/src/checkpoint/main-checkpoints.txt +++ b/zebra-consensus/src/checkpoint/main-checkpoints.txt @@ -10951,3 +10951,64 @@ 2138018 000000000006e84370babab79c13faa64113afb4386a92375983d3a7987619ca 2138392 00000000010a100e0d11eabd1692eac1cb93989d2cd03b355e5b0240f77cf978 2138792 00000000001c6417d7df1be185d6b0ec0657703eebb22e68a418a565da99dbad +2139192 00000000009943cee98c43f5d391769eff1a1f89f4b2aa8f61f1ca0b7a1a035e +2139592 0000000000598a978f9fb352a6fa920de69260096e159d68dc8d40aff3c17e92 +2139960 0000000000cc1ccb9f4b4f48a5bb8598e7205db3042f98b37eb02e3b37e6fc6b +2140206 00000000008d6f467aa6c002fe0a64c9a4fbf92421de221690f0cb653b34f646 +2140557 000000000116303502e9e57339e1a9366ad41dc8b2856ee93d0ba9325acbacea +2140935 00000000012a549d767f9baadb9d5fbc32b5731e5f17984fae0f710aa2c07e4d +2141335 0000000000aeb392feef141bdc08e20dd7e3b240e48d5a71444747f1380c1590 +2141659 000000000069f06cde1416523d991d82103ec685d6e83a523511c481488ee1a3 +2142040 000000000111b9d1034a10f455c7b8fa23c513b137978d9277c3cb228aa1b87c +2142388 00000000012dff7fc274b000fb33ce3bebeb804fbafe2d3ac192699a07f89866 +2142701 0000000000a3a77419c416ddca708cadea1f8f824b1471b9ae15d82bef3221e2 +2143001 0000000000b75101aa7213929ebb6c1cdcea847b7b6fbc5cf80f08819e921839 +2143342 000000000177e02b211712c21ee84c4318643568101ec549770a93bc39188e4c +2143638 00000000001e80d2127c27a6e9943f6289dfb63ff38c870457026af6bb60bf97 +2143939 0000000000d42517546ae61193b197dda0eed95779d905608b6d8b9c5312d3ff +2144245 
000000000142361c95ae61438f7184aa13797381d4b55b40e53ea1b733fc3c61 +2144564 00000000010a68a12792eea5a16ef54d758abe202b3e5675116f799416f4aa94 +2144872 0000000000d16090f1782e5068e54f8090de1c618067461b875a9ed634599678 +2145166 0000000001eff76b65bee186f9ee3a8334a8aaddc26592f1adc13dcb54fc4dd5 +2145479 000000000130119c71832a87a08b20ab3ebe14c27d45fa4771e03741804b0ca3 +2145753 000000000030dc3f97729f24a349776dd0fb3d76791daa2546008018bef34d4a +2146049 0000000000bc378dd8cb00ffc2ed5903334f139435d4836e00b433662f4f8745 +2146356 0000000001cf420569c9dc8e1d53043245fe041fc03a42ebb7029cd5d96ccc1f +2146681 000000000184c12fe3bfe8f2a9e07f32ef069d1ccd0e1e37e593e11defcc9f81 +2146962 00000000011ef518062c4bf693fbbc5a9a8cf14070dadf0e034455c950d2cbc4 +2147278 0000000001433728c12320b61be87dbf5064ce53c5d0c2ec0d88849606ac791d +2147579 0000000000c6baafb5f6d4505f9df1531695a4ef162627fb74dfba790f889bf1 +2147887 0000000001551ae09945147c25bae678d0ba160e3fdd5526dab400c6e7f15d0a +2148184 000000000016e7048f77880ee061ce62210461d0f16d9bacb977a313bb7a6a79 +2148497 00000000018e77540b791666e462d03fe2092064730e13e5dc4412cfaf8054a0 +2148798 000000000015ca4de3c9f8eee7a5db5e5743039ddcc794a9ab63898ccdac4eac +2149107 0000000000b95386a0dcf5ea3378ea4a71a8d47504dec77525fc30abc372329e +2149402 000000000129f21442d711334026047792418d7552ac15446f804e65e503520c +2149682 00000000011366ca792e91c4c568e3d8739d97c1a385ef6bfed90a477a9622d6 +2149994 00000000019ebc82fa134540b12d44baf296a7de847012aff9f6d7984dd59f8e +2150295 0000000000f338ec2ee075c53165dd3c2075340053f29117ce560858cbcb89ea +2150628 000000000104a1912842bac89d50faeeb95d6381085365a98a74a6ffc5df5916 +2151028 00000000018b4d5632fe0fecd4b39df78dfd5c0baa9718b8540d8e20a7ac5a44 +2151428 00000000006dad2c3e9e63da48eb994eeea239c2e2ead76129d9092ae0da8611 +2151828 0000000000e482a079287b9a742fccbd8fd15a0cdde94207214a2923e1717030 +2152228 00000000018e17d2ad8f040334355580cf8d94217b50c2d3a0a32223fe2455eb +2152628 00000000010d95f51d45c376c436fc478d15b5f19043ae9b877554edd6e955ae +2153028 
0000000001ed3f035ff96ff981bd1baf7105e8ceac2ccbb08d484ce09fea84f2 +2153428 000000000056e97ed55b0ecad057d20e5d1945655dbfa945352efc68080fb106 +2153828 00000000004a6c97362e2022c443ff1676e7f385834eed887e6dea4a967f5f9c +2154228 00000000004e30690838777e4bdd9594c07c9859d09c831e182ac41c803ba4dd +2154628 000000000158f8dd48314777079df1f7f4b40f03c80bc7ff79325e3ec9795c7d +2155028 00000000006a566ab3d31064dbbacaa7c4f9d0cde9a2151379ad8eb82b5c22b7 +2155428 00000000019d5b3b490aad9d696f73ce6e2c9dcc5aaa0f59d895037f0c42464c +2155828 00000000013fda74b17fe46eb349312cc8641e4e4cc8e82c7461a0c5dde9942f +2156228 00000000002a5dcecbc9fc649a00bd369b3614a2966e31dd8f6f0486d5423f95 +2156628 000000000063e00e765733cbf1fa8f91b3918704f8f7f5f2164e7c4db93db0ab +2157028 00000000011ad7748b6ad9f296bebc2fd5d5fd2471c3957538a05e12442e5220 +2157428 000000000229fb466e7d68465f58a0a107291164f98c4a8aa473b61b27bc58bb +2157828 000000000029febeb9e9ff7308a46dc08b7cc6de7b8525f6e71b40611b7eb8a7 +2158228 000000000132364ef3ce63c688d7c89bd2d81f9db403f688013783d231ec77db +2158628 0000000000747ce4c907c89887de3084bd7987bf82da755b99c27ea5003591d8 +2159028 00000000009f9607df64f4a64f09405c34ed470683711ddad307fca1fcbfe242 +2159428 0000000000024d54b83f05fd6a7a4a61abab1b1491560e4175b6590b99fb9792 +2159792 000000000022521614534df6b3640b8ee5e83481223f17dc98b16eb209a51aa1 +2160160 000000000160c7dc4b42f0b2df00563adc885f9a540912f25251a8de8cdda6a8 diff --git a/zebra-consensus/src/checkpoint/test-checkpoints.txt b/zebra-consensus/src/checkpoint/test-checkpoints.txt index f6fe84e09e1..be02a5648a7 100644 --- a/zebra-consensus/src/checkpoint/test-checkpoints.txt +++ b/zebra-consensus/src/checkpoint/test-checkpoints.txt @@ -6010,3 +6010,66 @@ 2403600 0067d8e71caef5f552904c15d8560500b810c2cce0928b630e8ac6382055ef89 2404000 000a309d9d8ec47ed3be870aea4a03dd75822e88790229bc9bee09de86e0ebd0 2404400 001d29fbe998d3039b6967f944f1b50e10ce70421d60d0ed87f39cf7d5a8e350 +2404800 007574d68dac9c4f91607ae1077b05c0394e9da87cad82d8efc920f752d947c0 +2405200 
00184c4242fbcd575979107f6a0f9040d1873fbc48d606f6759da52c3ced87d3 +2405600 001b392f4bef65085a04ca389975b3ed42fae965d933152f1248ca1ca589e6c3 +2406000 001e4aba075ba633b8ad7f79704ae0ec486e4861482e86ce1bcd791f700ac6e9 +2406400 0018867cc75db62da321c0011d28ee3892a2b7fb55f4f88740b3a5a83cbc2e10 +2406800 00215d1a7e594637abff20c50db5e72914e2008ffe695e99b27a634c582f7457 +2407200 001f9f6d16eb0826cae423c7f5b1c9be7cab97f1f43156f9c60600f21c63106a +2407600 001245fc5f829bec3db26630ad7c9a51a1fd99669035f66ab41e7790fe7b1b9d +2408000 001b893907719478f6ae2fa9a673a6632942aeffa95b98ed20c477ada5396cb7 +2408400 00113b4a16d8e076f3dfe6250964cacc6fcdec235a685591a7264abdf92a8c36 +2408800 002fbc1b1325182ef8419bc3118e6c79d1dbed62ea665b3c7900656aba4e0a35 +2409200 001f0a8c467e598e8032c535b974c273eea839e8078c2b154724ddd132fd77af +2409600 0013cb11e27ef00c30b63509d5421f4d9e3fcae15bb58ff59cb6deddf50312dc +2410000 003562055f67b993af8e15649617dfa5ac9596ebe60f3aef82cefe757b972401 +2410400 001aa605b3120d9f312e118ff7fd4262b62e112fec65c5c9c753535223a98ff3 +2410800 0099af7fdca8353addc8baadcbde264b15660e0db61f52aaa0e7793160ead9da +2411200 000635de7abcb55bb2b93c1ab04ccb802b17d5c88f6a6efdd3480e389de9b5b2 +2411600 00113848f9b8797931dbf481c481cfbb24360575bf2a49805823cef865634916 +2412000 000f6742293ff5ef97bb9be28647500afbae23fa86896326a7393b2d6d45b310 +2412400 0013e29c30e96db9c2309e0dd1bcae2bd3fe5e8bbea455c1bcb0a7189bd3e431 +2412800 001e404f3ef35c06248169aa4f0c4a0acfea14545f064fbb8c148f6cd0884c0e +2413200 000c83c13e110c71eb729776deae6fc3bf8641dbd32cd3565ea0e386258d3833 +2413600 0000246eb0b7231fa7733128ebda3035802926787ffa37f8875ecce4de4760fb +2414000 000e73156de990023f9abedea44aa6b3fe773da8dd2a748540aaaac189f7b4c6 +2414400 000af497ba3897498f01e19375373ea445567d79603eb1c31e8087672817fb23 +2414800 001d3546ec3934694333b807ddc094ce788f123007e118098f97516758855a64 +2415200 0024b3d2b66a83dc985f478043ea92f28d4fb2e282d058b709191603c50fb0a2 +2415600 0011f73490bd8e9bc0dec7b71ae9763b51bde03808bd7c12f672ca280ccefca0 +2416000 
0009a7c7a04dd18e1e9f9b3547b90051d2ff2ca49deb8501e61522d59926f4d5 +2416400 000ff62d148ece31ac95244231e0b2afc6594064b42821a6b7a6dd7f44012a67 +2416800 0001f2f6e77ddfd2b26f6effd5e69b395dbfb444375336c2fa33b8171470cd92 +2417200 000a1eb94898508f9240bb084a15d17dd339a8dc645a0f02140a760a96e348a1 +2417600 0009122adddb8203272b43f810e0b91ddee7b50023a4ad4ef3bec6cd6e8b3b82 +2418000 00b572de4fc8f36553fedd6f8052abf9ef3e23379672585dba56e51ab0d28943 +2418400 000e1452a59a48d05e696ddc6c2c855b970ad8b75d6ae27a10b89350426dc2bf +2418800 001327fa234866e2a63c5303ff72a9c2ae1a7cb62681d0418c28f308c61bd304 +2419200 0016c2fda05b563490258c503c6e77b7bb76a51d637968f1c8f768709a55f6ec +2419600 000453e2a08768d6eb42fc12e194206ef9b319e5b05aa7901ea0c26241860009 +2420000 00036ebc9345e3404b47395118cee2f30a3b1e526e2ac1309675f3a04409fd16 +2420400 00095db89b9b48847b864aa2235a864b98f5745f0c386ebcd4dc62213ff4a62d +2420800 002250914adffa2990ab8065b7a83b3e209792b40173d68ac77f1db74b87ab61 +2421200 0018a36332d5413807371cc524205aa52f3abef4497215a4a8cb554f61418ee6 +2421600 000fb66dfdde35a7b270f6059fe2d6e37839ad52518721bf26fc2c9751cd463b +2422000 000b3f76cfd75aecfa369de06ffc5cc3887dacb3b52d08401dc716864de010bb +2422400 0015215a0dbb7ff3495a358120d392955a78d228213db9a6c712517b0580d332 +2422800 000831a8b66f9b6c2843332d6a902e8d63e1fa5db82041fd333ddae0f6698b66 +2423200 00028a73f946360c3f666e6136976e8922ab3325476318eae443e5bb67089cdc +2423600 0027da308c8c2cc1b7d119e58a2667547d0ee303cfe2f79cbdf8a9dda929d240 +2424000 00061c81c151c285e03fe2858624ccf0a5b9d1978a34350874e817361fdfcdac +2424400 0017a3a30f6590348b91c38a6ec391de9c13cb165bac6df0a2be1d31d8ce64b5 +2424800 000d5f96aa63c9a43c2f93a5abad4549bc4def6791deee9988959b9c8db98525 +2425200 0016128ab597f53bd775f80b5ffd21a92c941b246c449bd224bcb5bbb43eb1e2 +2425600 0001f32b316f38b44651da105fe3f5cb7ac6f666418cc2f8112815ac41846767 +2426000 001d221d7777f6fa2c8607a98b43ef3a58958a15508a6ca8acaa244e67d8f82f +2426400 0010bc73ac2042cb1eeee66bdecda69d609e033416765efa70150337f91365f4 +2426800 
000b9219464787ec2129f70fb3abaed943cf7684e182890789992cb492cfe7ae +2427200 00104b3e83562c2f114e01b277c93084d62794680208e4da6ebd6ea79d82f5fe +2427600 0119c00e0ddf4c0f69c01cd7120664128648bd39e0ed589ffd1605406d46d633 +2428000 000fe605457e5b68313b1822a1b3f76eca5f8f3044acde6870afe1e1be25504a +2428400 0019de074ee2032cece66e85144729274c7cf16a81dc89176ddc9f4617dac926 +2428800 00046f19373ffa9c354dc7a971cc1857495fb547ea808938cf93aec57f6d2120 +2429200 00087c7ee96c3358d4793e4c64b0b86b9c092b8e162192c7e15f2fd73ebb4d50 +2429600 0006481c064325f6890cf9721cf2bc768d37e177dca971d7d80c283e78d150fe diff --git a/zebra-network/Cargo.toml b/zebra-network/Cargo.toml index a11f520ab3f..ecc284b7c14 100644 --- a/zebra-network/Cargo.toml +++ b/zebra-network/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "zebra-network" -version = "1.0.0-beta.27" +version = "1.0.0-beta.28" authors = ["Zcash Foundation ", "Tower Maintainers "] description = "Networking code for Zebra" # # Legal @@ -83,7 +83,7 @@ howudoin = { version = "0.1.2", optional = true } proptest = { version = "1.2.0", optional = true } proptest-derive = { version = "0.3.0", optional = true } -zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.27", features = ["async-error"] } +zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.28", features = ["async-error"] } [dev-dependencies] proptest = "1.2.0" diff --git a/zebra-node-services/Cargo.toml b/zebra-node-services/Cargo.toml index 694636b959c..06e4aa9d81d 100644 --- a/zebra-node-services/Cargo.toml +++ b/zebra-node-services/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "zebra-node-services" -version = "1.0.0-beta.27" +version = "1.0.0-beta.28" authors = ["Zcash Foundation "] description = "The interfaces of some Zebra node services" license = "MIT OR Apache-2.0" @@ -35,7 +35,7 @@ rpc-client = [ ] [dependencies] -zebra-chain = { path = "../zebra-chain" , version = "1.0.0-beta.27" } +zebra-chain = { path = "../zebra-chain" , version = "1.0.0-beta.28" } # Optional 
dependencies diff --git a/zebra-rpc/Cargo.toml b/zebra-rpc/Cargo.toml index e9a8f43055d..aa8c7779682 100644 --- a/zebra-rpc/Cargo.toml +++ b/zebra-rpc/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "zebra-rpc" -version = "1.0.0-beta.27" +version = "1.0.0-beta.28" authors = ["Zcash Foundation "] description = "A Zebra JSON Remote Procedure Call (JSON-RPC) interface" license = "MIT OR Apache-2.0" @@ -70,12 +70,12 @@ zcash_address = { version = "0.3.0", optional = true } # Test-only feature proptest-impl proptest = { version = "1.2.0", optional = true } -zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.27", features = ["json-conversion"] } -zebra-consensus = { path = "../zebra-consensus", version = "1.0.0-beta.27" } -zebra-network = { path = "../zebra-network", version = "1.0.0-beta.27" } -zebra-node-services = { path = "../zebra-node-services", version = "1.0.0-beta.27" } -zebra-script = { path = "../zebra-script", version = "1.0.0-beta.27" } -zebra-state = { path = "../zebra-state", version = "1.0.0-beta.27" } +zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.28", features = ["json-conversion"] } +zebra-consensus = { path = "../zebra-consensus", version = "1.0.0-beta.28" } +zebra-network = { path = "../zebra-network", version = "1.0.0-beta.28" } +zebra-node-services = { path = "../zebra-node-services", version = "1.0.0-beta.28" } +zebra-script = { path = "../zebra-script", version = "1.0.0-beta.28" } +zebra-state = { path = "../zebra-state", version = "1.0.0-beta.28" } [dev-dependencies] insta = { version = "1.31.0", features = ["redactions", "json", "ron"] } diff --git a/zebra-script/Cargo.toml b/zebra-script/Cargo.toml index 747c8c0dadb..e80a3fcb7b4 100644 --- a/zebra-script/Cargo.toml +++ b/zebra-script/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "zebra-script" -version = "1.0.0-beta.27" +version = "1.0.0-beta.28" authors = ["Zcash Foundation "] description = "Zebra script verification wrapping zcashd's zcash_script library" license = 
"MIT OR Apache-2.0" @@ -17,7 +17,7 @@ categories = ["api-bindings", "cryptography::cryptocurrencies"] [dependencies] zcash_script = "0.1.13" -zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.27" } +zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.28" } thiserror = "1.0.43" displaydoc = "0.2.4" diff --git a/zebra-state/Cargo.toml b/zebra-state/Cargo.toml index 187a2b45eb9..77307cbd58b 100644 --- a/zebra-state/Cargo.toml +++ b/zebra-state/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "zebra-state" -version = "1.0.0-beta.27" +version = "1.0.0-beta.28" authors = ["Zcash Foundation "] description = "State contextual verification and storage code for Zebra" license = "MIT OR Apache-2.0" @@ -71,13 +71,13 @@ tracing = "0.1.37" elasticsearch = { version = "8.5.0-alpha.1", default-features = false, features = ["rustls-tls"], optional = true } serde_json = { version = "1.0.100", package = "serde_json", optional = true } -zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.27", features = ["async-error"] } +zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.28", features = ["async-error"] } # prod feature progress-bar howudoin = { version = "0.1.2", optional = true } # test feature proptest-impl -zebra-test = { path = "../zebra-test/", version = "1.0.0-beta.27", optional = true } +zebra-test = { path = "../zebra-test/", version = "1.0.0-beta.28", optional = true } proptest = { version = "1.2.0", optional = true } proptest-derive = { version = "0.3.0", optional = true } diff --git a/zebra-test/Cargo.toml b/zebra-test/Cargo.toml index 43676ae95ca..357d598ec97 100644 --- a/zebra-test/Cargo.toml +++ b/zebra-test/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "zebra-test" -version = "1.0.0-beta.27" +version = "1.0.0-beta.28" authors = ["Zcash Foundation "] description = "Test harnesses and test vectors for Zebra" license = "MIT OR Apache-2.0" diff --git a/zebra-utils/Cargo.toml b/zebra-utils/Cargo.toml index 5060fe81dc0..fc962ec2e5c 
100644 --- a/zebra-utils/Cargo.toml +++ b/zebra-utils/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "zebra-utils" -version = "1.0.0-beta.27" +version = "1.0.0-beta.28" authors = ["Zcash Foundation "] description = "Developer tools for Zebra maintenance and testing" license = "MIT OR Apache-2.0" @@ -74,11 +74,11 @@ tracing-error = "0.2.0" tracing-subscriber = "0.3.17" thiserror = "1.0.43" -zebra-node-services = { path = "../zebra-node-services", version = "1.0.0-beta.27" } -zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.27" } +zebra-node-services = { path = "../zebra-node-services", version = "1.0.0-beta.28" } +zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.28" } # These crates are needed for the block-template-to-proposal binary -zebra-rpc = { path = "../zebra-rpc", version = "1.0.0-beta.27", optional = true } +zebra-rpc = { path = "../zebra-rpc", version = "1.0.0-beta.28", optional = true } # These crates are needed for the zebra-checkpoints binary itertools = { version = "0.11.0", optional = true } diff --git a/zebrad/Cargo.toml b/zebrad/Cargo.toml index 381e9dfc66b..351fd39fc28 100644 --- a/zebrad/Cargo.toml +++ b/zebrad/Cargo.toml @@ -1,7 +1,7 @@ [package] # Crate metadata name = "zebrad" -version = "1.0.1" +version = "1.1.0" authors = ["Zcash Foundation "] description = "The Zcash Foundation's independent, consensus-compatible implementation of a Zcash node" license = "MIT OR Apache-2.0" @@ -142,15 +142,15 @@ test_sync_past_mandatory_checkpoint_mainnet = [] test_sync_past_mandatory_checkpoint_testnet = [] [dependencies] -zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.27" } -zebra-consensus = { path = "../zebra-consensus", version = "1.0.0-beta.27" } -zebra-network = { path = "../zebra-network", version = "1.0.0-beta.27" } -zebra-node-services = { path = "../zebra-node-services", version = "1.0.0-beta.27" } -zebra-rpc = { path = "../zebra-rpc", version = "1.0.0-beta.27" } -zebra-state = { path = "../zebra-state", 
version = "1.0.0-beta.27" } +zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.28" } +zebra-consensus = { path = "../zebra-consensus", version = "1.0.0-beta.28" } +zebra-network = { path = "../zebra-network", version = "1.0.0-beta.28" } +zebra-node-services = { path = "../zebra-node-services", version = "1.0.0-beta.28" } +zebra-rpc = { path = "../zebra-rpc", version = "1.0.0-beta.28" } +zebra-state = { path = "../zebra-state", version = "1.0.0-beta.28" } # Required for crates.io publishing, but it's only used in tests -zebra-utils = { path = "../zebra-utils", version = "1.0.0-beta.27", optional = true } +zebra-utils = { path = "../zebra-utils", version = "1.0.0-beta.28", optional = true } abscissa_core = "0.7.0" clap = { version = "4.3.15", features = ["cargo"] } diff --git a/zebrad/src/components/sync/end_of_support.rs b/zebrad/src/components/sync/end_of_support.rs index ead7b34e03b..cd4a0f35660 100644 --- a/zebrad/src/components/sync/end_of_support.rs +++ b/zebrad/src/components/sync/end_of_support.rs @@ -13,7 +13,7 @@ use zebra_chain::{ use crate::application::release_version; /// The estimated height that this release will be published. -pub const ESTIMATED_RELEASE_HEIGHT: u32 = 2_143_000; +pub const ESTIMATED_RELEASE_HEIGHT: u32 = 2_165_000; /// The maximum number of days after `ESTIMATED_RELEASE_HEIGHT` where a Zebra server will run /// without halting. 
From 7114cc03a8b2bc376e4a9afba157f0b5d19bf42e Mon Sep 17 00:00:00 2001 From: teor Date: Mon, 24 Jul 2023 21:28:03 +1000 Subject: [PATCH 240/265] change(deps): Use larger dependabot groups (#7247) * Use larger dependabot groups * Reduce number of open PRs --- .github/dependabot.yml | 109 +++++++++++++++++++---------------------- 1 file changed, 50 insertions(+), 59 deletions(-) diff --git a/.github/dependabot.yml b/.github/dependabot.yml index ad39a513f61..86bbd3eceec 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -7,8 +7,8 @@ updates: interval: weekly day: monday timezone: America/New_York - # Limit dependabot to 2 PRs per reviewer, but assume one reviewer is busy or away - open-pull-requests-limit: 8 + # Limit dependabot to 1 PR per reviewer + open-pull-requests-limit: 6 labels: - 'C-trivial' - 'A-rust' @@ -30,60 +30,45 @@ updates: crypto: patterns: - "bellman" - - "redjubjub" - - "reddsa" + # reddsa, redjubjub + - "red*" - "jubjub" - "group" - "bls12_381" - "blake*" - "secp256k1" - "sha2" - ed25519-zebra: - patterns: - - "ed25519*" - - "curve25519*" - - "x25519*" - tokio: + - "*25519*" + - "rand*" + async: patterns: - "tokio*" - "console-subscriber" - tower: - patterns: - "tower*" - dirs: - patterns: - - "dirs*" - - "directories*" - - "tempfile" - grpc: - patterns: - - "prost*" - - "tonic*" - vergen: - patterns: - - "vergen" - - "git*" - - "libgit*" - http: - patterns: - "hyper*" - "h2" - "reqwest" - tracing: + - "futures*" + - "pin-project*" + log: patterns: - "tracing*" - "log" - error: - patterns: - "*eyre*" - "thiserror" - "displaydoc" - "spandoc" - "owo-colors" - once-cell: + - "sentry*" + - "metrics*" + - "inferno" + concurrency: patterns: - "once_cell" - "lazy_static" + - "rayon*" + - "crossbeam*" + - "num_cpus" progress-bar: patterns: - "indicatif" @@ -93,43 +78,49 @@ updates: - "chrono*" - "time*" - "humantime*" - cli: + app: patterns: - "abscissa*" - "structopt*" - "clap*" - "atty*" - flamegraph: - patterns: - - "tracing-flame" - - 
"inferno" - serde: + - "semver*" + # dirs, directories, directories-next + - "dir*" + - "vergen" + - "*git*" + - "toml*" + - "rlimit" + formats: patterns: - "serde*" - futures: - patterns: - - "futures*" - sentry: - patterns: - - "sentry*" - metrics: - patterns: - - "metrics*" - bitflags: - patterns: - - "bitflags*" - jsonrpc: - patterns: - "jsonrpc*" - rand: - patterns: - - "rand*" - pin-project: + - "hex*" + - "regex" + - "byteorder" + - "bytes" + - "bincode" + data-structures: patterns: - - "pin-project*" - proptest: + - "bitflags*" + - "bitvec" + - "indexmap" + - "num-integer" + - "primitive-types" + - "uint" + - "tinyvec" + - "itertools" + - "ordered-map" + - "mset" + test: patterns: - "proptest*" + - "insta" + - "prost*" + - "tonic*" + - "tempfile" + - "static_assertions" + - "criterion" - package-ecosystem: github-actions directory: '/' schedule: @@ -137,7 +128,7 @@ updates: interval: weekly day: wednesday timezone: America/New_York - open-pull-requests-limit: 6 + open-pull-requests-limit: 4 labels: - 'C-trivial' - 'A-devops' From 2b7b65b2083bd113871760c462bb649f25ca9b60 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 24 Jul 2023 11:28:11 +0000 Subject: [PATCH 241/265] build(deps): bump tj-actions/changed-files from 37.1.2 to 37.3.0 (#7264) Bumps [tj-actions/changed-files](https://github.com/tj-actions/changed-files) from 37.1.2 to 37.3.0. - [Release notes](https://github.com/tj-actions/changed-files/releases) - [Changelog](https://github.com/tj-actions/changed-files/blob/main/HISTORY.md) - [Commits](https://github.com/tj-actions/changed-files/compare/v37.1.2...v37.3.0) --- updated-dependencies: - dependency-name: tj-actions/changed-files dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/lint.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index efcbdcca7c7..396ebebe9ad 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -37,7 +37,7 @@ jobs: - name: Rust files id: changed-files-rust - uses: tj-actions/changed-files@v37.1.2 + uses: tj-actions/changed-files@v37.3.0 with: files: | **/*.rs @@ -49,7 +49,7 @@ jobs: - name: Workflow files id: changed-files-workflows - uses: tj-actions/changed-files@v37.1.2 + uses: tj-actions/changed-files@v37.3.0 with: files: | .github/workflows/*.yml From f7a4c7164f40fffa9580a798f7a8a0bdfcfe45e5 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 24 Jul 2023 12:16:38 +0000 Subject: [PATCH 242/265] build(deps): bump tj-actions/changed-files from 37.3.0 to 37.4.0 (#7280) Bumps [tj-actions/changed-files](https://github.com/tj-actions/changed-files) from 37.3.0 to 37.4.0. - [Release notes](https://github.com/tj-actions/changed-files/releases) - [Changelog](https://github.com/tj-actions/changed-files/blob/main/HISTORY.md) - [Commits](https://github.com/tj-actions/changed-files/compare/v37.3.0...v37.4.0) --- updated-dependencies: - dependency-name: tj-actions/changed-files dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/lint.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 396ebebe9ad..7a9db2dad0d 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -37,7 +37,7 @@ jobs: - name: Rust files id: changed-files-rust - uses: tj-actions/changed-files@v37.3.0 + uses: tj-actions/changed-files@v37.4.0 with: files: | **/*.rs @@ -49,7 +49,7 @@ jobs: - name: Workflow files id: changed-files-workflows - uses: tj-actions/changed-files@v37.3.0 + uses: tj-actions/changed-files@v37.4.0 with: files: | .github/workflows/*.yml From 64f116a8617ee963b31e76bff411950cf23141bf Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 24 Jul 2023 17:45:55 +0000 Subject: [PATCH 243/265] build(deps): bump the test group with 1 update (#7284) Bumps the test group with 1 update: [tempfile](https://github.com/Stebalien/tempfile). - [Changelog](https://github.com/Stebalien/tempfile/blob/master/CHANGELOG.md) - [Commits](https://github.com/Stebalien/tempfile/compare/v3.6.0...v3.7.0) --- updated-dependencies: - dependency-name: tempfile dependency-type: direct:production update-type: version-update:semver-minor dependency-group: test ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 49 ++++++---------------------------------- zebra-network/Cargo.toml | 2 +- zebra-state/Cargo.toml | 2 +- zebra-test/Cargo.toml | 2 +- zebrad/Cargo.toml | 2 +- 5 files changed, 11 insertions(+), 46 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index cad36d5fee0..7eca1de8b4d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1336,12 +1336,9 @@ dependencies = [ [[package]] name = "fastrand" -version = "1.9.0" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e51093e27b0797c359783294ca4f0a911c270184cb10f85783b118614a1501be" -dependencies = [ - "instant", -] +checksum = "6999dc1837253364c2ebb0704ba97994bd874e8f195d665c50b7548f6ea92764" [[package]] name = "ff" @@ -2071,17 +2068,6 @@ dependencies = [ "cfg-if 1.0.0", ] -[[package]] -name = "io-lifetimes" -version = "1.0.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eae7b9aee968036d54dce06cebaefd919e4472e753296daccd6d344e3e2df0c2" -dependencies = [ - "hermit-abi 0.3.2", - "libc", - "windows-sys 0.48.0", -] - [[package]] name = "ipnet" version = "2.8.0" @@ -2095,7 +2081,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cb0889898416213fab133e1d33a0e5858a48177452750691bde3666d0fdbaf8b" dependencies = [ "hermit-abi 0.3.2", - "rustix 0.38.4", + "rustix", "windows-sys 0.48.0", ] @@ -2316,12 +2302,6 @@ version = "0.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f" -[[package]] -name = "linux-raw-sys" -version = "0.3.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef53942eb7bf7ff43a617b3e2c1c4a5ecf5944a7c1bc12d7ee39bbb15e5c1519" - [[package]] name = "linux-raw-sys" version = "0.4.3" @@ -3625,20 +3605,6 @@ dependencies = [ "semver 1.0.18", ] -[[package]] -name 
= "rustix" -version = "0.37.23" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d69718bf81c6127a49dc64e44a742e8bb9213c0ff8869a22c308f84c1d4ab06" -dependencies = [ - "bitflags 1.3.2", - "errno", - "io-lifetimes", - "libc", - "linux-raw-sys 0.3.8", - "windows-sys 0.48.0", -] - [[package]] name = "rustix" version = "0.38.4" @@ -3648,7 +3614,7 @@ dependencies = [ "bitflags 2.3.3", "errno", "libc", - "linux-raw-sys 0.4.3", + "linux-raw-sys", "windows-sys 0.48.0", ] @@ -4233,15 +4199,14 @@ checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" [[package]] name = "tempfile" -version = "3.6.0" +version = "3.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31c0432476357e58790aaa47a8efb0c5138f137343f3b5f23bd36a27e3b0a6d6" +checksum = "5486094ee78b2e5038a6382ed7645bc084dc2ec433426ca4c3cb61e2007b8998" dependencies = [ - "autocfg", "cfg-if 1.0.0", "fastrand", "redox_syscall 0.3.5", - "rustix 0.37.23", + "rustix", "windows-sys 0.48.0", ] diff --git a/zebra-network/Cargo.toml b/zebra-network/Cargo.toml index ecc284b7c14..e0d586c403e 100644 --- a/zebra-network/Cargo.toml +++ b/zebra-network/Cargo.toml @@ -57,7 +57,7 @@ rand = "0.8.5" rayon = "1.7.0" regex = "1.9.1" serde = { version = "1.0.168", features = ["serde_derive"] } -tempfile = "3.5.0" +tempfile = "3.7.0" thiserror = "1.0.43" futures = "0.3.28" diff --git a/zebra-state/Cargo.toml b/zebra-state/Cargo.toml index 77307cbd58b..0337a868e23 100644 --- a/zebra-state/Cargo.toml +++ b/zebra-state/Cargo.toml @@ -58,7 +58,7 @@ rlimit = "0.10.0" rocksdb = { version = "0.21.0", default-features = false, features = ["lz4"] } semver = "1.0.18" serde = { version = "1.0.168", features = ["serde_derive"] } -tempfile = "3.5.0" +tempfile = "3.7.0" thiserror = "1.0.43" rayon = "1.7.0" diff --git a/zebra-test/Cargo.toml b/zebra-test/Cargo.toml index 357d598ec97..80a8cb03c96 100644 --- a/zebra-test/Cargo.toml +++ b/zebra-test/Cargo.toml @@ -43,4 +43,4 @@ 
tracing-error = "0.2.0" tracing = "0.1.37" [dev-dependencies] -tempfile = "3.5.0" +tempfile = "3.7.0" diff --git a/zebrad/Cargo.toml b/zebrad/Cargo.toml index 351fd39fc28..68a431f5522 100644 --- a/zebrad/Cargo.toml +++ b/zebrad/Cargo.toml @@ -237,7 +237,7 @@ regex = "1.9.1" # zebra-rpc needs the preserve_order feature, it also makes test results more stable serde_json = { version = "1.0.100", features = ["preserve_order"] } -tempfile = "3.5.0" +tempfile = "3.7.0" hyper = { version = "0.14.27", features = ["http1", "http2", "server"]} tracing-test = { version = "0.2.4", features = ["no-env-filter"] } From bce8e1a842776d2013db1437c0a83ef7c1aa808d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 24 Jul 2023 20:20:22 +0000 Subject: [PATCH 244/265] build(deps): bump the log group with 1 update (#7281) Bumps the log group with 1 update: [thiserror](https://github.com/dtolnay/thiserror). - [Release notes](https://github.com/dtolnay/thiserror/releases) - [Commits](https://github.com/dtolnay/thiserror/compare/1.0.43...1.0.44) --- updated-dependencies: - dependency-name: thiserror dependency-type: direct:production update-type: version-update:semver-patch dependency-group: log ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 8 ++++---- zebra-chain/Cargo.toml | 2 +- zebra-consensus/Cargo.toml | 2 +- zebra-network/Cargo.toml | 2 +- zebra-rpc/Cargo.toml | 2 +- zebra-script/Cargo.toml | 2 +- zebra-state/Cargo.toml | 2 +- zebra-test/Cargo.toml | 2 +- zebra-utils/Cargo.toml | 2 +- zebrad/Cargo.toml | 2 +- 10 files changed, 13 insertions(+), 13 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 7eca1de8b4d..8e7eeb34178 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4230,18 +4230,18 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.43" +version = "1.0.44" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a35fc5b8971143ca348fa6df4f024d4d55264f3468c71ad1c2f365b0a4d58c42" +checksum = "611040a08a0439f8248d1990b111c95baa9c704c805fa1f62104b39655fd7f90" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.43" +version = "1.0.44" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "463fe12d7993d3b327787537ce8dd4dfa058de32fc2b195ef3cde03dc4771e8f" +checksum = "090198534930841fab3a5d1bb637cde49e339654e606195f8d9c76eeb081dc96" dependencies = [ "proc-macro2 1.0.66", "quote 1.0.31", diff --git a/zebra-chain/Cargo.toml b/zebra-chain/Cargo.toml index 46208f5eb4b..32e4e291b58 100644 --- a/zebra-chain/Cargo.toml +++ b/zebra-chain/Cargo.toml @@ -91,7 +91,7 @@ humantime = "2.1.0" # Error Handling & Formatting displaydoc = "0.2.4" static_assertions = "1.1.0" -thiserror = "1.0.43" +thiserror = "1.0.44" tracing = "0.1.37" # Serialization diff --git a/zebra-consensus/Cargo.toml b/zebra-consensus/Cargo.toml index 901e88058ad..7633334725e 100644 --- a/zebra-consensus/Cargo.toml +++ b/zebra-consensus/Cargo.toml @@ -52,7 +52,7 @@ serde = { version = "1.0.168", features = ["serde_derive"] } futures = "0.3.28" futures-util = "0.3.28" metrics = "0.21.1" -thiserror = "1.0.43" +thiserror 
= "1.0.44" tokio = { version = "1.29.1", features = ["time", "sync", "tracing", "rt-multi-thread"] } tower = { version = "0.4.13", features = ["timeout", "util", "buffer"] } tracing = "0.1.37" diff --git a/zebra-network/Cargo.toml b/zebra-network/Cargo.toml index e0d586c403e..a95a7cda242 100644 --- a/zebra-network/Cargo.toml +++ b/zebra-network/Cargo.toml @@ -58,7 +58,7 @@ rayon = "1.7.0" regex = "1.9.1" serde = { version = "1.0.168", features = ["serde_derive"] } tempfile = "3.7.0" -thiserror = "1.0.43" +thiserror = "1.0.44" futures = "0.3.28" tokio = { version = "1.29.1", features = ["fs", "io-util", "net", "time", "tracing", "macros", "rt-multi-thread"] } diff --git a/zebra-rpc/Cargo.toml b/zebra-rpc/Cargo.toml index aa8c7779682..946cbce51d2 100644 --- a/zebra-rpc/Cargo.toml +++ b/zebra-rpc/Cargo.toml @@ -82,7 +82,7 @@ insta = { version = "1.31.0", features = ["redactions", "json", "ron"] } proptest = "1.2.0" -thiserror = "1.0.43" +thiserror = "1.0.44" tokio = { version = "1.29.1", features = ["full", "tracing", "test-util"] } zebra-chain = { path = "../zebra-chain", features = ["proptest-impl"] } diff --git a/zebra-script/Cargo.toml b/zebra-script/Cargo.toml index e80a3fcb7b4..00d59e4a9db 100644 --- a/zebra-script/Cargo.toml +++ b/zebra-script/Cargo.toml @@ -19,7 +19,7 @@ zcash_script = "0.1.13" zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.28" } -thiserror = "1.0.43" +thiserror = "1.0.44" displaydoc = "0.2.4" [dev-dependencies] diff --git a/zebra-state/Cargo.toml b/zebra-state/Cargo.toml index 0337a868e23..20a63093b9c 100644 --- a/zebra-state/Cargo.toml +++ b/zebra-state/Cargo.toml @@ -59,7 +59,7 @@ rocksdb = { version = "0.21.0", default-features = false, features = ["lz4"] } semver = "1.0.18" serde = { version = "1.0.168", features = ["serde_derive"] } tempfile = "3.7.0" -thiserror = "1.0.43" +thiserror = "1.0.44" rayon = "1.7.0" tokio = { version = "1.29.1", features = ["rt-multi-thread", "sync", "tracing"] } diff --git 
a/zebra-test/Cargo.toml b/zebra-test/Cargo.toml index 80a8cb03c96..98c22eebfe8 100644 --- a/zebra-test/Cargo.toml +++ b/zebra-test/Cargo.toml @@ -36,7 +36,7 @@ tinyvec = { version = "1.6.0", features = ["rustc_1_55"] } humantime = "2.1.0" owo-colors = "3.5.0" spandoc = "0.2.2" -thiserror = "1.0.43" +thiserror = "1.0.44" tracing-subscriber = { version = "0.3.17", features = ["env-filter"] } tracing-error = "0.2.0" diff --git a/zebra-utils/Cargo.toml b/zebra-utils/Cargo.toml index fc962ec2e5c..a9ba7944f95 100644 --- a/zebra-utils/Cargo.toml +++ b/zebra-utils/Cargo.toml @@ -72,7 +72,7 @@ hex = "0.4.3" serde_json = "1.0.100" tracing-error = "0.2.0" tracing-subscriber = "0.3.17" -thiserror = "1.0.43" +thiserror = "1.0.44" zebra-node-services = { path = "../zebra-node-services", version = "1.0.0-beta.28" } zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.28" } diff --git a/zebrad/Cargo.toml b/zebrad/Cargo.toml index 68a431f5522..20feba83dcc 100644 --- a/zebrad/Cargo.toml +++ b/zebrad/Cargo.toml @@ -173,7 +173,7 @@ color-eyre = { version = "0.6.2", default-features = false, features = ["issue-u # Enable a feature that makes tinyvec compile much faster. tinyvec = { version = "1.6.0", features = ["rustc_1_55"] } -thiserror = "1.0.43" +thiserror = "1.0.44" tracing-subscriber = { version = "0.3.17", features = ["env-filter"] } tracing-appender = "0.2.2" From 7625e0b49f3f369a2fe2a85af003b279bf7093b6 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 24 Jul 2023 23:50:15 +0000 Subject: [PATCH 245/265] build(deps): bump the app group with 2 updates (#7282) Bumps the app group with 2 updates: [clap](https://github.com/clap-rs/clap) and [rlimit](https://github.com/Nugine/rlimit). 
Updates `clap` from 4.3.16 to 4.3.19 - [Release notes](https://github.com/clap-rs/clap/releases) - [Changelog](https://github.com/clap-rs/clap/blob/master/CHANGELOG.md) - [Commits](https://github.com/clap-rs/clap/compare/v4.3.16...v4.3.19) Updates `rlimit` from 0.10.0 to 0.10.1 - [Changelog](https://github.com/Nugine/rlimit/blob/main/CHANGELOG.md) - [Commits](https://github.com/Nugine/rlimit/compare/v0.10.0...v0.10.1) --- updated-dependencies: - dependency-name: clap dependency-type: direct:production update-type: version-update:semver-patch dependency-group: app - dependency-name: rlimit dependency-type: direct:production update-type: version-update:semver-patch dependency-group: app ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 18 +++++++++--------- zebra-state/Cargo.toml | 2 +- zebrad/Cargo.toml | 2 +- 3 files changed, 11 insertions(+), 11 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 8e7eeb34178..929a1428f6c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -12,7 +12,7 @@ dependencies = [ "arc-swap", "backtrace", "canonical-path", - "clap 4.3.16", + "clap 4.3.19", "color-eyre", "fs-err", "once_cell", @@ -744,9 +744,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.3.16" +version = "4.3.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "74bb1b4028935821b2d6b439bba2e970bdcf740832732437ead910c632e30d7d" +checksum = "5fd304a20bff958a57f04c4e96a2e7594cc4490a0e809cbd48bb6437edaa452d" dependencies = [ "clap_builder", "clap_derive", @@ -755,9 +755,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.3.16" +version = "4.3.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ae467cbb0111869b765e13882a1dbbd6cb52f58203d8b80c44f667d4dd19843" +checksum = "01c6a3f08f1fe5662a35cfe393aec09c4df95f60ee93b7556505260f75eee9e1" dependencies = [ "anstream", "anstyle", @@ -916,7 +916,7 @@ dependencies = 
[ "anes", "cast", "ciborium", - "clap 4.3.16", + "clap 4.3.19", "criterion-plot", "is-terminal", "itertools 0.10.5", @@ -3541,9 +3541,9 @@ dependencies = [ [[package]] name = "rlimit" -version = "0.10.0" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b5b8be0bc0ef630d24f8fa836b3a3463479b2343b29f9a8fa905c71a8c7b69b" +checksum = "3560f70f30a0f16d11d01ed078a07740fe6b489667abc7c7b029155d9f21c3d8" dependencies = [ "libc", ] @@ -5750,7 +5750,7 @@ dependencies = [ "abscissa_core", "atty", "chrono", - "clap 4.3.16", + "clap 4.3.19", "color-eyre", "console-subscriber", "dirs", diff --git a/zebra-state/Cargo.toml b/zebra-state/Cargo.toml index 20a63093b9c..4c4473144fe 100644 --- a/zebra-state/Cargo.toml +++ b/zebra-state/Cargo.toml @@ -54,7 +54,7 @@ lazy_static = "1.4.0" metrics = "0.21.1" mset = "0.1.1" regex = "1.9.1" -rlimit = "0.10.0" +rlimit = "0.10.1" rocksdb = { version = "0.21.0", default-features = false, features = ["lz4"] } semver = "1.0.18" serde = { version = "1.0.168", features = ["serde_derive"] } diff --git a/zebrad/Cargo.toml b/zebrad/Cargo.toml index 20feba83dcc..9937827e663 100644 --- a/zebrad/Cargo.toml +++ b/zebrad/Cargo.toml @@ -153,7 +153,7 @@ zebra-state = { path = "../zebra-state", version = "1.0.0-beta.28" } zebra-utils = { path = "../zebra-utils", version = "1.0.0-beta.28", optional = true } abscissa_core = "0.7.0" -clap = { version = "4.3.15", features = ["cargo"] } +clap = { version = "4.3.19", features = ["cargo"] } chrono = { version = "0.4.26", default-features = false, features = ["clock", "std"] } humantime-serde = "1.1.1" indexmap = "2.0.0" From b8dd54081da63e4c90fc3c736b70fe917d7f222f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 25 Jul 2023 02:22:22 +0000 Subject: [PATCH 246/265] build(deps): bump the formats group with 1 update (#7283) Bumps the formats group with 1 update: [serde](https://github.com/serde-rs/serde). 
- [Release notes](https://github.com/serde-rs/serde/releases) - [Commits](https://github.com/serde-rs/serde/compare/v1.0.171...v1.0.175) --- updated-dependencies: - dependency-name: serde dependency-type: direct:production update-type: version-update:semver-patch dependency-group: formats ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 8 ++++---- zebra-chain/Cargo.toml | 2 +- zebra-consensus/Cargo.toml | 2 +- zebra-network/Cargo.toml | 2 +- zebra-node-services/Cargo.toml | 4 ++-- zebra-rpc/Cargo.toml | 2 +- zebra-state/Cargo.toml | 2 +- zebrad/Cargo.toml | 2 +- 8 files changed, 12 insertions(+), 12 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 929a1428f6c..826a3d2d38d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3861,9 +3861,9 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.171" +version = "1.0.175" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30e27d1e4fd7659406c492fd6cfaf2066ba8773de45ca75e855590f856dc34a9" +checksum = "5d25439cd7397d044e2748a6fe2432b5e85db703d6d097bd014b3c0ad1ebff0b" dependencies = [ "serde_derive", ] @@ -3879,9 +3879,9 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.171" +version = "1.0.175" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "389894603bd18c46fa56231694f8d827779c0951a667087194cf9de94ed24682" +checksum = "b23f7ade6f110613c0d63858ddb8b94c1041f550eab58a16b371bdf2c9c80ab4" dependencies = [ "proc-macro2 1.0.66", "quote 1.0.31", diff --git a/zebra-chain/Cargo.toml b/zebra-chain/Cargo.toml index 32e4e291b58..7820507d304 100644 --- a/zebra-chain/Cargo.toml +++ b/zebra-chain/Cargo.toml @@ -96,7 +96,7 @@ tracing = "0.1.37" # Serialization hex = { version = "0.4.3", features = ["serde"] } -serde = { version = "1.0.168", features = ["serde_derive", "rc"] } +serde = { version = "1.0.175", features = ["serde_derive", "rc"] } serde_with = "3.0.0" 
serde-big-array = "0.5.1" diff --git a/zebra-consensus/Cargo.toml b/zebra-consensus/Cargo.toml index 7633334725e..4211e43d5a9 100644 --- a/zebra-consensus/Cargo.toml +++ b/zebra-consensus/Cargo.toml @@ -47,7 +47,7 @@ chrono = { version = "0.4.26", default-features = false, features = ["clock", "s displaydoc = "0.2.4" lazy_static = "1.4.0" once_cell = "1.18.0" -serde = { version = "1.0.168", features = ["serde_derive"] } +serde = { version = "1.0.175", features = ["serde_derive"] } futures = "0.3.28" futures-util = "0.3.28" diff --git a/zebra-network/Cargo.toml b/zebra-network/Cargo.toml index a95a7cda242..c8299fb3ae0 100644 --- a/zebra-network/Cargo.toml +++ b/zebra-network/Cargo.toml @@ -56,7 +56,7 @@ pin-project = "1.1.2" rand = "0.8.5" rayon = "1.7.0" regex = "1.9.1" -serde = { version = "1.0.168", features = ["serde_derive"] } +serde = { version = "1.0.175", features = ["serde_derive"] } tempfile = "3.7.0" thiserror = "1.0.44" diff --git a/zebra-node-services/Cargo.toml b/zebra-node-services/Cargo.toml index 06e4aa9d81d..956b4922995 100644 --- a/zebra-node-services/Cargo.toml +++ b/zebra-node-services/Cargo.toml @@ -44,7 +44,7 @@ color-eyre = { version = "0.6.2", optional = true } jsonrpc-core = { version = "18.0.0", optional = true } # Security: avoid default dependency on openssl reqwest = { version = "0.11.18", default-features = false, features = ["rustls-tls"], optional = true } -serde = { version = "1.0.168", optional = true } +serde = { version = "1.0.175", optional = true } serde_json = { version = "1.0.100", optional = true } [dev-dependencies] @@ -52,5 +52,5 @@ serde_json = { version = "1.0.100", optional = true } color-eyre = "0.6.2" jsonrpc-core = "18.0.0" reqwest = { version = "0.11.18", default-features = false, features = ["rustls-tls"] } -serde = "1.0.168" +serde = "1.0.175" serde_json = "1.0.100" diff --git a/zebra-rpc/Cargo.toml b/zebra-rpc/Cargo.toml index 946cbce51d2..2115dda055b 100644 --- a/zebra-rpc/Cargo.toml +++ b/zebra-rpc/Cargo.toml 
@@ -60,7 +60,7 @@ tower = "0.4.13" tracing = "0.1.37" hex = { version = "0.4.3", features = ["serde"] } -serde = { version = "1.0.168", features = ["serde_derive"] } +serde = { version = "1.0.175", features = ["serde_derive"] } # Experimental feature getblocktemplate-rpcs rand = { version = "0.8.5", optional = true } diff --git a/zebra-state/Cargo.toml b/zebra-state/Cargo.toml index 4c4473144fe..70453548be2 100644 --- a/zebra-state/Cargo.toml +++ b/zebra-state/Cargo.toml @@ -57,7 +57,7 @@ regex = "1.9.1" rlimit = "0.10.1" rocksdb = { version = "0.21.0", default-features = false, features = ["lz4"] } semver = "1.0.18" -serde = { version = "1.0.168", features = ["serde_derive"] } +serde = { version = "1.0.175", features = ["serde_derive"] } tempfile = "3.7.0" thiserror = "1.0.44" diff --git a/zebrad/Cargo.toml b/zebrad/Cargo.toml index 9937827e663..f65e0213f61 100644 --- a/zebrad/Cargo.toml +++ b/zebrad/Cargo.toml @@ -159,7 +159,7 @@ humantime-serde = "1.1.1" indexmap = "2.0.0" lazy_static = "1.4.0" semver = "1.0.18" -serde = { version = "1.0.168", features = ["serde_derive"] } +serde = { version = "1.0.175", features = ["serde_derive"] } toml = "0.7.6" futures = "0.3.28" From da2e696836cc8bb06a12ee4f0ee8780b689bc94d Mon Sep 17 00:00:00 2001 From: Gustavo Valverde Date: Tue, 25 Jul 2023 23:50:45 +0100 Subject: [PATCH 247/265] ref(docker): remove all unrequired docker arguments from CI/CD pipelines (#7231) * ref(docker): remove all unrequired docker arguments * fix(ci): use correct `$NETWORK` approach for tests * fix(release): do not change default `$NETWORK` for experimental image * Update .github/workflows/continous-integration-docker.yml Co-authored-by: Marek * Revert "fix(release): do not change default `$NETWORK` for experimental image" This reverts commit bd5b6c831b7807f8038d0cf33dcb19e09fd44ca7. 
* fix: typo --------- Co-authored-by: Marek --- .github/workflows/build-docker-image.yml | 18 ----- .github/workflows/continous-delivery.yml | 8 +-- .../continous-integration-docker.yml | 70 +++++++++---------- .github/workflows/release-binaries.yml | 8 --- .github/workflows/zcash-params.yml | 1 - 5 files changed, 34 insertions(+), 71 deletions(-) diff --git a/.github/workflows/build-docker-image.yml b/.github/workflows/build-docker-image.yml index 95c187bec97..4c1a8c1cca1 100644 --- a/.github/workflows/build-docker-image.yml +++ b/.github/workflows/build-docker-image.yml @@ -3,9 +3,6 @@ name: Build docker image on: workflow_call: inputs: - network: - required: false - type: string image_name: required: true type: string @@ -24,12 +21,6 @@ on: rust_lib_backtrace: required: false type: string - colorbt_show_hidden: - required: false - type: string - zebra_skip_ipv6_tests: - required: false - type: string rust_log: required: false type: string @@ -44,9 +35,6 @@ on: required: false default: "lightwalletd-grpc-tests zebra-checkpoints" type: string - rpc_port: - required: false - type: string tag_suffix: required: false type: string @@ -154,16 +142,10 @@ jobs: tags: ${{ steps.meta.outputs.tags }} labels: ${{ steps.meta.outputs.labels }} build-args: | - NETWORK=${{ inputs.network }} SHORT_SHA=${{ env.GITHUB_SHA_SHORT }} - RUST_BACKTRACE=${{ inputs.rust_backtrace }} - RUST_LIB_BACKTRACE=${{ inputs.rust_lib_backtrace }} - COLORBT_SHOW_HIDDEN=${{ inputs.colorbt_show_hidden }} - ZEBRA_SKIP_IPV6_TESTS=${{ inputs.zebra_skip_ipv6_tests }} RUST_LOG=${{ inputs.rust_log }} FEATURES=${{ inputs.features }} TEST_FEATURES=${{ inputs.test_features }} - RPC_PORT=${{ inputs.rpc_port }} push: true # Don't read from the cache if the caller disabled it. 
# https://docs.docker.com/engine/reference/commandline/buildx_build/#options diff --git a/.github/workflows/continous-delivery.yml b/.github/workflows/continous-delivery.yml index 8c846874977..49b871a261a 100644 --- a/.github/workflows/continous-delivery.yml +++ b/.github/workflows/continous-delivery.yml @@ -114,10 +114,6 @@ jobs: dockerfile_target: runtime image_name: zebrad no_cache: ${{ inputs.no_cache || false }} - # We hard-code Mainnet here, because the config is modified before running zebrad - network: 'Mainnet' - rust_backtrace: '1' - zebra_skip_ipv6_tests: '1' rust_log: info # Test that Zebra works using the default config with the latest Zebra version. @@ -276,7 +272,7 @@ jobs: --container-stdin \ --container-tty \ --container-image ${{ vars.GAR_BASE }}/zebrad@${{ needs.build.outputs.image_digest }} \ - --container-env "NETWORK=${{ matrix.network }},LOG_FILE=${{ vars.CD_LOG_FILE }},LOG_COLOR=false,SENTRY_DSN=${{ vars.SENTRY_DSN }},SHORT_SHA=${{ env.GITHUB_SHA_SHORT }}" \ + --container-env "NETWORK=${{ matrix.network }},LOG_FILE=${{ vars.CD_LOG_FILE }},LOG_COLOR=false,SENTRY_DSN=${{ vars.SENTRY_DSN }}" \ --create-disk=name=zebrad-cache-${{ env.GITHUB_SHA_SHORT }}-${NETWORK},device-name=zebrad-cache-${{ env.GITHUB_SHA_SHORT }}-${NETWORK},auto-delete=yes,size=300GB,type=pd-ssd,mode=rw \ --container-mount-disk=mount-path='/var/cache/zebrad-cache',name=zebrad-cache-${{ env.GITHUB_SHA_SHORT }}-${NETWORK},mode=rw \ --machine-type ${{ vars.GCP_SMALL_MACHINE }} \ @@ -377,7 +373,7 @@ jobs: --container-stdin \ --container-tty \ --container-image ${{ vars.GAR_BASE }}/zebrad@${{ needs.build.outputs.image_digest }} \ - --container-env "NETWORK=${{ inputs.network }},LOG_FILE=${{ inputs.log_file }},LOG_COLOR=false,SENTRY_DSN=${{ vars.SENTRY_DSN }},SHORT_SHA=${{ env.GITHUB_SHA_SHORT }}" \ + --container-env "NETWORK=${{ inputs.network }},LOG_FILE=${{ inputs.log_file }},LOG_COLOR=false,SENTRY_DSN=${{ vars.SENTRY_DSN }}" \ --create-disk=name=zebrad-cache-${{ 
env.GITHUB_SHA_SHORT }}-${NETWORK},device-name=zebrad-cache-${{ env.GITHUB_SHA_SHORT }}-${NETWORK},auto-delete=yes,size=300GB,type=pd-ssd,mode=rw \ --container-mount-disk=mount-path='/var/cache/zebrad-cache',name=zebrad-cache-${{ env.GITHUB_SHA_SHORT }}-${NETWORK},mode=rw \ --machine-type ${{ vars.GCP_SMALL_MACHINE }} \ diff --git a/.github/workflows/continous-integration-docker.yml b/.github/workflows/continous-integration-docker.yml index a88df245ab1..29618143a5b 100644 --- a/.github/workflows/continous-integration-docker.yml +++ b/.github/workflows/continous-integration-docker.yml @@ -131,11 +131,8 @@ jobs: dockerfile_target: tests image_name: ${{ vars.CI_IMAGE_NAME }} no_cache: ${{ inputs.no_cache || false }} - network: ${{ inputs.network || vars.ZCASH_NETWORK }} rust_backtrace: full rust_lib_backtrace: full - colorbt_show_hidden: '1' - zebra_skip_ipv6_tests: '1' rust_log: info # zebrad tests without cached state @@ -169,7 +166,9 @@ jobs: - name: Run zebrad tests run: | docker pull ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} - docker run --name zebrad-tests --tty ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} cargo test --locked --release --features "lightwalletd-grpc-tests" --workspace -- --include-ignored + docker run -e NETWORK --name zebrad-tests --tty ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} cargo test --locked --release --features "lightwalletd-grpc-tests" --workspace -- --include-ignored + env: + NETWORK: ${{ inputs.network || vars.ZCASH_NETWORK }} # zebrad tests without cached state with `getblocktemplate-rpcs` feature # @@ -190,7 +189,9 @@ jobs: - name: Run zebrad tests run: | docker pull ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} - docker run --name zebrad-tests --tty ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} cargo test --locked --release --features "lightwalletd-grpc-tests 
getblocktemplate-rpcs" --workspace -- --include-ignored + docker run -e NETWORK --name zebrad-tests --tty -e ${{ inputs.network || vars.ZCASH_NETWORK }} ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} cargo test --locked --release --features "lightwalletd-grpc-tests getblocktemplate-rpcs" --workspace -- --include-ignored + env: + NETWORK: ${{ inputs.network || vars.ZCASH_NETWORK }} # Run state tests with fake activation heights. # @@ -217,9 +218,10 @@ jobs: - name: Run tests with fake activation heights run: | docker pull ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} - docker run -e TEST_FAKE_ACTIVATION_HEIGHTS --name zebrad-tests -t ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} cargo test --locked --release --package zebra-state --lib -- --nocapture --include-ignored with_fake_activation_heights + docker run -e NETWORK -e TEST_FAKE_ACTIVATION_HEIGHTS --name zebrad-tests -t ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} cargo test --locked --release --package zebra-state --lib -- --nocapture --include-ignored with_fake_activation_heights env: TEST_FAKE_ACTIVATION_HEIGHTS: '1' + NETWORK: ${{ inputs.network || vars.ZCASH_NETWORK }} # Test that Zebra syncs and checkpoints a few thousand blocks from an empty state. 
# @@ -240,7 +242,9 @@ jobs: - name: Run zebrad large sync tests run: | docker pull ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} - docker run --name zebrad-tests -t ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} cargo test --locked --release --features lightwalletd-grpc-tests --package zebrad --test acceptance -- --nocapture --include-ignored sync_large_checkpoints_ + docker run -e NETWORK --name zebrad-tests -t ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} cargo test --locked --release --features lightwalletd-grpc-tests --package zebrad --test acceptance -- --nocapture --include-ignored sync_large_checkpoints_ + env: + NETWORK: ${{ inputs.network || vars.ZCASH_NETWORK }} # Test launching lightwalletd with an empty lightwalletd and Zebra state. # @@ -261,9 +265,10 @@ jobs: - name: Run tests with empty lightwalletd launch run: | docker pull ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} - docker run -e ZEBRA_TEST_LIGHTWALLETD --name lightwalletd-tests -t ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} cargo test --locked --release --features lightwalletd-grpc-tests --package zebrad --test acceptance -- --nocapture --include-ignored lightwalletd_integration + docker run -e NETWORK -e ZEBRA_TEST_LIGHTWALLETD --name lightwalletd-tests -t ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} cargo test --locked --release --features lightwalletd-grpc-tests --package zebrad --test acceptance -- --nocapture --include-ignored lightwalletd_integration env: ZEBRA_TEST_LIGHTWALLETD: '1' + NETWORK: ${{ inputs.network || vars.ZCASH_NETWORK }} # Test that Zebra works using the default config with the latest Zebra version test-configuration-file: @@ -284,11 +289,13 @@ jobs: run: | set -ex docker pull ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} - docker run 
--detach --name default-conf-tests -t ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} zebrad start + docker run -e NETWORK --detach --name default-conf-tests -t ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} zebrad start EXIT_STATUS=$(docker logs --tail all --follow default-conf-tests 2>&1 | grep -q --extended-regexp --max-count=1 -e 'estimated progress to chain tip.*BeforeOverwinter'; echo $?; ) docker stop default-conf-tests docker logs default-conf-tests exit "$EXIT_STATUS" + env: + NETWORK: ${{ inputs.network || vars.ZCASH_NETWORK }} # Test that Zebra works using the $ZEBRA_CONF_PATH config test-zebra-conf-path: @@ -309,13 +316,14 @@ jobs: run: | set -ex docker pull ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} - docker run --detach -e ZEBRA_CONF_PATH --name variable-conf-tests -t ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} -c $ZEBRA_CONF_PATH start + docker run -e NETWORK --detach -e ZEBRA_CONF_PATH --name variable-conf-tests -t ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} -c $ZEBRA_CONF_PATH start EXIT_STATUS=$(docker logs --tail all --follow variable-conf-tests 2>&1 | grep -q --extended-regexp --max-count=1 -e 'v1.0.0-rc.2.toml'; echo $?; ) docker stop variable-conf-tests docker logs variable-conf-tests exit "$EXIT_STATUS" env: ZEBRA_CONF_PATH: 'zebrad/tests/common/configs/v1.0.0-rc.2.toml' + NETWORK: ${{ inputs.network || vars.ZCASH_NETWORK }} # END TODO: make the non-cached-state tests use: # network: ${{ inputs.network || vars.ZCASH_NETWORK }} @@ -338,8 +346,7 @@ jobs: app_name: zebrad test_id: sync-to-checkpoint test_description: Test sync up to mandatory checkpoint - test_variables: '-e TEST_DISK_REBUILD=1 -e ZEBRA_FORCE_USE_COLOR=1' - network: ${{ inputs.network || vars.ZCASH_NETWORK }} + test_variables: '-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_DISK_REBUILD=1 -e 
ZEBRA_FORCE_USE_COLOR=1' needs_zebra_state: false saves_to_disk: true force_save_to_disk: ${{ inputs.force_save_to_disk || false }} @@ -368,8 +375,7 @@ jobs: app_name: zebrad test_id: sync-past-checkpoint test_description: Test full validation sync from a cached state - test_variables: '-e TEST_CHECKPOINT_SYNC=1 -e ZEBRA_FORCE_USE_COLOR=1' - network: ${{ inputs.network || vars.ZCASH_NETWORK }} + test_variables: '-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_CHECKPOINT_SYNC=1 -e ZEBRA_FORCE_USE_COLOR=1' needs_zebra_state: true saves_to_disk: false disk_suffix: checkpoint @@ -399,8 +405,7 @@ jobs: test_description: Test a full sync up to the tip # The value of FULL_SYNC_MAINNET_TIMEOUT_MINUTES is currently ignored. # TODO: update the test to use {{ input.network }} instead? - test_variables: '-e FULL_SYNC_MAINNET_TIMEOUT_MINUTES=0 -e ZEBRA_FORCE_USE_COLOR=1' - network: 'Mainnet' + test_variables: '-e NETWORK=Mainnet -e FULL_SYNC_MAINNET_TIMEOUT_MINUTES=0 -e ZEBRA_FORCE_USE_COLOR=1' # This test runs for longer than 6 hours, so it needs multiple jobs is_long_test: true needs_zebra_state: false @@ -441,8 +446,7 @@ jobs: app_name: zebrad test_id: update-to-tip test_description: Test syncing to tip with a Zebra tip state - test_variables: '-e TEST_UPDATE_SYNC=1 -e ZEBRA_FORCE_USE_COLOR=1 -e ZEBRA_CACHED_STATE_DIR=/var/cache/zebrad-cache' - network: ${{ inputs.network || vars.ZCASH_NETWORK }} + test_variables: '-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_UPDATE_SYNC=1 -e ZEBRA_FORCE_USE_COLOR=1 -e ZEBRA_CACHED_STATE_DIR=/var/cache/zebrad-cache' needs_zebra_state: true # update the disk on every PR, to increase CI speed saves_to_disk: true @@ -476,8 +480,7 @@ jobs: test_id: generate-checkpoints-mainnet test_description: Generate Zebra checkpoints on mainnet # TODO: update the test to use {{ input.network }} instead? 
- test_variables: '-e GENERATE_CHECKPOINTS_MAINNET=1 -e ZEBRA_FORCE_USE_COLOR=1 -e ZEBRA_CACHED_STATE_DIR=/var/cache/zebrad-cache' - network: 'Mainnet' + test_variables: '-e NETWORK=Mainnet -e GENERATE_CHECKPOINTS_MAINNET=1 -e ZEBRA_FORCE_USE_COLOR=1 -e ZEBRA_CACHED_STATE_DIR=/var/cache/zebrad-cache' needs_zebra_state: true # test-update-sync updates the disk on every PR, so we don't need to do it here saves_to_disk: false @@ -512,8 +515,7 @@ jobs: test_id: full-sync-to-tip-testnet test_description: Test a full sync up to the tip on testnet # The value of FULL_SYNC_TESTNET_TIMEOUT_MINUTES is currently ignored. - test_variables: '-e FULL_SYNC_TESTNET_TIMEOUT_MINUTES=0 -e ZEBRA_FORCE_USE_COLOR=1' - network: 'Testnet' + test_variables: '-e NETWORK=Testnet -e FULL_SYNC_TESTNET_TIMEOUT_MINUTES=0 -e ZEBRA_FORCE_USE_COLOR=1' # A full testnet sync could take 2-10 hours in April 2023. # The time varies a lot due to the small number of nodes. is_long_test: true @@ -557,8 +559,7 @@ jobs: app_name: zebrad test_id: generate-checkpoints-testnet test_description: Generate Zebra checkpoints on testnet - test_variables: '-e GENERATE_CHECKPOINTS_TESTNET=1 -e ZEBRA_FORCE_USE_COLOR=1 -e ZEBRA_CACHED_STATE_DIR=/var/cache/zebrad-cache' - network: 'Testnet' + test_variables: '-e NETWORK=Testnet -e GENERATE_CHECKPOINTS_TESTNET=1 -e ZEBRA_FORCE_USE_COLOR=1 -e ZEBRA_CACHED_STATE_DIR=/var/cache/zebrad-cache' needs_zebra_state: true # update the disk on every PR, to increase CI speed # we don't have a test-update-sync-testnet job, so we need to update the disk here @@ -591,8 +592,7 @@ jobs: app_name: lightwalletd test_id: lwd-full-sync test_description: Test lightwalletd full sync - test_variables: '-e TEST_LWD_FULL_SYNC=1 -e ZEBRA_TEST_LIGHTWALLETD=1 -e ZEBRA_FORCE_USE_COLOR=1 -e ZEBRA_CACHED_STATE_DIR=/var/cache/zebrad-cache -e LIGHTWALLETD_DATA_DIR=/var/cache/lwd-cache' - network: ${{ inputs.network || vars.ZCASH_NETWORK }} + test_variables: '-e NETWORK=${{ inputs.network || 
vars.ZCASH_NETWORK }} -e TEST_LWD_FULL_SYNC=1 -e ZEBRA_TEST_LIGHTWALLETD=1 -e ZEBRA_FORCE_USE_COLOR=1 -e ZEBRA_CACHED_STATE_DIR=/var/cache/zebrad-cache -e LIGHTWALLETD_DATA_DIR=/var/cache/lwd-cache' # This test runs for longer than 6 hours, so it needs multiple jobs is_long_test: true needs_zebra_state: true @@ -631,8 +631,7 @@ jobs: app_name: lightwalletd test_id: lwd-update-sync test_description: Test lightwalletd update sync with both states - test_variables: '-e TEST_LWD_UPDATE_SYNC=1 -e ZEBRA_TEST_LIGHTWALLETD=1 -e ZEBRA_FORCE_USE_COLOR=1 -e ZEBRA_CACHED_STATE_DIR=/var/cache/zebrad-cache -e LIGHTWALLETD_DATA_DIR=/var/cache/lwd-cache' - network: ${{ inputs.network || vars.ZCASH_NETWORK }} + test_variables: '-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_LWD_UPDATE_SYNC=1 -e ZEBRA_TEST_LIGHTWALLETD=1 -e ZEBRA_FORCE_USE_COLOR=1 -e ZEBRA_CACHED_STATE_DIR=/var/cache/zebrad-cache -e LIGHTWALLETD_DATA_DIR=/var/cache/lwd-cache' needs_zebra_state: true needs_lwd_state: true # since we do a full sync in every PR, the new cached state will only be a few minutes newer than the original one @@ -664,8 +663,7 @@ jobs: app_name: lightwalletd test_id: fully-synced-rpc test_description: Test lightwalletd RPC with a Zebra tip state - test_variables: '-e TEST_LWD_RPC_CALL=1 -e ZEBRA_TEST_LIGHTWALLETD=1 -e ZEBRA_FORCE_USE_COLOR=1 -e ZEBRA_CACHED_STATE_DIR=/var/cache/zebrad-cache' - network: ${{ inputs.network || vars.ZCASH_NETWORK }} + test_variables: '-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_LWD_RPC_CALL=1 -e ZEBRA_TEST_LIGHTWALLETD=1 -e ZEBRA_FORCE_USE_COLOR=1 -e ZEBRA_CACHED_STATE_DIR=/var/cache/zebrad-cache' needs_zebra_state: true saves_to_disk: false disk_suffix: tip @@ -690,8 +688,7 @@ jobs: app_name: lightwalletd test_id: lwd-send-transactions test_description: Test sending transactions via lightwalletd - test_variables: '-e TEST_LWD_TRANSACTIONS=1 -e ZEBRA_TEST_LIGHTWALLETD=1 -e ZEBRA_FORCE_USE_COLOR=1 -e 
ZEBRA_CACHED_STATE_DIR=/var/cache/zebrad-cache -e LIGHTWALLETD_DATA_DIR=/var/cache/lwd-cache' - network: ${{ inputs.network || vars.ZCASH_NETWORK }} + test_variables: '-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_LWD_TRANSACTIONS=1 -e ZEBRA_TEST_LIGHTWALLETD=1 -e ZEBRA_FORCE_USE_COLOR=1 -e ZEBRA_CACHED_STATE_DIR=/var/cache/zebrad-cache -e LIGHTWALLETD_DATA_DIR=/var/cache/lwd-cache' needs_zebra_state: true needs_lwd_state: true saves_to_disk: false @@ -718,8 +715,7 @@ jobs: app_name: lightwalletd test_id: lwd-grpc-wallet test_description: Test gRPC calls via lightwalletd - test_variables: '-e TEST_LWD_GRPC=1 -e ZEBRA_TEST_LIGHTWALLETD=1 -e ZEBRA_FORCE_USE_COLOR=1 -e ZEBRA_CACHED_STATE_DIR=/var/cache/zebrad-cache -e LIGHTWALLETD_DATA_DIR=/var/cache/lwd-cache' - network: ${{ inputs.network || vars.ZCASH_NETWORK }} + test_variables: '-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_LWD_GRPC=1 -e ZEBRA_TEST_LIGHTWALLETD=1 -e ZEBRA_FORCE_USE_COLOR=1 -e ZEBRA_CACHED_STATE_DIR=/var/cache/zebrad-cache -e LIGHTWALLETD_DATA_DIR=/var/cache/lwd-cache' needs_zebra_state: true needs_lwd_state: true saves_to_disk: false @@ -750,8 +746,7 @@ jobs: app_name: zebrad test_id: get-block-template test_description: Test getblocktemplate RPC method via Zebra's rpc server - test_variables: '-e TEST_GET_BLOCK_TEMPLATE=1 -e ZEBRA_FORCE_USE_COLOR=1 -e ZEBRA_CACHED_STATE_DIR=/var/cache/zebrad-cache' - network: ${{ inputs.network || vars.ZCASH_NETWORK }} + test_variables: '-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_GET_BLOCK_TEMPLATE=1 -e ZEBRA_FORCE_USE_COLOR=1 -e ZEBRA_CACHED_STATE_DIR=/var/cache/zebrad-cache' needs_zebra_state: true needs_lwd_state: false saves_to_disk: false @@ -777,8 +772,7 @@ jobs: app_name: zebrad test_id: submit-block test_description: Test submitting blocks via Zebra's rpc server - test_variables: '-e TEST_SUBMIT_BLOCK=1 -e ZEBRA_FORCE_USE_COLOR=1 -e ZEBRA_CACHED_STATE_DIR=/var/cache/zebrad-cache' - network: ${{ 
inputs.network || vars.ZCASH_NETWORK }} + test_variables: '-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_SUBMIT_BLOCK=1 -e ZEBRA_FORCE_USE_COLOR=1 -e ZEBRA_CACHED_STATE_DIR=/var/cache/zebrad-cache' needs_zebra_state: true needs_lwd_state: false saves_to_disk: false diff --git a/.github/workflows/release-binaries.yml b/.github/workflows/release-binaries.yml index 81ee18c7b18..a96c15c2867 100644 --- a/.github/workflows/release-binaries.yml +++ b/.github/workflows/release-binaries.yml @@ -26,9 +26,6 @@ jobs: dockerfile_path: ./docker/Dockerfile dockerfile_target: runtime image_name: zebra - network: Mainnet - rust_backtrace: '1' - zebra_skip_ipv6_tests: '1' rust_log: info # This step needs access to Docker Hub secrets to run successfully secrets: inherit @@ -42,12 +39,7 @@ jobs: dockerfile_target: runtime image_name: zebra tag_suffix: .experimental - network: Testnet - rpc_port: '18232' features: "default-release-binaries getblocktemplate-rpcs" - test_features: "" - rust_backtrace: '1' - zebra_skip_ipv6_tests: '1' rust_log: info # This step needs access to Docker Hub secrets to run successfully secrets: inherit diff --git a/.github/workflows/zcash-params.yml b/.github/workflows/zcash-params.yml index 5a05a340a83..28bcea9a424 100644 --- a/.github/workflows/zcash-params.yml +++ b/.github/workflows/zcash-params.yml @@ -42,5 +42,4 @@ jobs: no_cache: ${{ inputs.no_cache || false }} rust_backtrace: full rust_lib_backtrace: full - colorbt_show_hidden: '1' rust_log: info From d725d29a580e5f575cbf47dfe3805726971e3453 Mon Sep 17 00:00:00 2001 From: Marek Date: Wed, 26 Jul 2023 00:51:05 +0200 Subject: [PATCH 248/265] add(Docker): Docs for mining with Docker (#7179) * Add docs for mining with Docker * Refactor the docs for mining with Docker * Add a note on syncing --- book/src/SUMMARY.md | 1 + book/src/user/mining-docker.md | 45 ++++++++++++++++++++++++++++++++++ 2 files changed, 46 insertions(+) create mode 100644 book/src/user/mining-docker.md diff --git 
a/book/src/SUMMARY.md b/book/src/SUMMARY.md index e0c32275940..aa49967efcc 100644 --- a/book/src/SUMMARY.md +++ b/book/src/SUMMARY.md @@ -15,6 +15,7 @@ - [zk-SNARK Parameters](user/parameters.md) - [Mining](user/mining.md) - [Testnet Mining with s-nomp](user/mining-testnet-s-nomp.md) + - [Mining with Zebra in Docker](user/mining-docker.md) - [Kibana blockchain explorer](user/elasticsearch.md) - [Troubleshooting](user/troubleshooting.md) - [Developer Documentation](dev.md) diff --git a/book/src/user/mining-docker.md b/book/src/user/mining-docker.md new file mode 100644 index 00000000000..e5d974317b8 --- /dev/null +++ b/book/src/user/mining-docker.md @@ -0,0 +1,45 @@ +# Mining with Zebra in Docker + +Some of our published [Docker images](https://hub.docker.com/r/zfnd/zebra/tags) +have the `.experimental` suffix in their name. We compile these images with the +`getblocktemplate-rpcs` feature, and you can use them for your mining +operations. For example, executing + +```bash +docker run -e MINER_ADDRESS="t1XhG6pT9xRqRQn3BHP7heUou1RuYrbcrCc" -p 8232:8232 zfnd/zebra:v1.1.0.experimental +``` + +will start a container on Mainnet and bind port 8232 on your Docker host. If you +want to start generating blocks, you need to let Zebra sync first. + +Note that you must pass the address for your mining rewards via the +`MINER_ADDRESS` environment variable when you are starting the container, as we +did in the example above. The address we used starts with the prefix `t1`, +meaning it is a Mainnet P2PKH address. Please remember to set your own address +for the rewards. + +The port we mapped between the container and the host with the `-p` flag in the +example above is Zebra's default Mainnet RPC port. If you want to use a +different one, you can specify it in the `RPC_PORT` environment variable, +similarly to `MINER_ADDRESS`, and then map it with the Docker's `-p` flag. 
+ +Instead of listing the environment variables on the command line, you can use +Docker's `--env-file` flag to specify a file containing the variables. You +can find more info here +https://docs.docker.com/engine/reference/commandline/run/#env. + +## Mining on Testnet + +If you want to mine on Testnet, you need to set the `NETWORK` environment +variable to `Testnet` and use a Testnet address for the rewards. For example, +running + +```bash +docker run -e NETWORK="Testnet" -e MINER_ADDRESS="t27eWDgjFYJGVXmzrXeVjnb5J3uXDM9xH9v" -p 18232:18232 zfnd/zebra:v1.1.0.experimental +``` + +will start a container on Testnet and bind port 18232 on your Docker host, which +is the standard Testnet RPC port. Notice that we also used a different rewards +address. It starts with the prefix `t2`, indicating that it is a Testnet +address. A Mainnet address would prevent Zebra from starting on Testnet, and +conversely, a Testnet address would prevent Zebra from starting on Mainnet. From 195280a760b4c9975b475cd26a1ae6a46fe212b8 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sun, 30 Jul 2023 21:10:57 +0000 Subject: [PATCH 249/265] build(deps): bump tj-actions/changed-files from 37.4.0 to 37.5.0 (#7291) Bumps [tj-actions/changed-files](https://github.com/tj-actions/changed-files) from 37.4.0 to 37.5.0. - [Release notes](https://github.com/tj-actions/changed-files/releases) - [Changelog](https://github.com/tj-actions/changed-files/blob/main/HISTORY.md) - [Commits](https://github.com/tj-actions/changed-files/compare/v37.4.0...v37.5.0) --- updated-dependencies: - dependency-name: tj-actions/changed-files dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/lint.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 7a9db2dad0d..a61514060c5 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -37,7 +37,7 @@ jobs: - name: Rust files id: changed-files-rust - uses: tj-actions/changed-files@v37.4.0 + uses: tj-actions/changed-files@v37.5.0 with: files: | **/*.rs @@ -49,7 +49,7 @@ jobs: - name: Workflow files id: changed-files-workflows - uses: tj-actions/changed-files@v37.4.0 + uses: tj-actions/changed-files@v37.5.0 with: files: | .github/workflows/*.yml From 24fa1eece460f8800f4217a1e5308191b29ee6c7 Mon Sep 17 00:00:00 2001 From: Deirdre Connolly Date: Thu, 3 Aug 2023 11:21:02 -0400 Subject: [PATCH 250/265] fit(whitespace): Update CODEOWNERS (#7296) --- .github/CODEOWNERS | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 4275337d52a..33478d31539 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -14,7 +14,7 @@ /zebrad/src/commands/start.rs @ZcashFoundation/general-rust-reviewers # Network and Async Code -/tower-batch-control/ @ZcashFoundation/network-reviewers +/tower-batch-control/ @ZcashFoundation/network-reviewers /tower-fallback/ @ZcashFoundation/network-reviewers /zebra-network/ @ZcashFoundation/network-reviewers /zebra-node-services/ @ZcashFoundation/network-reviewers From 2c2fa63a03f8ddb852e9b5c8aace2f91cf3ac597 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 4 Aug 2023 10:51:15 +0000 Subject: [PATCH 251/265] build(deps): bump tj-actions/changed-files from 37.5.0 to 37.5.1 (#7295) Bumps [tj-actions/changed-files](https://github.com/tj-actions/changed-files) from 37.5.0 to 37.5.1. 
- [Release notes](https://github.com/tj-actions/changed-files/releases) - [Changelog](https://github.com/tj-actions/changed-files/blob/main/HISTORY.md) - [Commits](https://github.com/tj-actions/changed-files/compare/v37.5.0...v37.5.1) --- updated-dependencies: - dependency-name: tj-actions/changed-files dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/lint.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index a61514060c5..5b0405145ff 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -37,7 +37,7 @@ jobs: - name: Rust files id: changed-files-rust - uses: tj-actions/changed-files@v37.5.0 + uses: tj-actions/changed-files@v37.5.1 with: files: | **/*.rs @@ -49,7 +49,7 @@ jobs: - name: Workflow files id: changed-files-workflows - uses: tj-actions/changed-files@v37.5.0 + uses: tj-actions/changed-files@v37.5.1 with: files: | .github/workflows/*.yml From bf6832f53a07b359cb5bc74459c59718916c523a Mon Sep 17 00:00:00 2001 From: Gustavo Valverde Date: Tue, 8 Aug 2023 11:24:48 +0100 Subject: [PATCH 252/265] fix: remove reference to old `zealous-zebra` project (#7303) * fix: remove reference to old `zealous-zebra` project * fix(build): use `edge` tag from our repositories --- book/src/user/docker.md | 2 +- docker/Dockerfile | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/book/src/user/docker.md b/book/src/user/docker.md index 525c6a12d27..0f99bd11287 100644 --- a/book/src/user/docker.md +++ b/book/src/user/docker.md @@ -30,4 +30,4 @@ See [Building Zebra](https://github.com/ZcashFoundation/zebra#building-zebra) fo The images built by the Zebra team are all publicly hosted. 
Old image versions meant to be used by our [CI pipeline](https://github.com/ZcashFoundation/zebra/blob/main/.github/workflows/continous-integration-docker.yml) (`zebrad-test`, `lighwalletd`) might be deleted on a scheduled basis. -We use [Docker Hub](https://hub.docker.com/r/zfnd/zebra) for end-user images and [Google Artifact Registry](https://console.cloud.google.com/artifacts/docker/zealous-zebra/us/zebra) to build external tools and test images +We use [Docker Hub](https://hub.docker.com/r/zfnd/zebra) for end-user images and [Google Artifact Registry](https://console.cloud.google.com/artifacts/docker/zfnd-dev-zebra/us/zebra) to build external tools and test images diff --git a/docker/Dockerfile b/docker/Dockerfile index 22a673ca327..46beb2bdc56 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -93,8 +93,8 @@ ENV CARGO_HOME="/opt/zebrad/.cargo/" FROM deps AS tests # TODO: do not hardcode the user /root/ even though is a safe assumption # Pre-download Zcash Sprout, Sapling parameters and Lightwalletd binary -COPY --from=us-docker.pkg.dev/zealous-zebra/zebra/zcash-params /root/.zcash-params /root/.zcash-params -COPY --from=us-docker.pkg.dev/zealous-zebra/zebra/lightwalletd /opt/lightwalletd /usr/local/bin +COPY --from=us-docker.pkg.dev/zfnd-dev-zebra/zebra/zcash-params:edge /root/.zcash-params /root/.zcash-params +COPY --from=us-docker.pkg.dev/zfnd-dev-zebra/zebra/lightwalletd:edge /opt/lightwalletd /usr/local/bin # cargo uses timestamps for its cache, so they need to be in this order: # unmodified source files < previous build cache < modified source files @@ -176,7 +176,7 @@ RUN chmod u+x /runtime-entrypoint.sh FROM debian:bullseye-slim AS runtime COPY --from=release /opt/zebrad/target/release/zebrad /usr/local/bin COPY --from=release /runtime-entrypoint.sh / -COPY --from=us-docker.pkg.dev/zealous-zebra/zebra/zcash-params /root/.zcash-params /root/.zcash-params +COPY --from=us-docker.pkg.dev/zfnd-dev-zebra/zebra/zcash-params:edge /root/.zcash-params 
/root/.zcash-params RUN apt-get update && \ apt-get install -y --no-install-recommends \ From cce81b35c9c42398a84ca88aba8bf737f805552e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 8 Aug 2023 21:55:21 +0000 Subject: [PATCH 253/265] build(deps): bump Swatinem/rust-cache from 2.5.1 to 2.6.0 (#7294) Bumps [Swatinem/rust-cache](https://github.com/swatinem/rust-cache) from 2.5.1 to 2.6.0. - [Release notes](https://github.com/swatinem/rust-cache/releases) - [Changelog](https://github.com/Swatinem/rust-cache/blob/master/CHANGELOG.md) - [Commits](https://github.com/swatinem/rust-cache/compare/v2.5.1...v2.6.0) --- updated-dependencies: - dependency-name: Swatinem/rust-cache dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/continous-integration-os.yml | 4 ++-- .github/workflows/docs.yml | 2 +- .github/workflows/lint.yml | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/workflows/continous-integration-os.yml b/.github/workflows/continous-integration-os.yml index 37a14cb8b0a..3447455131b 100644 --- a/.github/workflows/continous-integration-os.yml +++ b/.github/workflows/continous-integration-os.yml @@ -107,7 +107,7 @@ jobs: curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --default-toolchain=${{ matrix.rust }} --profile=minimal - - uses: Swatinem/rust-cache@v2.5.1 + - uses: Swatinem/rust-cache@v2.6.0 # TODO: change Rust cache target directory on Windows, # or remove this workaround once the build is more efficient (#3005). 
#with: @@ -242,7 +242,7 @@ jobs: run: | curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --default-toolchain=stable --profile=minimal - - uses: Swatinem/rust-cache@v2.5.1 + - uses: Swatinem/rust-cache@v2.6.0 with: shared-key: "clippy-cargo-lock" diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml index 2c4f0e5ec9f..62340ce123e 100644 --- a/.github/workflows/docs.yml +++ b/.github/workflows/docs.yml @@ -56,7 +56,7 @@ jobs: run: | curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --default-toolchain=beta --profile=default - - uses: Swatinem/rust-cache@v2.5.1 + - uses: Swatinem/rust-cache@v2.6.0 - name: Setup mdBook uses: peaceiris/actions-mdbook@v1.2.0 diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 5b0405145ff..f07a0b001bc 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -86,7 +86,7 @@ jobs: run: | curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --default-toolchain=stable --profile=default - - uses: Swatinem/rust-cache@v2.5.1 + - uses: Swatinem/rust-cache@v2.6.0 with: shared-key: "clippy-cargo-lock" @@ -131,7 +131,7 @@ jobs: # We don't cache `fmt` outputs because the job is quick, # and we want to use the limited GitHub actions cache space for slower jobs. 
- #- uses: Swatinem/rust-cache@v2.5.1 + #- uses: Swatinem/rust-cache@v2.6.0 - run: | cargo fmt --all -- --check From 57c92491418f86638d6b78e1da91caa352287cb5 Mon Sep 17 00:00:00 2001 From: Marek Date: Wed, 9 Aug 2023 02:32:27 +0200 Subject: [PATCH 254/265] change(state): Insert only the first tree in each series of identical trees into finalized state (#7266) * Pass ZebraDB to batch preparation * Dedup the insertion of Sapling trees into database * Dedup the insertion of Orchard trees into database * Update snapshots * Rename batch preparation of trees * Simplify the naming of note commitment trees * Correctly retrieve Sapling trees from fin state * Correctly retrieve Orchard trees from fin state * Simplify the naming of methods for Sprout trees * Simplify the naming of methods for Sapling trees * Simplify the naming of methods for Orchard trees * Reduce disk reads by caching trees. (#7276) * Bump the state minor version * Reset the state patch version * Simplify the preparation of genesis trees * Store the roots of the trees of the genesis block * Add the genesis roots to snapshots * fix(test): Don't include shielded data in genesis blocks (#7302) * fix(state): Fix marking format upgrades (#7304) --------- Co-authored-by: Arya --- zebra-chain/src/transaction/arbitrary.rs | 42 ++++-- zebra-state/src/constants.rs | 4 +- zebra-state/src/service/check/anchors.rs | 2 +- .../src/service/check/tests/nullifier.rs | 12 +- zebra-state/src/service/check/tests/utxo.rs | 9 +- zebra-state/src/service/finalized_state.rs | 28 ++-- .../disk_format/tests/snapshot.rs | 2 +- ...te_commitment_tree_raw_data@mainnet_0.snap | 2 +- ...te_commitment_tree_raw_data@mainnet_1.snap | 4 - ...te_commitment_tree_raw_data@mainnet_2.snap | 8 -- ...te_commitment_tree_raw_data@testnet_0.snap | 2 +- ...te_commitment_tree_raw_data@testnet_1.snap | 4 - ...te_commitment_tree_raw_data@testnet_2.snap | 8 -- ...te_commitment_tree_raw_data@mainnet_0.snap | 2 +- ...te_commitment_tree_raw_data@mainnet_1.snap | 
4 - ...te_commitment_tree_raw_data@mainnet_2.snap | 8 -- ...te_commitment_tree_raw_data@testnet_0.snap | 2 +- ...te_commitment_tree_raw_data@testnet_1.snap | 4 - ...te_commitment_tree_raw_data@testnet_2.snap | 8 -- ...te_commitment_tree_raw_data@mainnet_0.snap | 2 +- ...te_commitment_tree_raw_data@testnet_0.snap | 2 +- .../finalized_state/disk_format/upgrade.rs | 22 ++-- .../src/service/finalized_state/tests/prop.rs | 11 +- .../service/finalized_state/zebra_db/block.rs | 108 +++++++++++----- .../zebra_db/block/tests/snapshot.rs | 18 +-- .../snapshots/orchard_trees@mainnet_1.snap | 4 +- .../snapshots/orchard_trees@mainnet_2.snap | 4 +- .../snapshots/orchard_trees@testnet_1.snap | 4 +- .../snapshots/orchard_trees@testnet_2.snap | 4 +- .../snapshots/sapling_trees@mainnet_1.snap | 4 +- .../snapshots/sapling_trees@mainnet_2.snap | 4 +- .../snapshots/sapling_trees@testnet_1.snap | 4 +- .../snapshots/sapling_trees@testnet_2.snap | 4 +- .../finalized_state/zebra_db/shielded.rs | 122 +++++++----------- .../src/service/non_finalized_state.rs | 6 +- zebra-state/src/service/read/tree.rs | 4 +- zebra-state/src/service/write.rs | 14 +- zebra-state/src/tests/setup.rs | 2 +- 38 files changed, 259 insertions(+), 239 deletions(-) diff --git a/zebra-chain/src/transaction/arbitrary.rs b/zebra-chain/src/transaction/arbitrary.rs index 43581e07a0e..704a0d23cd1 100644 --- a/zebra-chain/src/transaction/arbitrary.rs +++ b/zebra-chain/src/transaction/arbitrary.rs @@ -109,20 +109,32 @@ impl Transaction { option::of(any::>()), ) .prop_map( - |( - inputs, - outputs, - lock_time, - expiry_height, - joinsplit_data, - sapling_shielded_data, - )| Transaction::V4 { + move |( inputs, outputs, lock_time, expiry_height, joinsplit_data, sapling_shielded_data, + )| { + Transaction::V4 { + inputs, + outputs, + lock_time, + expiry_height, + joinsplit_data: if ledger_state.height.is_min() { + // The genesis block should not contain any joinsplits. 
+ None + } else { + joinsplit_data + }, + sapling_shielded_data: if ledger_state.height.is_min() { + // The genesis block should not contain any shielded data. + None + } else { + sapling_shielded_data + }, + } }, ) .boxed() @@ -159,8 +171,18 @@ impl Transaction { expiry_height, inputs, outputs, - sapling_shielded_data, - orchard_shielded_data, + sapling_shielded_data: if ledger_state.height.is_min() { + // The genesis block should not contain any shielded data. + None + } else { + sapling_shielded_data + }, + orchard_shielded_data: if ledger_state.height.is_min() { + // The genesis block should not contain any shielded data. + None + } else { + orchard_shielded_data + }, } }, ) diff --git a/zebra-state/src/constants.rs b/zebra-state/src/constants.rs index bd60f3d6198..b5060d10492 100644 --- a/zebra-state/src/constants.rs +++ b/zebra-state/src/constants.rs @@ -48,11 +48,11 @@ pub(crate) const DATABASE_FORMAT_VERSION: u64 = 25; /// - adding new column families, /// - changing the format of a column family in a compatible way, or /// - breaking changes with compatibility code in all supported Zebra versions. -pub(crate) const DATABASE_FORMAT_MINOR_VERSION: u64 = 0; +pub(crate) const DATABASE_FORMAT_MINOR_VERSION: u64 = 1; /// The database format patch version, incremented each time the on-disk database format has a /// significant format compatibility fix. -pub(crate) const DATABASE_FORMAT_PATCH_VERSION: u64 = 2; +pub(crate) const DATABASE_FORMAT_PATCH_VERSION: u64 = 0; /// The name of the file containing the minor and patch database versions. 
/// diff --git a/zebra-state/src/service/check/anchors.rs b/zebra-state/src/service/check/anchors.rs index 471f39174bc..5f6ee293e34 100644 --- a/zebra-state/src/service/check/anchors.rs +++ b/zebra-state/src/service/check/anchors.rs @@ -152,7 +152,7 @@ fn fetch_sprout_final_treestates( let input_tree = parent_chain .and_then(|chain| chain.sprout_trees_by_anchor.get(&joinsplit.anchor).cloned()) - .or_else(|| finalized_state.sprout_note_commitment_tree_by_anchor(&joinsplit.anchor)); + .or_else(|| finalized_state.sprout_tree_by_anchor(&joinsplit.anchor)); if let Some(input_tree) = input_tree { sprout_final_treestates.insert(joinsplit.anchor, input_tree); diff --git a/zebra-state/src/service/check/tests/nullifier.rs b/zebra-state/src/service/check/tests/nullifier.rs index e522ec479a3..1a944d017ee 100644 --- a/zebra-state/src/service/check/tests/nullifier.rs +++ b/zebra-state/src/service/check/tests/nullifier.rs @@ -85,7 +85,7 @@ proptest! { // randomly choose to commit the block to the finalized or non-finalized state if use_finalized_state { let block1 = CheckpointVerifiedBlock::from(Arc::new(block1)); - let commit_result = finalized_state.commit_finalized_direct(block1.clone().into(), "test"); + let commit_result = finalized_state.commit_finalized_direct(block1.clone().into(), None, "test"); // the block was committed prop_assert_eq!(Some((Height(1), block1.hash)), read::best_tip(&non_finalized_state, &finalized_state.db)); @@ -352,7 +352,7 @@ proptest! 
{ // randomly choose to commit the next block to the finalized or non-finalized state if duplicate_in_finalized_state { let block1 = CheckpointVerifiedBlock::from(Arc::new(block1)); - let commit_result = finalized_state.commit_finalized_direct(block1.clone().into(), "test"); + let commit_result = finalized_state.commit_finalized_direct(block1.clone().into(), None, "test"); prop_assert_eq!(Some((Height(1), block1.hash)), read::best_tip(&non_finalized_state, &finalized_state.db)); prop_assert!(commit_result.is_ok()); @@ -452,7 +452,7 @@ proptest! { // randomly choose to commit the block to the finalized or non-finalized state if use_finalized_state { let block1 = CheckpointVerifiedBlock::from(Arc::new(block1)); - let commit_result = finalized_state.commit_finalized_direct(block1.clone().into(), "test"); + let commit_result = finalized_state.commit_finalized_direct(block1.clone().into(),None, "test"); prop_assert_eq!(Some((Height(1), block1.hash)), read::best_tip(&non_finalized_state, &finalized_state.db)); prop_assert!(commit_result.is_ok()); @@ -634,7 +634,7 @@ proptest! { // randomly choose to commit the next block to the finalized or non-finalized state if duplicate_in_finalized_state { let block1 = CheckpointVerifiedBlock::from(Arc::new(block1)); - let commit_result = finalized_state.commit_finalized_direct(block1.clone().into(), "test"); + let commit_result = finalized_state.commit_finalized_direct(block1.clone().into(),None, "test"); prop_assert_eq!(Some((Height(1), block1.hash)), read::best_tip(&non_finalized_state, &finalized_state.db)); prop_assert!(commit_result.is_ok()); @@ -732,7 +732,7 @@ proptest! 
{ // randomly choose to commit the block to the finalized or non-finalized state if use_finalized_state { let block1 = CheckpointVerifiedBlock::from(Arc::new(block1)); - let commit_result = finalized_state.commit_finalized_direct(block1.clone().into(), "test"); + let commit_result = finalized_state.commit_finalized_direct(block1.clone().into(), None, "test"); prop_assert_eq!(Some((Height(1), block1.hash)), read::best_tip(&non_finalized_state, &finalized_state.db)); prop_assert!(commit_result.is_ok()); @@ -923,7 +923,7 @@ proptest! { // randomly choose to commit the next block to the finalized or non-finalized state if duplicate_in_finalized_state { let block1 = CheckpointVerifiedBlock::from(Arc::new(block1)); - let commit_result = finalized_state.commit_finalized_direct(block1.clone().into(), "test"); + let commit_result = finalized_state.commit_finalized_direct(block1.clone().into(), None, "test"); prop_assert_eq!(Some((Height(1), block1.hash)), read::best_tip(&non_finalized_state, &finalized_state.db)); prop_assert!(commit_result.is_ok()); diff --git a/zebra-state/src/service/check/tests/utxo.rs b/zebra-state/src/service/check/tests/utxo.rs index 4f7e1e13bc1..acdc2d399a7 100644 --- a/zebra-state/src/service/check/tests/utxo.rs +++ b/zebra-state/src/service/check/tests/utxo.rs @@ -185,7 +185,7 @@ proptest! { // randomly choose to commit the block to the finalized or non-finalized state if use_finalized_state { let block1 = CheckpointVerifiedBlock::from(Arc::new(block1)); - let commit_result = finalized_state.commit_finalized_direct(block1.clone().into(), "test"); + let commit_result = finalized_state.commit_finalized_direct(block1.clone().into(), None, "test"); // the block was committed prop_assert_eq!(Some((Height(1), block1.hash)), read::best_tip(&non_finalized_state, &finalized_state.db)); @@ -273,7 +273,7 @@ proptest! 
{ if use_finalized_state_spend { let block2 = CheckpointVerifiedBlock::from(Arc::new(block2)); - let commit_result = finalized_state.commit_finalized_direct(block2.clone().into(), "test"); + let commit_result = finalized_state.commit_finalized_direct(block2.clone().into(),None, "test"); // the block was committed prop_assert_eq!(Some((Height(2), block2.hash)), read::best_tip(&non_finalized_state, &finalized_state.db)); @@ -612,7 +612,7 @@ proptest! { if use_finalized_state_spend { let block2 = CheckpointVerifiedBlock::from(block2.clone()); - let commit_result = finalized_state.commit_finalized_direct(block2.clone().into(), "test"); + let commit_result = finalized_state.commit_finalized_direct(block2.clone().into(), None, "test"); // the block was committed prop_assert_eq!(Some((Height(2), block2.hash)), read::best_tip(&non_finalized_state, &finalized_state.db)); @@ -884,7 +884,8 @@ fn new_state_with_mainnet_transparent_data( if use_finalized_state { let block1 = CheckpointVerifiedBlock::from(block1.clone()); - let commit_result = finalized_state.commit_finalized_direct(block1.clone().into(), "test"); + let commit_result = + finalized_state.commit_finalized_direct(block1.clone().into(), None, "test"); // the block was committed assert_eq!( diff --git a/zebra-state/src/service/finalized_state.rs b/zebra-state/src/service/finalized_state.rs index 1bd53054f69..702c9e575aa 100644 --- a/zebra-state/src/service/finalized_state.rs +++ b/zebra-state/src/service/finalized_state.rs @@ -20,7 +20,7 @@ use std::{ sync::Arc, }; -use zebra_chain::{block, parameters::Network}; +use zebra_chain::{block, parallel::tree::NoteCommitmentTrees, parameters::Network}; use crate::{ request::{FinalizableBlock, SemanticallyVerifiedBlockWithTrees, Treestate}, @@ -168,10 +168,12 @@ impl FinalizedState { pub fn commit_finalized( &mut self, ordered_block: QueuedCheckpointVerified, - ) -> Result { + prev_note_commitment_trees: Option, + ) -> Result<(CheckpointVerifiedBlock, NoteCommitmentTrees), 
BoxError> { let (checkpoint_verified, rsp_tx) = ordered_block; let result = self.commit_finalized_direct( checkpoint_verified.clone().into(), + prev_note_commitment_trees, "commit checkpoint-verified request", ); @@ -202,10 +204,10 @@ impl FinalizedState { // and the block write task. let result = result.map_err(CloneError::from); - let _ = rsp_tx.send(result.clone().map_err(BoxError::from)); + let _ = rsp_tx.send(result.clone().map(|(hash, _)| hash).map_err(BoxError::from)); result - .map(|_hash| checkpoint_verified) + .map(|(_hash, note_commitment_trees)| (checkpoint_verified, note_commitment_trees)) .map_err(BoxError::from) } @@ -226,9 +228,10 @@ impl FinalizedState { pub fn commit_finalized_direct( &mut self, finalizable_block: FinalizableBlock, + prev_note_commitment_trees: Option, source: &str, - ) -> Result { - let (height, hash, finalized) = match finalizable_block { + ) -> Result<(block::Hash, NoteCommitmentTrees), BoxError> { + let (height, hash, finalized, prev_note_commitment_trees) = match finalizable_block { FinalizableBlock::Checkpoint { checkpoint_verified, } => { @@ -240,9 +243,11 @@ impl FinalizedState { let block = checkpoint_verified.block.clone(); let mut history_tree = self.db.history_tree(); - let mut note_commitment_trees = self.db.note_commitment_trees(); + let prev_note_commitment_trees = + prev_note_commitment_trees.unwrap_or_else(|| self.db.note_commitment_trees()); // Update the note commitment trees. 
+ let mut note_commitment_trees = prev_note_commitment_trees.clone(); note_commitment_trees.update_trees_parallel(&block)?; // Check the block commitment if the history tree was not @@ -287,6 +292,7 @@ impl FinalizedState { history_tree, }, }, + Some(prev_note_commitment_trees), ) } FinalizableBlock::Contextual { @@ -299,6 +305,7 @@ impl FinalizedState { verified: contextually_verified.into(), treestate, }, + prev_note_commitment_trees, ), }; @@ -331,8 +338,11 @@ impl FinalizedState { #[cfg(feature = "elasticsearch")] let finalized_block = finalized.verified.block.clone(); + let note_commitment_trees = finalized.treestate.note_commitment_trees.clone(); - let result = self.db.write_block(finalized, self.network, source); + let result = + self.db + .write_block(finalized, prev_note_commitment_trees, self.network, source); if result.is_ok() { // Save blocks to elasticsearch if the feature is enabled. @@ -360,7 +370,7 @@ impl FinalizedState { } } - result + result.map(|hash| (hash, note_commitment_trees)) } #[cfg(feature = "elasticsearch")] diff --git a/zebra-state/src/service/finalized_state/disk_format/tests/snapshot.rs b/zebra-state/src/service/finalized_state/disk_format/tests/snapshot.rs index 3c5c9938e15..67b4f2ebb68 100644 --- a/zebra-state/src/service/finalized_state/disk_format/tests/snapshot.rs +++ b/zebra-state/src/service/finalized_state/disk_format/tests/snapshot.rs @@ -103,7 +103,7 @@ fn test_raw_rocksdb_column_families_with_network(network: Network) { .expect("test data deserializes"); state - .commit_finalized_direct(block.into(), "snapshot tests") + .commit_finalized_direct(block.into(), None, "snapshot tests") .expect("test block is valid"); let mut settings = insta::Settings::clone_current(); diff --git a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/orchard_note_commitment_tree_raw_data@mainnet_0.snap b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/orchard_note_commitment_tree_raw_data@mainnet_0.snap index 
bdf69ca735d..49244e75105 100644 --- a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/orchard_note_commitment_tree_raw_data@mainnet_0.snap +++ b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/orchard_note_commitment_tree_raw_data@mainnet_0.snap @@ -5,6 +5,6 @@ expression: cf_data [ KV( k: "000000", - v: "0000", + v: "0001ae2935f1dfd8a24aed7c70df7de3a668eb7a49b1319880dde2bbd9031ae5d82f", ), ] diff --git a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/orchard_note_commitment_tree_raw_data@mainnet_1.snap b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/orchard_note_commitment_tree_raw_data@mainnet_1.snap index 91cde822ee9..49244e75105 100644 --- a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/orchard_note_commitment_tree_raw_data@mainnet_1.snap +++ b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/orchard_note_commitment_tree_raw_data@mainnet_1.snap @@ -5,10 +5,6 @@ expression: cf_data [ KV( k: "000000", - v: "0000", - ), - KV( - k: "000001", v: "0001ae2935f1dfd8a24aed7c70df7de3a668eb7a49b1319880dde2bbd9031ae5d82f", ), ] diff --git a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/orchard_note_commitment_tree_raw_data@mainnet_2.snap b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/orchard_note_commitment_tree_raw_data@mainnet_2.snap index 04ee9844634..49244e75105 100644 --- a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/orchard_note_commitment_tree_raw_data@mainnet_2.snap +++ b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/orchard_note_commitment_tree_raw_data@mainnet_2.snap @@ -5,14 +5,6 @@ expression: cf_data [ KV( k: "000000", - v: "0000", - ), - KV( - k: "000001", - v: "0001ae2935f1dfd8a24aed7c70df7de3a668eb7a49b1319880dde2bbd9031ae5d82f", - ), - KV( - k: "000002", v: "0001ae2935f1dfd8a24aed7c70df7de3a668eb7a49b1319880dde2bbd9031ae5d82f", ), ] diff --git 
a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/orchard_note_commitment_tree_raw_data@testnet_0.snap b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/orchard_note_commitment_tree_raw_data@testnet_0.snap index bdf69ca735d..49244e75105 100644 --- a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/orchard_note_commitment_tree_raw_data@testnet_0.snap +++ b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/orchard_note_commitment_tree_raw_data@testnet_0.snap @@ -5,6 +5,6 @@ expression: cf_data [ KV( k: "000000", - v: "0000", + v: "0001ae2935f1dfd8a24aed7c70df7de3a668eb7a49b1319880dde2bbd9031ae5d82f", ), ] diff --git a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/orchard_note_commitment_tree_raw_data@testnet_1.snap b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/orchard_note_commitment_tree_raw_data@testnet_1.snap index 91cde822ee9..49244e75105 100644 --- a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/orchard_note_commitment_tree_raw_data@testnet_1.snap +++ b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/orchard_note_commitment_tree_raw_data@testnet_1.snap @@ -5,10 +5,6 @@ expression: cf_data [ KV( k: "000000", - v: "0000", - ), - KV( - k: "000001", v: "0001ae2935f1dfd8a24aed7c70df7de3a668eb7a49b1319880dde2bbd9031ae5d82f", ), ] diff --git a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/orchard_note_commitment_tree_raw_data@testnet_2.snap b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/orchard_note_commitment_tree_raw_data@testnet_2.snap index 04ee9844634..49244e75105 100644 --- a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/orchard_note_commitment_tree_raw_data@testnet_2.snap +++ b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/orchard_note_commitment_tree_raw_data@testnet_2.snap @@ -5,14 +5,6 @@ expression: cf_data [ 
KV( k: "000000", - v: "0000", - ), - KV( - k: "000001", - v: "0001ae2935f1dfd8a24aed7c70df7de3a668eb7a49b1319880dde2bbd9031ae5d82f", - ), - KV( - k: "000002", v: "0001ae2935f1dfd8a24aed7c70df7de3a668eb7a49b1319880dde2bbd9031ae5d82f", ), ] diff --git a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/sapling_note_commitment_tree_raw_data@mainnet_0.snap b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/sapling_note_commitment_tree_raw_data@mainnet_0.snap index bdf69ca735d..e493c279c38 100644 --- a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/sapling_note_commitment_tree_raw_data@mainnet_0.snap +++ b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/sapling_note_commitment_tree_raw_data@mainnet_0.snap @@ -5,6 +5,6 @@ expression: cf_data [ KV( k: "000000", - v: "0000", + v: "0001fbc2f4300c01f0b7820d00e3347c8da4ee614674376cbc45359daa54f9b5493e", ), ] diff --git a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/sapling_note_commitment_tree_raw_data@mainnet_1.snap b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/sapling_note_commitment_tree_raw_data@mainnet_1.snap index e4c3af6f7eb..e493c279c38 100644 --- a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/sapling_note_commitment_tree_raw_data@mainnet_1.snap +++ b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/sapling_note_commitment_tree_raw_data@mainnet_1.snap @@ -5,10 +5,6 @@ expression: cf_data [ KV( k: "000000", - v: "0000", - ), - KV( - k: "000001", v: "0001fbc2f4300c01f0b7820d00e3347c8da4ee614674376cbc45359daa54f9b5493e", ), ] diff --git a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/sapling_note_commitment_tree_raw_data@mainnet_2.snap b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/sapling_note_commitment_tree_raw_data@mainnet_2.snap index 03feeb64625..e493c279c38 100644 --- 
a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/sapling_note_commitment_tree_raw_data@mainnet_2.snap +++ b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/sapling_note_commitment_tree_raw_data@mainnet_2.snap @@ -5,14 +5,6 @@ expression: cf_data [ KV( k: "000000", - v: "0000", - ), - KV( - k: "000001", - v: "0001fbc2f4300c01f0b7820d00e3347c8da4ee614674376cbc45359daa54f9b5493e", - ), - KV( - k: "000002", v: "0001fbc2f4300c01f0b7820d00e3347c8da4ee614674376cbc45359daa54f9b5493e", ), ] diff --git a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/sapling_note_commitment_tree_raw_data@testnet_0.snap b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/sapling_note_commitment_tree_raw_data@testnet_0.snap index bdf69ca735d..e493c279c38 100644 --- a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/sapling_note_commitment_tree_raw_data@testnet_0.snap +++ b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/sapling_note_commitment_tree_raw_data@testnet_0.snap @@ -5,6 +5,6 @@ expression: cf_data [ KV( k: "000000", - v: "0000", + v: "0001fbc2f4300c01f0b7820d00e3347c8da4ee614674376cbc45359daa54f9b5493e", ), ] diff --git a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/sapling_note_commitment_tree_raw_data@testnet_1.snap b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/sapling_note_commitment_tree_raw_data@testnet_1.snap index e4c3af6f7eb..e493c279c38 100644 --- a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/sapling_note_commitment_tree_raw_data@testnet_1.snap +++ b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/sapling_note_commitment_tree_raw_data@testnet_1.snap @@ -5,10 +5,6 @@ expression: cf_data [ KV( k: "000000", - v: "0000", - ), - KV( - k: "000001", v: "0001fbc2f4300c01f0b7820d00e3347c8da4ee614674376cbc45359daa54f9b5493e", ), ] diff --git 
a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/sapling_note_commitment_tree_raw_data@testnet_2.snap b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/sapling_note_commitment_tree_raw_data@testnet_2.snap index 03feeb64625..e493c279c38 100644 --- a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/sapling_note_commitment_tree_raw_data@testnet_2.snap +++ b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/sapling_note_commitment_tree_raw_data@testnet_2.snap @@ -5,14 +5,6 @@ expression: cf_data [ KV( k: "000000", - v: "0000", - ), - KV( - k: "000001", - v: "0001fbc2f4300c01f0b7820d00e3347c8da4ee614674376cbc45359daa54f9b5493e", - ), - KV( - k: "000002", v: "0001fbc2f4300c01f0b7820d00e3347c8da4ee614674376cbc45359daa54f9b5493e", ), ] diff --git a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/sprout_note_commitment_tree_raw_data@mainnet_0.snap b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/sprout_note_commitment_tree_raw_data@mainnet_0.snap index bdf69ca735d..6d9892d5d65 100644 --- a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/sprout_note_commitment_tree_raw_data@mainnet_0.snap +++ b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/sprout_note_commitment_tree_raw_data@mainnet_0.snap @@ -5,6 +5,6 @@ expression: cf_data [ KV( k: "000000", - v: "0000", + v: "0001d7c612c817793191a1e68652121876d6b3bde40f4fa52bc314145ce6e5cdd259", ), ] diff --git a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/sprout_note_commitment_tree_raw_data@testnet_0.snap b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/sprout_note_commitment_tree_raw_data@testnet_0.snap index bdf69ca735d..6d9892d5d65 100644 --- a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/sprout_note_commitment_tree_raw_data@testnet_0.snap +++ 
b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/sprout_note_commitment_tree_raw_data@testnet_0.snap @@ -5,6 +5,6 @@ expression: cf_data [ KV( k: "000000", - v: "0000", + v: "0001d7c612c817793191a1e68652121876d6b3bde40f4fa52bc314145ce6e5cdd259", ), ] diff --git a/zebra-state/src/service/finalized_state/disk_format/upgrade.rs b/zebra-state/src/service/finalized_state/disk_format/upgrade.rs index 9f61855c12c..cbd8f3b017f 100644 --- a/zebra-state/src/service/finalized_state/disk_format/upgrade.rs +++ b/zebra-state/src/service/finalized_state/disk_format/upgrade.rs @@ -287,18 +287,18 @@ impl DbFormatChange { upgrade_height = (upgrade_height + 1).expect("task exits before maximum height"); } - } - // At the end of each format upgrade, the database is marked as upgraded to that version. - // Upgrades can be run more than once if Zebra is restarted, so this is just a performance - // optimisation. - info!( - ?initial_tip_height, - ?newer_running_version, - ?older_disk_version, - "marking database as upgraded" - ); - Self::mark_as_upgraded_to(&database_format_add_format_change_task, &config, network); + // At the end of each format upgrade, the database is marked as upgraded to that version. + // Upgrades can be run more than once if Zebra is restarted, so this is just a performance + // optimisation. + info!( + ?initial_tip_height, + ?newer_running_version, + ?older_disk_version, + "marking database as upgraded" + ); + Self::mark_as_upgraded_to(&database_format_add_format_change_task, &config, network); + } // End of example format change. 
diff --git a/zebra-state/src/service/finalized_state/tests/prop.rs b/zebra-state/src/service/finalized_state/tests/prop.rs index e849f42fe35..69fa9f40c85 100644 --- a/zebra-state/src/service/finalized_state/tests/prop.rs +++ b/zebra-state/src/service/finalized_state/tests/prop.rs @@ -29,12 +29,13 @@ fn blocks_with_v5_transactions() -> Result<()> { // use `count` to minimize test failures, so they are easier to diagnose for block in chain.iter().take(count) { let checkpoint_verified = CheckpointVerifiedBlock::from(block.block.clone()); - let hash = state.commit_finalized_direct( + let (hash, _) = state.commit_finalized_direct( checkpoint_verified.into(), + None, "blocks_with_v5_transactions test" - ); + ).unwrap(); prop_assert_eq!(Some(height), state.finalized_tip_height()); - prop_assert_eq!(hash.unwrap(), block.hash); + prop_assert_eq!(hash, block.hash); height = Height(height.0 + 1); } }); @@ -86,6 +87,7 @@ fn all_upgrades_and_wrong_commitments_with_fake_activation_heights() -> Result<( let checkpoint_verified = CheckpointVerifiedBlock::from(block); state.commit_finalized_direct( checkpoint_verified.into(), + None, "all_upgrades test" ).expect_err("Must fail commitment check"); failure_count += 1; @@ -93,8 +95,9 @@ fn all_upgrades_and_wrong_commitments_with_fake_activation_heights() -> Result<( _ => {}, } let checkpoint_verified = CheckpointVerifiedBlock::from(block.block.clone()); - let hash = state.commit_finalized_direct( + let (hash, _) = state.commit_finalized_direct( checkpoint_verified.into(), + None, "all_upgrades test" ).unwrap(); prop_assert_eq!(Some(height), state.finalized_tip_height()); diff --git a/zebra-state/src/service/finalized_state/zebra_db/block.rs b/zebra-state/src/service/finalized_state/zebra_db/block.rs index e540a0dbbd3..78cda842bd4 100644 --- a/zebra-state/src/service/finalized_state/zebra_db/block.rs +++ b/zebra-state/src/service/finalized_state/zebra_db/block.rs @@ -20,9 +20,11 @@ use zebra_chain::{ amount::NonNegative, block::{self, 
Block, Height}, orchard, + parallel::tree::NoteCommitmentTrees, parameters::{Network, GENESIS_PREVIOUS_BLOCK_HASH}, sapling, serialization::TrustedPreallocate, + sprout, transaction::{self, Transaction}, transparent, value_balance::ValueBalance, @@ -147,34 +149,28 @@ impl ZebraDb { })) } - /// Returns the Sapling - /// [`NoteCommitmentTree`](sapling::tree::NoteCommitmentTree) specified by a - /// hash or height, if it exists in the finalized `db`. + /// Returns the Sapling [`note commitment tree`](sapling::tree::NoteCommitmentTree) specified by + /// a hash or height, if it exists in the finalized state. #[allow(clippy::unwrap_in_result)] - pub fn sapling_tree( + pub fn sapling_tree_by_hash_or_height( &self, hash_or_height: HashOrHeight, ) -> Option> { let height = hash_or_height.height_or_else(|hash| self.height(hash))?; - let sapling_tree_handle = self.db.cf_handle("sapling_note_commitment_tree").unwrap(); - - self.db.zs_get(&sapling_tree_handle, &height) + self.sapling_tree_by_height(&height) } - /// Returns the Orchard - /// [`NoteCommitmentTree`](orchard::tree::NoteCommitmentTree) specified by a - /// hash or height, if it exists in the finalized `db`. + /// Returns the Orchard [`note commitment tree`](orchard::tree::NoteCommitmentTree) specified by + /// a hash or height, if it exists in the finalized state. 
#[allow(clippy::unwrap_in_result)] - pub fn orchard_tree( + pub fn orchard_tree_by_hash_or_height( &self, hash_or_height: HashOrHeight, ) -> Option> { let height = hash_or_height.height_or_else(|hash| self.height(hash))?; - let orchard_tree_handle = self.db.cf_handle("orchard_note_commitment_tree").unwrap(); - - self.db.zs_get(&orchard_tree_handle, &height) + self.orchard_tree_by_height(&height) } // Read tip block methods @@ -281,6 +277,7 @@ impl ZebraDb { pub(in super::super) fn write_block( &mut self, finalized: SemanticallyVerifiedBlockWithTrees, + prev_note_commitment_trees: Option, network: Network, source: &str, ) -> Result { @@ -375,13 +372,14 @@ impl ZebraDb { // In case of errors, propagate and do not write the batch. batch.prepare_block_batch( - &self.db, + self, &finalized, new_outputs_by_out_loc, spent_utxos_by_outpoint, spent_utxos_by_out_loc, address_balances, self.finalized_value_pool(), + prev_note_commitment_trees, )?; self.db.write(batch)?; @@ -426,14 +424,16 @@ impl DiskWriteBatch { #[allow(clippy::too_many_arguments)] pub fn prepare_block_batch( &mut self, - db: &DiskDb, + zebra_db: &ZebraDb, finalized: &SemanticallyVerifiedBlockWithTrees, new_outputs_by_out_loc: BTreeMap, spent_utxos_by_outpoint: HashMap, spent_utxos_by_out_loc: BTreeMap, address_balances: HashMap, value_pool: ValueBalance, + prev_note_commitment_trees: Option, ) -> Result<(), BoxError> { + let db = &zebra_db.db; // Commit block and transaction data. // (Transaction indexes, note commitments, and UTXOs are committed later.) self.prepare_block_header_and_transaction_data_batch(db, &finalized.verified)?; @@ -447,7 +447,7 @@ impl DiskWriteBatch { // // By returning early, Zebra commits the genesis block and transaction data, // but it ignores the genesis UTXO and value pool updates. 
- if self.prepare_genesis_batch(db, &finalized.verified) { + if self.prepare_genesis_batch(db, finalized) { return Ok(()); } @@ -462,7 +462,7 @@ impl DiskWriteBatch { )?; self.prepare_shielded_transaction_batch(db, &finalized.verified)?; - self.prepare_note_commitment_batch(db, finalized)?; + self.prepare_trees_batch(zebra_db, finalized, prev_note_commitment_trees)?; // Commit UTXOs and value pools self.prepare_chain_value_pools_batch( @@ -538,29 +538,71 @@ impl DiskWriteBatch { Ok(()) } - /// If `finalized.block` is a genesis block, - /// prepare a database batch that finishes initializing the database, - /// and return `true` (without actually writing anything). + /// If `finalized.block` is a genesis block, prepares a database batch that finishes + /// initializing the database, and returns `true` without actually writing anything. /// - /// Since the genesis block's transactions are skipped, - /// the returned genesis batch should be written to the database immediately. + /// Since the genesis block's transactions are skipped, the returned genesis batch should be + /// written to the database immediately. /// /// If `finalized.block` is not a genesis block, does nothing. /// - /// This method never returns an error. + /// # Panics + /// + /// If `finalized.block` is a genesis block, and a note commitment tree in `finalized` doesn't + /// match its corresponding empty tree. pub fn prepare_genesis_batch( &mut self, db: &DiskDb, - finalized: &SemanticallyVerifiedBlock, + finalized: &SemanticallyVerifiedBlockWithTrees, ) -> bool { - let SemanticallyVerifiedBlock { block, .. 
} = finalized; - - if block.header.previous_block_hash == GENESIS_PREVIOUS_BLOCK_HASH { - self.prepare_genesis_note_commitment_tree_batch(db, finalized); - - return true; + if finalized.verified.block.header.previous_block_hash == GENESIS_PREVIOUS_BLOCK_HASH { + assert_eq!( + *finalized.treestate.note_commitment_trees.sprout, + sprout::tree::NoteCommitmentTree::default(), + "The Sprout tree in the finalized block must match the empty Sprout tree." + ); + assert_eq!( + *finalized.treestate.note_commitment_trees.sapling, + sapling::tree::NoteCommitmentTree::default(), + "The Sapling tree in the finalized block must match the empty Sapling tree." + ); + assert_eq!( + *finalized.treestate.note_commitment_trees.orchard, + orchard::tree::NoteCommitmentTree::default(), + "The Orchard tree in the finalized block must match the empty Orchard tree." + ); + + // We want to store the trees of the genesis block together with their roots, and since + // the trees cache the roots after their computation, we trigger the computation. + // + // At the time of writing this comment, the roots are precomputed before this function + // is called, so the roots should already be cached. + finalized.treestate.note_commitment_trees.sprout.root(); + finalized.treestate.note_commitment_trees.sapling.root(); + finalized.treestate.note_commitment_trees.orchard.root(); + + // Insert the empty note commitment trees. Note that these can't be used too early + // (e.g. the Orchard tree before Nu5 activates) since the block validation will make + // sure only appropriate transactions are allowed in a block. 
+ self.zs_insert( + &db.cf_handle("sprout_note_commitment_tree").unwrap(), + finalized.verified.height, + finalized.treestate.note_commitment_trees.sprout.clone(), + ); + self.zs_insert( + &db.cf_handle("sapling_note_commitment_tree").unwrap(), + finalized.verified.height, + finalized.treestate.note_commitment_trees.sapling.clone(), + ); + self.zs_insert( + &db.cf_handle("orchard_note_commitment_tree").unwrap(), + finalized.verified.height, + finalized.treestate.note_commitment_trees.orchard.clone(), + ); + + true + } else { + false } - - false } } diff --git a/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshot.rs b/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshot.rs index 9c19f29ee61..2754cd69c3a 100644 --- a/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshot.rs +++ b/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshot.rs @@ -197,7 +197,7 @@ fn test_block_and_transaction_data_with_network(network: Network) { .expect("test data deserializes"); state - .commit_finalized_direct(block.into(), "snapshot tests") + .commit_finalized_direct(block.into(), None, "snapshot tests") .expect("test block is valid"); let mut settings = insta::Settings::clone_current(); @@ -220,10 +220,10 @@ fn snapshot_block_and_transaction_data(state: &FinalizedState) { // // We only store the sprout tree for the tip by height, so we can't check sprout here. 
let sapling_tree = state - .sapling_note_commitment_tree_by_height(&block::Height::MIN) + .sapling_tree_by_height(&block::Height::MIN) .expect("the genesis block in the database has a Sapling tree"); let orchard_tree = state - .orchard_note_commitment_tree_by_height(&block::Height::MIN) + .orchard_tree_by_height(&block::Height::MIN) .expect("the genesis block in the database has an Orchard tree"); assert_eq!(*sapling_tree, sapling::tree::NoteCommitmentTree::default()); @@ -243,13 +243,13 @@ fn snapshot_block_and_transaction_data(state: &FinalizedState) { // Shielded - let stored_sprout_trees = state.sprout_note_commitments_full_map(); + let stored_sprout_trees = state.sprout_trees_full_map(); let mut stored_sapling_trees = Vec::new(); let mut stored_orchard_trees = Vec::new(); - let sprout_tree_at_tip = state.sprout_note_commitment_tree(); - let sapling_tree_at_tip = state.sapling_note_commitment_tree(); - let orchard_tree_at_tip = state.orchard_note_commitment_tree(); + let sprout_tree_at_tip = state.sprout_tree(); + let sapling_tree_at_tip = state.sapling_tree(); + let orchard_tree_at_tip = state.orchard_tree(); // Test the history tree. 
// @@ -278,10 +278,10 @@ fn snapshot_block_and_transaction_data(state: &FinalizedState) { // // TODO: test the rest of the shielded data (anchors, nullifiers) let sapling_tree_by_height = state - .sapling_note_commitment_tree_by_height(&query_height) + .sapling_tree_by_height(&query_height) .expect("heights up to tip have Sapling trees"); let orchard_tree_by_height = state - .orchard_note_commitment_tree_by_height(&query_height) + .orchard_tree_by_height(&query_height) .expect("heights up to tip have Orchard trees"); // We don't need to snapshot the heights, diff --git a/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/orchard_trees@mainnet_1.snap b/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/orchard_trees@mainnet_1.snap index 407ca2ec20a..949d551263c 100644 --- a/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/orchard_trees@mainnet_1.snap +++ b/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/orchard_trees@mainnet_1.snap @@ -7,7 +7,9 @@ expression: stored_orchard_trees inner: Frontier( frontier: None, ), - cached_root: None, + cached_root: Some(Root(Base( + bytes: (174, 41, 53, 241, 223, 216, 162, 74, 237, 124, 112, 223, 125, 227, 166, 104, 235, 122, 73, 177, 49, 152, 128, 221, 226, 187, 217, 3, 26, 229, 216, 47), + ))), )), (Height(1), NoteCommitmentTree( inner: Frontier( diff --git a/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/orchard_trees@mainnet_2.snap b/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/orchard_trees@mainnet_2.snap index 42bf130f51c..10b343c74fa 100644 --- a/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/orchard_trees@mainnet_2.snap +++ b/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/orchard_trees@mainnet_2.snap @@ -7,7 +7,9 @@ expression: stored_orchard_trees inner: Frontier( frontier: None, ), - cached_root: None, + cached_root: Some(Root(Base( + bytes: 
(174, 41, 53, 241, 223, 216, 162, 74, 237, 124, 112, 223, 125, 227, 166, 104, 235, 122, 73, 177, 49, 152, 128, 221, 226, 187, 217, 3, 26, 229, 216, 47), + ))), )), (Height(1), NoteCommitmentTree( inner: Frontier( diff --git a/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/orchard_trees@testnet_1.snap b/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/orchard_trees@testnet_1.snap index 407ca2ec20a..949d551263c 100644 --- a/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/orchard_trees@testnet_1.snap +++ b/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/orchard_trees@testnet_1.snap @@ -7,7 +7,9 @@ expression: stored_orchard_trees inner: Frontier( frontier: None, ), - cached_root: None, + cached_root: Some(Root(Base( + bytes: (174, 41, 53, 241, 223, 216, 162, 74, 237, 124, 112, 223, 125, 227, 166, 104, 235, 122, 73, 177, 49, 152, 128, 221, 226, 187, 217, 3, 26, 229, 216, 47), + ))), )), (Height(1), NoteCommitmentTree( inner: Frontier( diff --git a/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/orchard_trees@testnet_2.snap b/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/orchard_trees@testnet_2.snap index 42bf130f51c..10b343c74fa 100644 --- a/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/orchard_trees@testnet_2.snap +++ b/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/orchard_trees@testnet_2.snap @@ -7,7 +7,9 @@ expression: stored_orchard_trees inner: Frontier( frontier: None, ), - cached_root: None, + cached_root: Some(Root(Base( + bytes: (174, 41, 53, 241, 223, 216, 162, 74, 237, 124, 112, 223, 125, 227, 166, 104, 235, 122, 73, 177, 49, 152, 128, 221, 226, 187, 217, 3, 26, 229, 216, 47), + ))), )), (Height(1), NoteCommitmentTree( inner: Frontier( diff --git a/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/sapling_trees@mainnet_1.snap 
b/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/sapling_trees@mainnet_1.snap index fd27c14835e..268442af99a 100644 --- a/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/sapling_trees@mainnet_1.snap +++ b/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/sapling_trees@mainnet_1.snap @@ -7,7 +7,9 @@ expression: stored_sapling_trees inner: Frontier( frontier: None, ), - cached_root: None, + cached_root: Some(Root(Fq( + bytes: (251, 194, 244, 48, 12, 1, 240, 183, 130, 13, 0, 227, 52, 124, 141, 164, 238, 97, 70, 116, 55, 108, 188, 69, 53, 157, 170, 84, 249, 181, 73, 62), + ))), )), (Height(1), NoteCommitmentTree( inner: Frontier( diff --git a/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/sapling_trees@mainnet_2.snap b/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/sapling_trees@mainnet_2.snap index 056e581b74f..0655ffbe372 100644 --- a/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/sapling_trees@mainnet_2.snap +++ b/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/sapling_trees@mainnet_2.snap @@ -7,7 +7,9 @@ expression: stored_sapling_trees inner: Frontier( frontier: None, ), - cached_root: None, + cached_root: Some(Root(Fq( + bytes: (251, 194, 244, 48, 12, 1, 240, 183, 130, 13, 0, 227, 52, 124, 141, 164, 238, 97, 70, 116, 55, 108, 188, 69, 53, 157, 170, 84, 249, 181, 73, 62), + ))), )), (Height(1), NoteCommitmentTree( inner: Frontier( diff --git a/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/sapling_trees@testnet_1.snap b/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/sapling_trees@testnet_1.snap index fd27c14835e..268442af99a 100644 --- a/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/sapling_trees@testnet_1.snap +++ b/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/sapling_trees@testnet_1.snap @@ -7,7 +7,9 @@ 
expression: stored_sapling_trees inner: Frontier( frontier: None, ), - cached_root: None, + cached_root: Some(Root(Fq( + bytes: (251, 194, 244, 48, 12, 1, 240, 183, 130, 13, 0, 227, 52, 124, 141, 164, 238, 97, 70, 116, 55, 108, 188, 69, 53, 157, 170, 84, 249, 181, 73, 62), + ))), )), (Height(1), NoteCommitmentTree( inner: Frontier( diff --git a/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/sapling_trees@testnet_2.snap b/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/sapling_trees@testnet_2.snap index 056e581b74f..0655ffbe372 100644 --- a/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/sapling_trees@testnet_2.snap +++ b/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/sapling_trees@testnet_2.snap @@ -7,7 +7,9 @@ expression: stored_sapling_trees inner: Frontier( frontier: None, ), - cached_root: None, + cached_root: Some(Root(Fq( + bytes: (251, 194, 244, 48, 12, 1, 240, 183, 130, 13, 0, 227, 52, 124, 141, 164, 238, 97, 70, 116, 55, 108, 188, 69, 53, 157, 170, 84, 249, 181, 73, 62), + ))), )), (Height(1), NoteCommitmentTree( inner: Frontier( diff --git a/zebra-state/src/service/finalized_state/zebra_db/shielded.rs b/zebra-state/src/service/finalized_state/zebra_db/shielded.rs index 68a75ae1162..c3cf3666423 100644 --- a/zebra-state/src/service/finalized_state/zebra_db/shielded.rs +++ b/zebra-state/src/service/finalized_state/zebra_db/shielded.rs @@ -70,7 +70,7 @@ impl ZebraDb { /// Returns the Sprout note commitment tree of the finalized tip /// or the empty tree if the state is empty. - pub fn sprout_note_commitment_tree(&self) -> Arc { + pub fn sprout_tree(&self) -> Arc { let height = match self.finalized_tip_height() { Some(h) => h, None => return Default::default(), @@ -88,7 +88,7 @@ impl ZebraDb { /// /// This is used for interstitial tree building, which is unique to Sprout. 
#[allow(clippy::unwrap_in_result)] - pub fn sprout_note_commitment_tree_by_anchor( + pub fn sprout_tree_by_anchor( &self, sprout_anchor: &sprout::tree::Root, ) -> Option> { @@ -103,7 +103,7 @@ impl ZebraDb { /// /// Calling this method can load a lot of data into RAM, and delay block commit transactions. #[allow(dead_code, clippy::unwrap_in_result)] - pub fn sprout_note_commitments_full_map( + pub fn sprout_trees_full_map( &self, ) -> HashMap> { let sprout_anchors_handle = self.db.cf_handle("sprout_anchors").unwrap(); @@ -114,20 +114,20 @@ impl ZebraDb { /// Returns the Sapling note commitment tree of the finalized tip /// or the empty tree if the state is empty. - pub fn sapling_note_commitment_tree(&self) -> Arc { + pub fn sapling_tree(&self) -> Arc { let height = match self.finalized_tip_height() { Some(h) => h, None => return Default::default(), }; - self.sapling_note_commitment_tree_by_height(&height) + self.sapling_tree_by_height(&height) .expect("Sapling note commitment tree must exist if there is a finalized tip") } /// Returns the Sapling note commitment tree matching the given block height, /// or `None` if the height is above the finalized tip. #[allow(clippy::unwrap_in_result)] - pub fn sapling_note_commitment_tree_by_height( + pub fn sapling_tree_by_height( &self, height: &Height, ) -> Option> { @@ -159,20 +159,20 @@ impl ZebraDb { /// Returns the Orchard note commitment tree of the finalized tip /// or the empty tree if the state is empty. - pub fn orchard_note_commitment_tree(&self) -> Arc { + pub fn orchard_tree(&self) -> Arc { let height = match self.finalized_tip_height() { Some(h) => h, None => return Default::default(), }; - self.orchard_note_commitment_tree_by_height(&height) + self.orchard_tree_by_height(&height) .expect("Orchard note commitment tree must exist if there is a finalized tip") } /// Returns the Orchard note commitment tree matching the given block height, /// or `None` if the height is above the finalized tip. 
#[allow(clippy::unwrap_in_result)] - pub fn orchard_note_commitment_tree_by_height( + pub fn orchard_tree_by_height( &self, height: &Height, ) -> Option> { @@ -203,9 +203,9 @@ impl ZebraDb { /// or the empty trees if the state is empty. pub fn note_commitment_trees(&self) -> NoteCommitmentTrees { NoteCommitmentTrees { - sprout: self.sprout_note_commitment_tree(), - sapling: self.sapling_note_commitment_tree(), - orchard: self.orchard_note_commitment_tree(), + sprout: self.sprout_tree(), + sapling: self.sapling_tree(), + orchard: self.orchard_tree(), } } } @@ -275,97 +275,67 @@ impl DiskWriteBatch { /// /// - Propagates any errors from updating the history tree #[allow(clippy::unwrap_in_result)] - pub fn prepare_note_commitment_batch( + pub fn prepare_trees_batch( &mut self, - db: &DiskDb, + zebra_db: &ZebraDb, finalized: &SemanticallyVerifiedBlockWithTrees, + prev_note_commitment_trees: Option, ) -> Result<(), BoxError> { + let db = &zebra_db.db; + let sprout_anchors = db.cf_handle("sprout_anchors").unwrap(); let sapling_anchors = db.cf_handle("sapling_anchors").unwrap(); let orchard_anchors = db.cf_handle("orchard_anchors").unwrap(); - let sprout_note_commitment_tree_cf = db.cf_handle("sprout_note_commitment_tree").unwrap(); - let sapling_note_commitment_tree_cf = db.cf_handle("sapling_note_commitment_tree").unwrap(); - let orchard_note_commitment_tree_cf = db.cf_handle("orchard_note_commitment_tree").unwrap(); + let sprout_tree_cf = db.cf_handle("sprout_note_commitment_tree").unwrap(); + let sapling_tree_cf = db.cf_handle("sapling_note_commitment_tree").unwrap(); + let orchard_tree_cf = db.cf_handle("orchard_note_commitment_tree").unwrap(); let height = finalized.verified.height; - let note_commitment_trees = finalized.treestate.note_commitment_trees.clone(); + let trees = finalized.treestate.note_commitment_trees.clone(); // Use the cached values that were previously calculated in parallel. 
- let sprout_root = note_commitment_trees.sprout.root(); - let sapling_root = note_commitment_trees.sapling.root(); - let orchard_root = note_commitment_trees.orchard.root(); + let sprout_root = trees.sprout.root(); + let sapling_root = trees.sapling.root(); + let orchard_root = trees.orchard.root(); // Index the new anchors. // Note: if the root hasn't changed, we write the same value again. - self.zs_insert(&sprout_anchors, sprout_root, ¬e_commitment_trees.sprout); + self.zs_insert(&sprout_anchors, sprout_root, &trees.sprout); self.zs_insert(&sapling_anchors, sapling_root, ()); self.zs_insert(&orchard_anchors, orchard_root, ()); // Delete the previously stored Sprout note commitment tree. let current_tip_height = height - 1; if let Some(h) = current_tip_height { - self.zs_delete(&sprout_note_commitment_tree_cf, h); + self.zs_delete(&sprout_tree_cf, h); } // TODO: if we ever need concurrent read-only access to the sprout tree, // store it by `()`, not height. Otherwise, the ReadStateService could // access a height that was just deleted by a concurrent StateService // write. This requires a database version update. - self.zs_insert( - &sprout_note_commitment_tree_cf, - height, - note_commitment_trees.sprout, - ); - - self.zs_insert( - &sapling_note_commitment_tree_cf, - height, - note_commitment_trees.sapling, - ); - - self.zs_insert( - &orchard_note_commitment_tree_cf, - height, - note_commitment_trees.orchard, - ); + self.zs_insert(&sprout_tree_cf, height, trees.sprout); + + // Store the Sapling tree only if it is not already present at the previous height. + if height.is_min() + || prev_note_commitment_trees + .as_ref() + .map_or_else(|| zebra_db.sapling_tree(), |trees| trees.sapling.clone()) + != trees.sapling + { + self.zs_insert(&sapling_tree_cf, height, trees.sapling); + } - self.prepare_history_batch(db, finalized) - } + // Store the Orchard tree only if it is not already present at the previous height. 
+ if height.is_min() + || prev_note_commitment_trees + .map_or_else(|| zebra_db.orchard_tree(), |trees| trees.orchard) + != trees.orchard + { + self.zs_insert(&orchard_tree_cf, height, trees.orchard); + } - /// Prepare a database batch containing the initial note commitment trees, - /// and return it (without actually writing anything). - /// - /// This method never returns an error. - pub fn prepare_genesis_note_commitment_tree_batch( - &mut self, - db: &DiskDb, - finalized: &SemanticallyVerifiedBlock, - ) { - let sprout_note_commitment_tree_cf = db.cf_handle("sprout_note_commitment_tree").unwrap(); - let sapling_note_commitment_tree_cf = db.cf_handle("sapling_note_commitment_tree").unwrap(); - let orchard_note_commitment_tree_cf = db.cf_handle("orchard_note_commitment_tree").unwrap(); - - let SemanticallyVerifiedBlock { height, .. } = finalized; - - // Insert empty note commitment trees. Note that these can't be - // used too early (e.g. the Orchard tree before Nu5 activates) - // since the block validation will make sure only appropriate - // transactions are allowed in a block. 
- self.zs_insert( - &sprout_note_commitment_tree_cf, - height, - sprout::tree::NoteCommitmentTree::default(), - ); - self.zs_insert( - &sapling_note_commitment_tree_cf, - height, - sapling::tree::NoteCommitmentTree::default(), - ); - self.zs_insert( - &orchard_note_commitment_tree_cf, - height, - orchard::tree::NoteCommitmentTree::default(), - ); + self.prepare_history_batch(db, finalized) } } diff --git a/zebra-state/src/service/non_finalized_state.rs b/zebra-state/src/service/non_finalized_state.rs index 162373b5d57..6b303360b6f 100644 --- a/zebra-state/src/service/non_finalized_state.rs +++ b/zebra-state/src/service/non_finalized_state.rs @@ -284,9 +284,9 @@ impl NonFinalizedState { let chain = Chain::new( self.network, finalized_tip_height, - finalized_state.sprout_note_commitment_tree(), - finalized_state.sapling_note_commitment_tree(), - finalized_state.orchard_note_commitment_tree(), + finalized_state.sprout_tree(), + finalized_state.sapling_tree(), + finalized_state.orchard_tree(), finalized_state.history_tree(), finalized_state.finalized_value_pool(), ); diff --git a/zebra-state/src/service/read/tree.rs b/zebra-state/src/service/read/tree.rs index 704637d3bff..9f05f1d25d4 100644 --- a/zebra-state/src/service/read/tree.rs +++ b/zebra-state/src/service/read/tree.rs @@ -38,7 +38,7 @@ where // in memory, but `db` stores blocks on disk, with a memory cache.) chain .and_then(|chain| chain.as_ref().sapling_tree(hash_or_height)) - .or_else(|| db.sapling_tree(hash_or_height)) + .or_else(|| db.sapling_tree_by_hash_or_height(hash_or_height)) } /// Returns the Orchard @@ -59,7 +59,7 @@ where // in memory, but `db` stores blocks on disk, with a memory cache.) 
chain .and_then(|chain| chain.as_ref().orchard_tree(hash_or_height)) - .or_else(|| db.orchard_tree(hash_or_height)) + .or_else(|| db.orchard_tree_by_hash_or_height(hash_or_height)) } #[cfg(feature = "getblocktemplate-rpcs")] diff --git a/zebra-state/src/service/write.rs b/zebra-state/src/service/write.rs index 94392d2aa2c..cb36b6a2ba5 100644 --- a/zebra-state/src/service/write.rs +++ b/zebra-state/src/service/write.rs @@ -140,6 +140,7 @@ pub fn write_blocks_from_channels( non_finalized_state_sender: watch::Sender, ) { let mut last_zebra_mined_log_height = None; + let mut prev_finalized_note_commitment_trees = None; // Write all the finalized blocks sent by the state, // until the state closes the finalized block channel's sender. @@ -178,9 +179,12 @@ pub fn write_blocks_from_channels( } // Try committing the block - match finalized_state.commit_finalized(ordered_block) { - Ok(finalized) => { + match finalized_state + .commit_finalized(ordered_block, prev_finalized_note_commitment_trees.take()) + { + Ok((finalized, note_commitment_trees)) => { let tip_block = ChainTipBlock::from(finalized); + prev_finalized_note_commitment_trees = Some(note_commitment_trees); log_if_mined_by_zebra(&tip_block, &mut last_zebra_mined_log_height); @@ -289,11 +293,11 @@ pub fn write_blocks_from_channels( while non_finalized_state.best_chain_len() > MAX_BLOCK_REORG_HEIGHT { tracing::trace!("finalizing block past the reorg limit"); let contextually_verified_with_trees = non_finalized_state.finalize(); - finalized_state - .commit_finalized_direct(contextually_verified_with_trees, "commit contextually-verified request") + prev_finalized_note_commitment_trees = finalized_state + .commit_finalized_direct(contextually_verified_with_trees, prev_finalized_note_commitment_trees.take(), "commit contextually-verified request") .expect( "unexpected finalized block commit error: note commitment and history trees were already checked by the non-finalized state", - ); + ).1.into(); } // Update the 
metrics if semantic and contextual validation passes diff --git a/zebra-state/src/tests/setup.rs b/zebra-state/src/tests/setup.rs index 7316b12a284..296ee10a0e1 100644 --- a/zebra-state/src/tests/setup.rs +++ b/zebra-state/src/tests/setup.rs @@ -107,7 +107,7 @@ pub(crate) fn new_state_with_mainnet_genesis( let genesis = CheckpointVerifiedBlock::from(genesis); finalized_state - .commit_finalized_direct(genesis.clone().into(), "test") + .commit_finalized_direct(genesis.clone().into(), None, "test") .expect("unexpected invalid genesis block test vector"); assert_eq!( From a9a6c6a8e5680cceb1ccd24b3e116bb4f93d7f98 Mon Sep 17 00:00:00 2001 From: Arya Date: Wed, 9 Aug 2023 06:01:21 -0400 Subject: [PATCH 255/265] use zcash lwd in ci instead of aditya fork (#7307) --- .github/workflows/zcash-lightwalletd.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/zcash-lightwalletd.yml b/.github/workflows/zcash-lightwalletd.yml index 28b444589f6..96c7376e243 100644 --- a/.github/workflows/zcash-lightwalletd.yml +++ b/.github/workflows/zcash-lightwalletd.yml @@ -58,7 +58,7 @@ jobs: steps: - uses: actions/checkout@v3.5.3 with: - repository: adityapk00/lightwalletd + repository: zcash/lightwalletd ref: 'master' persist-credentials: false From 5e20d89a547d481aacaf7bc1a109a9ba75363f6f Mon Sep 17 00:00:00 2001 From: Marek Date: Thu, 10 Aug 2023 21:27:33 +0200 Subject: [PATCH 256/265] fix(ci): Specify Testnet in jobs that require it (#7309) * Add "Testnet" to `generate-checkpoints-testnet` * Add `Testnet` to `full-sync-to-tip-testnet` --- .github/workflows/continous-integration-docker.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/workflows/continous-integration-docker.yml b/.github/workflows/continous-integration-docker.yml index 29618143a5b..ce252fd8af4 100644 --- a/.github/workflows/continous-integration-docker.yml +++ b/.github/workflows/continous-integration-docker.yml @@ -516,6 +516,7 @@ jobs: test_description: Test a full sync up 
to the tip on testnet # The value of FULL_SYNC_TESTNET_TIMEOUT_MINUTES is currently ignored. test_variables: '-e NETWORK=Testnet -e FULL_SYNC_TESTNET_TIMEOUT_MINUTES=0 -e ZEBRA_FORCE_USE_COLOR=1' + network: "Testnet" # A full testnet sync could take 2-10 hours in April 2023. # The time varies a lot due to the small number of nodes. is_long_test: true @@ -560,6 +561,7 @@ jobs: test_id: generate-checkpoints-testnet test_description: Generate Zebra checkpoints on testnet test_variables: '-e NETWORK=Testnet -e GENERATE_CHECKPOINTS_TESTNET=1 -e ZEBRA_FORCE_USE_COLOR=1 -e ZEBRA_CACHED_STATE_DIR=/var/cache/zebrad-cache' + network: "Testnet" needs_zebra_state: true # update the disk on every PR, to increase CI speed # we don't have a test-update-sync-testnet job, so we need to update the disk here From e2bf4aa33088bbb13951b6c7aeaa86aa461b64fe Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 10 Aug 2023 22:28:00 +0000 Subject: [PATCH 257/265] build(deps): bump the formats group with 4 updates (#7298) Bumps the formats group with 4 updates: [serde](https://github.com/serde-rs/serde), [regex](https://github.com/rust-lang/regex), [serde_json](https://github.com/serde-rs/json) and [serde_with](https://github.com/jonasbb/serde_with). 
Updates `serde` from 1.0.175 to 1.0.179 - [Release notes](https://github.com/serde-rs/serde/releases) - [Commits](https://github.com/serde-rs/serde/compare/v1.0.175...v1.0.179) Updates `regex` from 1.9.1 to 1.9.3 - [Release notes](https://github.com/rust-lang/regex/releases) - [Changelog](https://github.com/rust-lang/regex/blob/master/CHANGELOG.md) - [Commits](https://github.com/rust-lang/regex/compare/1.9.1...1.9.3) Updates `serde_json` from 1.0.103 to 1.0.104 - [Release notes](https://github.com/serde-rs/json/releases) - [Commits](https://github.com/serde-rs/json/compare/v1.0.103...v1.0.104) Updates `serde_with` from 3.1.0 to 3.2.0 - [Release notes](https://github.com/jonasbb/serde_with/releases) - [Commits](https://github.com/jonasbb/serde_with/compare/v3.1.0...v3.2.0) --- updated-dependencies: - dependency-name: serde dependency-type: direct:production update-type: version-update:semver-patch dependency-group: formats - dependency-name: regex dependency-type: direct:production update-type: version-update:semver-patch dependency-group: formats - dependency-name: serde_json dependency-type: direct:production update-type: version-update:semver-patch dependency-group: formats - dependency-name: serde_with dependency-type: direct:production update-type: version-update:semver-minor dependency-group: formats ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com> --- Cargo.lock | 35 +++++++++++++++++----------------- zebra-chain/Cargo.toml | 6 +++--- zebra-consensus/Cargo.toml | 2 +- zebra-network/Cargo.toml | 4 ++-- zebra-node-services/Cargo.toml | 8 ++++---- zebra-rpc/Cargo.toml | 4 ++-- zebra-state/Cargo.toml | 6 +++--- zebra-test/Cargo.toml | 2 +- zebra-utils/Cargo.toml | 4 ++-- zebrad/Cargo.toml | 6 +++--- 10 files changed, 39 insertions(+), 38 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 826a3d2d38d..bc088fc7be0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3423,13 +3423,13 @@ dependencies = [ [[package]] name = "regex" -version = "1.9.1" +version = "1.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2eae68fc220f7cf2532e4494aded17545fce192d59cd996e0fe7887f4ceb575" +checksum = "81bc1d4caf89fac26a70747fe603c130093b53c773888797a6329091246d651a" dependencies = [ "aho-corasick", "memchr", - "regex-automata 0.3.3", + "regex-automata 0.3.6", "regex-syntax 0.7.4", ] @@ -3444,9 +3444,9 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.3.3" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "39354c10dd07468c2e73926b23bb9c2caca74c5501e38a35da70406f1d923310" +checksum = "fed1ceff11a1dddaee50c9dc8e4938bd106e9d89ae372f192311e7da498e3b69" dependencies = [ "aho-corasick", "memchr", @@ -3861,9 +3861,9 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.175" +version = "1.0.179" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d25439cd7397d044e2748a6fe2432b5e85db703d6d097bd014b3c0ad1ebff0b" +checksum = "0a5bf42b8d227d4abf38a1ddb08602e229108a517cd4e5bb28f9c7eaafdce5c0" dependencies = [ "serde_derive", ] @@ -3879,9 +3879,9 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.175" 
+version = "1.0.179" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b23f7ade6f110613c0d63858ddb8b94c1041f550eab58a16b371bdf2c9c80ab4" +checksum = "741e124f5485c7e60c03b043f79f320bff3527f4bbf12cf3831750dc46a0ec2c" dependencies = [ "proc-macro2 1.0.66", "quote 1.0.31", @@ -3890,9 +3890,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.103" +version = "1.0.104" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d03b412469450d4404fe8499a268edd7f8b79fecb074b0d812ad64ca21f4031b" +checksum = "076066c5f1078eac5b722a31827a8832fe108bed65dfa75e233c89f8206e976c" dependencies = [ "indexmap 2.0.0", "itoa", @@ -3933,17 +3933,18 @@ dependencies = [ [[package]] name = "serde_with" -version = "3.1.0" +version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21e47d95bc83ed33b2ecf84f4187ad1ab9685d18ff28db000c99deac8ce180e3" +checksum = "1402f54f9a3b9e2efe71c1cea24e648acce55887983553eeb858cf3115acfd49" dependencies = [ "base64 0.21.2", "chrono", "hex", "indexmap 1.9.3", + "indexmap 2.0.0", "serde", "serde_json", - "serde_with_macros 3.1.0", + "serde_with_macros 3.2.0", "time", ] @@ -3961,9 +3962,9 @@ dependencies = [ [[package]] name = "serde_with_macros" -version = "3.1.0" +version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea3cee93715c2e266b9338b7544da68a9f24e227722ba482bd1c024367c77c65" +checksum = "9197f1ad0e3c173a0222d3c4404fb04c3afe87e962bcb327af73e8301fa203c7" dependencies = [ "darling 0.20.3", "proc-macro2 1.0.66", @@ -5492,7 +5493,7 @@ dependencies = [ "serde", "serde-big-array", "serde_json", - "serde_with 3.1.0", + "serde_with 3.2.0", "sha2", "spandoc", "static_assertions", diff --git a/zebra-chain/Cargo.toml b/zebra-chain/Cargo.toml index 7820507d304..5c2020bb64a 100644 --- a/zebra-chain/Cargo.toml +++ b/zebra-chain/Cargo.toml @@ -96,8 +96,8 @@ tracing = "0.1.37" # Serialization hex = { version = "0.4.3", features 
= ["serde"] } -serde = { version = "1.0.175", features = ["serde_derive", "rc"] } -serde_with = "3.0.0" +serde = { version = "1.0.179", features = ["serde_derive", "rc"] } +serde_with = "3.2.0" serde-big-array = "0.5.1" # Processing @@ -111,7 +111,7 @@ redjubjub = "0.7.0" reddsa = "0.5.1" # Production feature json-conversion -serde_json = { version = "1.0.100", optional = true } +serde_json = { version = "1.0.104", optional = true } # Production feature async-error and testing feature proptest-impl tokio = { version = "1.29.1", optional = true } diff --git a/zebra-consensus/Cargo.toml b/zebra-consensus/Cargo.toml index 4211e43d5a9..25024287e1c 100644 --- a/zebra-consensus/Cargo.toml +++ b/zebra-consensus/Cargo.toml @@ -47,7 +47,7 @@ chrono = { version = "0.4.26", default-features = false, features = ["clock", "s displaydoc = "0.2.4" lazy_static = "1.4.0" once_cell = "1.18.0" -serde = { version = "1.0.175", features = ["serde_derive"] } +serde = { version = "1.0.179", features = ["serde_derive"] } futures = "0.3.28" futures-util = "0.3.28" diff --git a/zebra-network/Cargo.toml b/zebra-network/Cargo.toml index c8299fb3ae0..0510ba13818 100644 --- a/zebra-network/Cargo.toml +++ b/zebra-network/Cargo.toml @@ -55,8 +55,8 @@ ordered-map = "0.4.2" pin-project = "1.1.2" rand = "0.8.5" rayon = "1.7.0" -regex = "1.9.1" -serde = { version = "1.0.175", features = ["serde_derive"] } +regex = "1.9.3" +serde = { version = "1.0.179", features = ["serde_derive"] } tempfile = "3.7.0" thiserror = "1.0.44" diff --git a/zebra-node-services/Cargo.toml b/zebra-node-services/Cargo.toml index 956b4922995..1b8b9c824ce 100644 --- a/zebra-node-services/Cargo.toml +++ b/zebra-node-services/Cargo.toml @@ -44,13 +44,13 @@ color-eyre = { version = "0.6.2", optional = true } jsonrpc-core = { version = "18.0.0", optional = true } # Security: avoid default dependency on openssl reqwest = { version = "0.11.18", default-features = false, features = ["rustls-tls"], optional = true } -serde = { version = 
"1.0.175", optional = true } -serde_json = { version = "1.0.100", optional = true } +serde = { version = "1.0.179", optional = true } +serde_json = { version = "1.0.104", optional = true } [dev-dependencies] color-eyre = "0.6.2" jsonrpc-core = "18.0.0" reqwest = { version = "0.11.18", default-features = false, features = ["rustls-tls"] } -serde = "1.0.175" -serde_json = "1.0.100" +serde = "1.0.179" +serde_json = "1.0.104" diff --git a/zebra-rpc/Cargo.toml b/zebra-rpc/Cargo.toml index 2115dda055b..d3b7bc8084e 100644 --- a/zebra-rpc/Cargo.toml +++ b/zebra-rpc/Cargo.toml @@ -51,7 +51,7 @@ jsonrpc-http-server = "18.0.0" num_cpus = "1.16.0" # zebra-rpc needs the preserve_order feature in serde_json, which is a dependency of jsonrpc-core -serde_json = { version = "1.0.100", features = ["preserve_order"] } +serde_json = { version = "1.0.104", features = ["preserve_order"] } indexmap = { version = "2.0.0", features = ["serde"] } tokio = { version = "1.29.1", features = ["time", "rt-multi-thread", "macros", "tracing"] } @@ -60,7 +60,7 @@ tower = "0.4.13" tracing = "0.1.37" hex = { version = "0.4.3", features = ["serde"] } -serde = { version = "1.0.175", features = ["serde_derive"] } +serde = { version = "1.0.179", features = ["serde_derive"] } # Experimental feature getblocktemplate-rpcs rand = { version = "0.8.5", optional = true } diff --git a/zebra-state/Cargo.toml b/zebra-state/Cargo.toml index 70453548be2..0827053fa59 100644 --- a/zebra-state/Cargo.toml +++ b/zebra-state/Cargo.toml @@ -53,11 +53,11 @@ itertools = "0.11.0" lazy_static = "1.4.0" metrics = "0.21.1" mset = "0.1.1" -regex = "1.9.1" +regex = "1.9.3" rlimit = "0.10.1" rocksdb = { version = "0.21.0", default-features = false, features = ["lz4"] } semver = "1.0.18" -serde = { version = "1.0.175", features = ["serde_derive"] } +serde = { version = "1.0.179", features = ["serde_derive"] } tempfile = "3.7.0" thiserror = "1.0.44" @@ -69,7 +69,7 @@ tracing = "0.1.37" # elasticsearch specific dependencies. 
# Security: avoid default dependency on openssl elasticsearch = { version = "8.5.0-alpha.1", default-features = false, features = ["rustls-tls"], optional = true } -serde_json = { version = "1.0.100", package = "serde_json", optional = true } +serde_json = { version = "1.0.104", package = "serde_json", optional = true } zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.28", features = ["async-error"] } diff --git a/zebra-test/Cargo.toml b/zebra-test/Cargo.toml index 98c22eebfe8..24de90b03ff 100644 --- a/zebra-test/Cargo.toml +++ b/zebra-test/Cargo.toml @@ -22,7 +22,7 @@ insta = "1.31.0" proptest = "1.2.0" once_cell = "1.18.0" rand = "0.8.5" -regex = "1.9.1" +regex = "1.9.3" tokio = { version = "1.29.1", features = ["full", "tracing", "test-util"] } tower = { version = "0.4.13", features = ["util"] } diff --git a/zebra-utils/Cargo.toml b/zebra-utils/Cargo.toml index a9ba7944f95..0befe3849fd 100644 --- a/zebra-utils/Cargo.toml +++ b/zebra-utils/Cargo.toml @@ -69,7 +69,7 @@ tinyvec = { version = "1.6.0", features = ["rustc_1_55"] } structopt = "0.3.26" hex = "0.4.3" -serde_json = "1.0.100" +serde_json = "1.0.104" tracing-error = "0.2.0" tracing-subscriber = "0.3.17" thiserror = "1.0.44" @@ -84,7 +84,7 @@ zebra-rpc = { path = "../zebra-rpc", version = "1.0.0-beta.28", optional = true itertools = { version = "0.11.0", optional = true } # These crates are needed for the search-issue-refs binary -regex = { version = "1.9.1", optional = true } +regex = { version = "1.9.3", optional = true } # Avoid default openssl dependency to reduce the dependency tree and security alerts. 
reqwest = { version = "0.11.18", default-features = false, features = ["rustls-tls"], optional = true } diff --git a/zebrad/Cargo.toml b/zebrad/Cargo.toml index f65e0213f61..427500454e1 100644 --- a/zebrad/Cargo.toml +++ b/zebrad/Cargo.toml @@ -159,7 +159,7 @@ humantime-serde = "1.1.1" indexmap = "2.0.0" lazy_static = "1.4.0" semver = "1.0.18" -serde = { version = "1.0.175", features = ["serde_derive"] } +serde = { version = "1.0.179", features = ["serde_derive"] } toml = "0.7.6" futures = "0.3.28" @@ -233,10 +233,10 @@ abscissa_core = { version = "0.7.0", features = ["testing"] } hex = "0.4.3" jsonrpc-core = "18.0.0" once_cell = "1.18.0" -regex = "1.9.1" +regex = "1.9.3" # zebra-rpc needs the preserve_order feature, it also makes test results more stable -serde_json = { version = "1.0.100", features = ["preserve_order"] } +serde_json = { version = "1.0.104", features = ["preserve_order"] } tempfile = "3.7.0" hyper = { version = "0.14.27", features = ["http1", "http2", "server"]} From 77c00aef7305430efe49708a102ae3e13f6a0080 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 11 Aug 2023 01:39:14 +0000 Subject: [PATCH 258/265] build(deps): bump the progress-bar group with 1 update (#7299) Bumps the progress-bar group with 1 update: [indicatif](https://github.com/console-rs/indicatif). - [Release notes](https://github.com/console-rs/indicatif/releases) - [Commits](https://github.com/console-rs/indicatif/compare/0.17.5...0.17.6) --- updated-dependencies: - dependency-name: indicatif dependency-type: direct:production update-type: version-update:semver-patch dependency-group: progress-bar ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- zebrad/Cargo.toml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index bc088fc7be0..caad4ba7ebd 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2005,9 +2005,9 @@ dependencies = [ [[package]] name = "indicatif" -version = "0.17.5" +version = "0.17.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ff8cc23a7393a397ed1d7f56e6365cba772aba9f9912ab968b03043c395d057" +checksum = "0b297dc40733f23a0e52728a58fa9489a5b7638a324932de16b41adc3ef80730" dependencies = [ "console", "instant", diff --git a/zebrad/Cargo.toml b/zebrad/Cargo.toml index 427500454e1..c9eda5e8d38 100644 --- a/zebrad/Cargo.toml +++ b/zebrad/Cargo.toml @@ -213,7 +213,7 @@ log = "0.4.19" # prod feature progress-bar howudoin = { version = "0.1.2", features = ["term-line"], optional = true } -indicatif = { version = "0.17.5", optional = true } +indicatif = { version = "0.17.6", optional = true } # test feature proptest-impl proptest = { version = "1.2.0", optional = true } From 84927d3dd1588e15856c9ab337ba09cdc39310e0 Mon Sep 17 00:00:00 2001 From: Arya Date: Thu, 10 Aug 2023 23:53:31 -0400 Subject: [PATCH 259/265] Updates test expected logs (#7315) Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com> --- zebrad/tests/acceptance.rs | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/zebrad/tests/acceptance.rs b/zebrad/tests/acceptance.rs index a35c05f5d44..a6fab9d5ec0 100644 --- a/zebrad/tests/acceptance.rs +++ b/zebrad/tests/acceptance.rs @@ -1831,10 +1831,11 @@ fn lightwalletd_integration_test(test_type: TestType) -> Result<()> { } if test_type.needs_lightwalletd_cached_state() { - lightwalletd.expect_stdout_line_matches("Found [0-9]{7} blocks in cache")?; + lightwalletd + 
.expect_stdout_line_matches("Done reading [0-9]{7} blocks from disk cache")?; } else if !test_type.allow_lightwalletd_cached_state() { // Timeout the test if we're somehow accidentally using a cached state in our temp dir - lightwalletd.expect_stdout_line_matches("Found 0 blocks in cache")?; + lightwalletd.expect_stdout_line_matches("Done reading 0 blocks from disk cache")?; } // getblock with the first Sapling block in Zebra's state From 4de8bd2d30c3e0ff12b786e6624e8e70a398b1b4 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 11 Aug 2023 18:34:49 +0000 Subject: [PATCH 260/265] build(deps): bump r7kamura/rust-problem-matchers from 1.3.0 to 1.4.0 (#7311) Bumps [r7kamura/rust-problem-matchers](https://github.com/r7kamura/rust-problem-matchers) from 1.3.0 to 1.4.0. - [Release notes](https://github.com/r7kamura/rust-problem-matchers/releases) - [Commits](https://github.com/r7kamura/rust-problem-matchers/compare/v1.3.0...v1.4.0) --- updated-dependencies: - dependency-name: r7kamura/rust-problem-matchers dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com> --- .github/workflows/build-crates-individually.yml | 4 ++-- .github/workflows/build-docker-image.yml | 2 +- .github/workflows/continous-delivery.yml | 4 ++-- .github/workflows/continous-integration-docker.yml | 14 +++++++------- .github/workflows/continous-integration-os.yml | 10 +++++----- .github/workflows/deploy-gcp-tests.yml | 12 ++++++------ .github/workflows/lint.yml | 4 ++-- .github/workflows/release-crates-io.yml | 2 +- 8 files changed, 26 insertions(+), 26 deletions(-) diff --git a/.github/workflows/build-crates-individually.yml b/.github/workflows/build-crates-individually.yml index 49db2a55866..117585ff80e 100644 --- a/.github/workflows/build-crates-individually.yml +++ b/.github/workflows/build-crates-individually.yml @@ -51,7 +51,7 @@ jobs: matrix: ${{ steps.set-matrix.outputs.matrix }} steps: - uses: actions/checkout@v3.5.3 - - uses: r7kamura/rust-problem-matchers@v1.3.0 + - uses: r7kamura/rust-problem-matchers@v1.4.0 # Setup Rust with stable toolchain and minimal profile - name: Setup Rust @@ -109,7 +109,7 @@ jobs: - uses: actions/checkout@v3.5.3 with: persist-credentials: false - - uses: r7kamura/rust-problem-matchers@v1.3.0 + - uses: r7kamura/rust-problem-matchers@v1.4.0 - name: Install last version of Protoc uses: arduino/setup-protoc@v2.0.0 diff --git a/.github/workflows/build-docker-image.yml b/.github/workflows/build-docker-image.yml index 4c1a8c1cca1..8e6a424e364 100644 --- a/.github/workflows/build-docker-image.yml +++ b/.github/workflows/build-docker-image.yml @@ -64,7 +64,7 @@ jobs: - uses: actions/checkout@v3.5.3 with: persist-credentials: false - - uses: r7kamura/rust-problem-matchers@v1.3.0 + - uses: r7kamura/rust-problem-matchers@v1.4.0 - name: Inject slug/short variables uses: rlespinasse/github-slug-action@v4 diff --git 
a/.github/workflows/continous-delivery.yml b/.github/workflows/continous-delivery.yml index 49b871a261a..35dda6a94b0 100644 --- a/.github/workflows/continous-delivery.yml +++ b/.github/workflows/continous-delivery.yml @@ -123,7 +123,7 @@ jobs: runs-on: ubuntu-latest needs: build steps: - - uses: r7kamura/rust-problem-matchers@v1.3.0 + - uses: r7kamura/rust-problem-matchers@v1.4.0 - name: Inject slug/short variables uses: rlespinasse/github-slug-action@v4 @@ -163,7 +163,7 @@ jobs: runs-on: ubuntu-latest needs: build steps: - - uses: r7kamura/rust-problem-matchers@v1.3.0 + - uses: r7kamura/rust-problem-matchers@v1.4.0 - name: Inject slug/short variables uses: rlespinasse/github-slug-action@v4 diff --git a/.github/workflows/continous-integration-docker.yml b/.github/workflows/continous-integration-docker.yml index ce252fd8af4..bbce29d87f5 100644 --- a/.github/workflows/continous-integration-docker.yml +++ b/.github/workflows/continous-integration-docker.yml @@ -153,7 +153,7 @@ jobs: needs: build if: ${{ github.event.inputs.regenerate-disks != 'true' && github.event.inputs.run-full-sync != 'true' && github.event.inputs.run-lwd-sync != 'true' }} steps: - - uses: r7kamura/rust-problem-matchers@v1.3.0 + - uses: r7kamura/rust-problem-matchers@v1.4.0 - name: Inject slug/short variables uses: rlespinasse/github-slug-action@v4 @@ -179,7 +179,7 @@ jobs: needs: build if: ${{ github.event.inputs.regenerate-disks != 'true' && github.event.inputs.run-full-sync != 'true' && github.event.inputs.run-lwd-sync != 'true' }} steps: - - uses: r7kamura/rust-problem-matchers@v1.3.0 + - uses: r7kamura/rust-problem-matchers@v1.4.0 - name: Inject slug/short variables uses: rlespinasse/github-slug-action@v4 @@ -208,7 +208,7 @@ jobs: needs: build if: ${{ github.event.inputs.regenerate-disks != 'true' && github.event.inputs.run-full-sync != 'true' && github.event.inputs.run-lwd-sync != 'true' }} steps: - - uses: r7kamura/rust-problem-matchers@v1.3.0 + - uses: r7kamura/rust-problem-matchers@v1.4.0 
- name: Inject slug/short variables uses: rlespinasse/github-slug-action@v4 @@ -232,7 +232,7 @@ jobs: needs: build if: ${{ github.event.inputs.regenerate-disks != 'true' && github.event.inputs.run-full-sync != 'true' && github.event.inputs.run-lwd-sync != 'true' }} steps: - - uses: r7kamura/rust-problem-matchers@v1.3.0 + - uses: r7kamura/rust-problem-matchers@v1.4.0 - name: Inject slug/short variables uses: rlespinasse/github-slug-action@v4 @@ -255,7 +255,7 @@ jobs: needs: build if: ${{ github.event.inputs.regenerate-disks != 'true' && github.event.inputs.run-full-sync != 'true' && github.event.inputs.run-lwd-sync != 'true' }} steps: - - uses: r7kamura/rust-problem-matchers@v1.3.0 + - uses: r7kamura/rust-problem-matchers@v1.4.0 - name: Inject slug/short variables uses: rlespinasse/github-slug-action@v4 @@ -278,7 +278,7 @@ jobs: needs: build if: ${{ github.event.inputs.regenerate-disks != 'true' && github.event.inputs.run-full-sync != 'true' && github.event.inputs.run-lwd-sync != 'true' }} steps: - - uses: r7kamura/rust-problem-matchers@v1.3.0 + - uses: r7kamura/rust-problem-matchers@v1.4.0 - name: Inject slug/short variables uses: rlespinasse/github-slug-action@v4 @@ -305,7 +305,7 @@ jobs: needs: build if: ${{ github.event.inputs.regenerate-disks != 'true' && github.event.inputs.run-full-sync != 'true' && github.event.inputs.run-lwd-sync != 'true' }} steps: - - uses: r7kamura/rust-problem-matchers@v1.3.0 + - uses: r7kamura/rust-problem-matchers@v1.4.0 - name: Inject slug/short variables uses: rlespinasse/github-slug-action@v4 diff --git a/.github/workflows/continous-integration-os.yml b/.github/workflows/continous-integration-os.yml index 3447455131b..7242fddfb48 100644 --- a/.github/workflows/continous-integration-os.yml +++ b/.github/workflows/continous-integration-os.yml @@ -92,7 +92,7 @@ jobs: - uses: actions/checkout@v3.5.3 with: persist-credentials: false - - uses: r7kamura/rust-problem-matchers@v1.3.0 + - uses: r7kamura/rust-problem-matchers@v1.4.0 - name: 
Install last version of Protoc uses: arduino/setup-protoc@v2.0.0 @@ -206,7 +206,7 @@ jobs: - uses: actions/checkout@v3.5.3 with: persist-credentials: false - - uses: r7kamura/rust-problem-matchers@v1.3.0 + - uses: r7kamura/rust-problem-matchers@v1.4.0 # Setup Rust with stable toolchain and minimal profile - name: Setup Rust @@ -228,7 +228,7 @@ jobs: - uses: actions/checkout@v3.5.3 with: persist-credentials: false - - uses: r7kamura/rust-problem-matchers@v1.3.0 + - uses: r7kamura/rust-problem-matchers@v1.4.0 - name: Install last version of Protoc uses: arduino/setup-protoc@v2.0.0 @@ -272,7 +272,7 @@ jobs: - uses: actions/checkout@v3.5.3 with: persist-credentials: false - - uses: r7kamura/rust-problem-matchers@v1.3.0 + - uses: r7kamura/rust-problem-matchers@v1.4.0 - name: Check ${{ matrix.checks }} with features ${{ matrix.features }} uses: EmbarkStudios/cargo-deny-action@v1 @@ -293,7 +293,7 @@ jobs: uses: actions/checkout@v3.5.3 with: persist-credentials: false - - uses: r7kamura/rust-problem-matchers@v1.3.0 + - uses: r7kamura/rust-problem-matchers@v1.4.0 # Setup Rust with stable toolchain and minimal profile - name: Setup Rust diff --git a/.github/workflows/deploy-gcp-tests.yml b/.github/workflows/deploy-gcp-tests.yml index da8442fa9ad..dfd5e294edd 100644 --- a/.github/workflows/deploy-gcp-tests.yml +++ b/.github/workflows/deploy-gcp-tests.yml @@ -118,7 +118,7 @@ jobs: with: persist-credentials: false fetch-depth: '2' - - uses: r7kamura/rust-problem-matchers@v1.3.0 + - uses: r7kamura/rust-problem-matchers@v1.4.0 - name: Inject slug/short variables uses: rlespinasse/github-slug-action@v4 @@ -213,7 +213,7 @@ jobs: with: persist-credentials: false fetch-depth: '2' - - uses: r7kamura/rust-problem-matchers@v1.3.0 + - uses: r7kamura/rust-problem-matchers@v1.4.0 - name: Inject slug/short variables uses: rlespinasse/github-slug-action@v4 @@ -279,7 +279,7 @@ jobs: with: persist-credentials: false fetch-depth: '2' - - uses: r7kamura/rust-problem-matchers@v1.3.0 + - uses: 
r7kamura/rust-problem-matchers@v1.4.0 - name: Inject slug/short variables uses: rlespinasse/github-slug-action@v4 @@ -457,7 +457,7 @@ jobs: with: persist-credentials: false fetch-depth: '2' - - uses: r7kamura/rust-problem-matchers@v1.3.0 + - uses: r7kamura/rust-problem-matchers@v1.4.0 - name: Inject slug/short variables uses: rlespinasse/github-slug-action@v4 @@ -1716,7 +1716,7 @@ jobs: with: persist-credentials: false fetch-depth: '2' - - uses: r7kamura/rust-problem-matchers@v1.3.0 + - uses: r7kamura/rust-problem-matchers@v1.4.0 - name: Inject slug/short variables uses: rlespinasse/github-slug-action@v4 @@ -2014,7 +2014,7 @@ jobs: with: persist-credentials: false fetch-depth: '2' - - uses: r7kamura/rust-problem-matchers@v1.3.0 + - uses: r7kamura/rust-problem-matchers@v1.4.0 - name: Inject slug/short variables uses: rlespinasse/github-slug-action@v4 diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index f07a0b001bc..b1133910a09 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -115,7 +115,7 @@ jobs: - uses: actions/checkout@v3.5.3 with: persist-credentials: false - - uses: r7kamura/rust-problem-matchers@v1.3.0 + - uses: r7kamura/rust-problem-matchers@v1.4.0 - name: Install last version of Protoc uses: arduino/setup-protoc@v2.0.0 @@ -154,7 +154,7 @@ jobs: - uses: actions/checkout@v3.5.3 with: persist-credentials: false - - uses: r7kamura/rust-problem-matchers@v1.3.0 + - uses: r7kamura/rust-problem-matchers@v1.4.0 - name: Install last version of Protoc uses: arduino/setup-protoc@v2.0.0 diff --git a/.github/workflows/release-crates-io.yml b/.github/workflows/release-crates-io.yml index 350bf4d4bc1..cd0764b9041 100644 --- a/.github/workflows/release-crates-io.yml +++ b/.github/workflows/release-crates-io.yml @@ -67,7 +67,7 @@ jobs: timeout-minutes: 15 runs-on: ubuntu-latest steps: - - uses: r7kamura/rust-problem-matchers@v1.3.0 + - uses: r7kamura/rust-problem-matchers@v1.4.0 - name: Checkout git repository uses: 
actions/checkout@v3.5.3 From 38b227f06ca849f0b58b4e244261fe2943e8e9d5 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sun, 13 Aug 2023 16:06:32 +0000 Subject: [PATCH 261/265] build(deps): bump the async group with 1 update (#7300) Bumps the async group with 1 update: [pin-project](https://github.com/taiki-e/pin-project). - [Release notes](https://github.com/taiki-e/pin-project/releases) - [Changelog](https://github.com/taiki-e/pin-project/blob/main/CHANGELOG.md) - [Commits](https://github.com/taiki-e/pin-project/compare/v1.1.2...v1.1.3) --- updated-dependencies: - dependency-name: pin-project dependency-type: direct:production update-type: version-update:semver-patch dependency-group: async ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com> --- Cargo.lock | 8 ++++---- tower-batch-control/Cargo.toml | 2 +- tower-fallback/Cargo.toml | 2 +- zebra-network/Cargo.toml | 2 +- zebrad/Cargo.toml | 2 +- 5 files changed, 8 insertions(+), 8 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index caad4ba7ebd..ef3b771d9ce 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2902,18 +2902,18 @@ dependencies = [ [[package]] name = "pin-project" -version = "1.1.2" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "030ad2bc4db10a8944cb0d837f158bdfec4d4a4873ab701a95046770d11f8842" +checksum = "fda4ed1c6c173e3fc7a83629421152e01d7b1f9b7f65fb301e490e8cfc656422" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.1.2" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec2e072ecce94ec471b13398d5402c188e76ac03cf74dd1a975161b23a3f6d9c" +checksum = "4359fd9c9171ec6e8c62926d6faaf553a8dc3f64e1507e76da7911b4f6a04405" dependencies = [ "proc-macro2 1.0.66", "quote 
1.0.31", diff --git a/tower-batch-control/Cargo.toml b/tower-batch-control/Cargo.toml index ad29bbf4819..1df5a5e52e3 100644 --- a/tower-batch-control/Cargo.toml +++ b/tower-batch-control/Cargo.toml @@ -24,7 +24,7 @@ categories = ["algorithms", "asynchronous"] [dependencies] futures = "0.3.28" futures-core = "0.3.28" -pin-project = "1.1.2" +pin-project = "1.1.3" rayon = "1.7.0" tokio = { version = "1.29.1", features = ["time", "sync", "tracing", "macros"] } tokio-util = "0.7.8" diff --git a/tower-fallback/Cargo.toml b/tower-fallback/Cargo.toml index 7fcc4387dcd..6cd7ed219e5 100644 --- a/tower-fallback/Cargo.toml +++ b/tower-fallback/Cargo.toml @@ -16,7 +16,7 @@ keywords = ["tower", "batch"] categories = ["algorithms", "asynchronous"] [dependencies] -pin-project = "1.1.2" +pin-project = "1.1.3" tower = "0.4.13" futures-core = "0.3.28" tracing = "0.1.37" diff --git a/zebra-network/Cargo.toml b/zebra-network/Cargo.toml index 0510ba13818..2cdb5cbad97 100644 --- a/zebra-network/Cargo.toml +++ b/zebra-network/Cargo.toml @@ -52,7 +52,7 @@ itertools = "0.11.0" lazy_static = "1.4.0" num-integer = "0.1.45" ordered-map = "0.4.2" -pin-project = "1.1.2" +pin-project = "1.1.3" rand = "0.8.5" rayon = "1.7.0" regex = "1.9.3" diff --git a/zebrad/Cargo.toml b/zebrad/Cargo.toml index c9eda5e8d38..924636e57a9 100644 --- a/zebrad/Cargo.toml +++ b/zebrad/Cargo.toml @@ -166,7 +166,7 @@ futures = "0.3.28" rayon = "1.7.0" tokio = { version = "1.29.1", features = ["time", "rt-multi-thread", "macros", "tracing", "signal"] } tower = { version = "0.4.13", features = ["hedge", "limit"] } -pin-project = "1.1.2" +pin-project = "1.1.3" color-eyre = { version = "0.6.2", default-features = false, features = ["issue-url"] } # This is a transitive dependency via color-eyre. 
From 63597307c14c5ded3c27b9fb53edf69f3a0ba163 Mon Sep 17 00:00:00 2001 From: Gustavo Valverde Date: Tue, 15 Aug 2023 15:13:00 +0100 Subject: [PATCH 262/265] fix(doc): consolidate firebase hosting in prod project (#7313) * fix(doc): consolidate firebase hosting in prod project * fix(deploy): use correct environment and permissions * ref(firebase): use a single firebase configuration * fix(firebase): entrypoint no longer needed * ref(docs): deploy docs on PR for preview * fix(docs): allow to comment on PR * fix(docs): allow enough time to build docs * fix(docs): cancel previous docs build * fix(docs): use a more specific group * fix(firebase): extra permissions * fix: commit `.firebaserc` * fix(mdbook): use different actual directory * fix(deploy): increase Rust build timeout * chore: more timeout for building and publishing * fix(deploy): login to GCP before deploying This is to avoid timeout issues with the GCP credentials * chore: lint code * chore: revert SC2002 fix try --- .firebaserc | 21 ++++ .github/workflows/docs.yml | 191 +++++++++++++++++++++++++++++-------- .gitignore | 3 +- book/firebase.json | 16 ---- firebase.json | 82 ++++++++++++---- 5 files changed, 235 insertions(+), 78 deletions(-) create mode 100644 .firebaserc delete mode 100644 book/firebase.json diff --git a/.firebaserc b/.firebaserc new file mode 100644 index 00000000000..edf98286677 --- /dev/null +++ b/.firebaserc @@ -0,0 +1,21 @@ +{ + "projects": { + "default": "zfnd-prod-zebra" + }, + "targets": { + "zfnd-prod-zebra": { + "hosting": { + "docs-book": [ + "zebra-docs-book" + ], + "docs-external": [ + "zebra-docs-external" + ], + "docs-internal": [ + "zebra-docs-internal" + ] + } + } + }, + "etags": {} +} diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml index 62340ce123e..9c7ee5aa519 100644 --- a/.github/workflows/docs.yml +++ b/.github/workflows/docs.yml @@ -4,8 +4,8 @@ name: Docs # already in process, won't get cancelled. 
Instead, we let the first to complete # then queue the latest pending workflow, cancelling any workflows in between concurrency: - group: ${{ github.workflow }}-${{ github.ref }} - cancel-in-progress: false + group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} + cancel-in-progress: true on: workflow_dispatch: @@ -27,17 +27,93 @@ on: # workflow definitions - '.github/workflows/docs.yml' + pull_request: + branches: + - main + paths: + # doc source files + - 'book/**' + - '**/firebase.json' + - 'katex-header.html' + # rustdoc source files + - '**/*.rs' + - '**/Cargo.toml' + - '**/Cargo.lock' + # configuration files + - '.cargo/config.toml' + - '**/clippy.toml' + # workflow definitions + - '.github/workflows/docs.yml' + env: RUST_LOG: ${{ vars.RUST_LOG }} RUST_BACKTRACE: ${{ vars.RUST_BACKTRACE }} RUST_LIB_BACKTRACE: ${{ vars.RUST_LIB_BACKTRACE }} COLORBT_SHOW_HIDDEN: ${{ vars.COLORBT_SHOW_HIDDEN }} + FIREBASE_CHANNEL: ${{ github.event_name == 'pull_request' && 'preview' || 'live' }} jobs: - build: - name: Build and Deploy Docs (+beta) + build-docs-book: + name: Build and Deploy Zebra Book Docs + timeout-minutes: 5 + runs-on: ubuntu-latest + environment: docs + permissions: + checks: write + contents: 'read' + id-token: 'write' + pull-requests: write + steps: + - name: Checkout the source code + uses: actions/checkout@v3.5.3 + with: + persist-credentials: false + + - name: Setup mdBook + uses: jontze/action-mdbook@v2.2.1 + with: + token: ${{ secrets.GITHUB_TOKEN }} + mdbook-version: '~0.4' + use-linkcheck: true + use-mermaid: true + + - name: Build Zebra book + run: | + mdbook build book --dest-dir "$(pwd)"/target/book + + # Setup gcloud CLI + - name: Authenticate to Google Cloud + id: auth + uses: google-github-actions/auth@v1.1.1 + with: + retries: '3' + workload_identity_provider: '${{ vars.GCP_WIF }}' + service_account: '${{ vars.GCP_FIREBASE_SA }}' + + # TODO: remove this step after issue 
https://github.com/FirebaseExtended/action-hosting-deploy/issues/174 is fixed + - run: | + # shellcheck disable=SC2002 + echo "GCP_FIREBASE_SA=$(cat ${{ steps.auth.outputs.credentials_file_path }} | tr -d '\n')" >> "$GITHUB_ENV" + + - name: Deploy Zebra book to firebase + uses: FirebaseExtended/action-hosting-deploy@v0.7.1 + with: + repoToken: ${{ secrets.GITHUB_TOKEN }} + firebaseServiceAccount: ${{ env.GCP_FIREBASE_SA }} + channelId: ${{ env.FIREBASE_CHANNEL }} + projectId: ${{ vars.GCP_PROJECT }} + target: docs-book + + build-docs-external: + name: Build and Deploy Zebra External Docs timeout-minutes: 45 runs-on: ubuntu-latest + environment: docs + permissions: + checks: write + contents: 'read' + id-token: 'write' + pull-requests: write steps: - name: Checkout the source code uses: actions/checkout@v3.5.3 @@ -47,7 +123,6 @@ jobs: - name: Install last version of Protoc uses: arduino/setup-protoc@v2.0.0 with: - # TODO: increase to latest version after https://github.com/arduino/setup-protoc/issues/33 is fixed version: '23.x' repo-token: ${{ secrets.GITHUB_TOKEN }} @@ -58,54 +133,90 @@ jobs: - uses: Swatinem/rust-cache@v2.6.0 - - name: Setup mdBook - uses: peaceiris/actions-mdbook@v1.2.0 - with: - mdbook-version: '0.4.18' - - # TODO: actions-mdbook does not yet have an option to install mdbook-mermaid https://github.com/peaceiris/actions-mdbook/issues/426 - - name: Install mdbook - run: | - cargo install mdbook-mermaid - - - name: Build Zebra book - run: | - mdbook build book/ - - - name: Deploy Zebra book to firebase - uses: w9jds/firebase-action@v12.4.0 - with: - args: deploy - env: - FIREBASE_TOKEN: ${{ secrets.FIREBASE_TOKEN }} - PROJECT_PATH: book/ - PROJECT_ID: zebra-book-b535f - - name: Build external docs run: | # Exclude zebra-utils, it is not for library or app users - cargo doc --no-deps --workspace --all-features --exclude zebra-utils + cargo doc --no-deps --workspace --all-features --exclude zebra-utils --target-dir target/external env: RUSTDOCFLAGS: 
'--html-in-header katex-header.html' + # Setup gcloud CLI + - name: Authenticate to Google Cloud + id: auth + uses: google-github-actions/auth@v1.1.1 + with: + retries: '3' + workload_identity_provider: '${{ vars.GCP_WIF }}' + service_account: '${{ vars.GCP_FIREBASE_SA }}' + + # TODO: remove this step after issue https://github.com/FirebaseExtended/action-hosting-deploy/issues/174 is fixed + - run: | + # shellcheck disable=SC2002 + echo "GCP_FIREBASE_SA=$(cat ${{ steps.auth.outputs.credentials_file_path }} | tr -d '\n')" >> "$GITHUB_ENV" + - name: Deploy external docs to firebase - uses: w9jds/firebase-action@v12.4.0 + uses: FirebaseExtended/action-hosting-deploy@v0.7.1 with: - args: deploy - env: - FIREBASE_TOKEN: ${{ secrets.FIREBASE_TOKEN }} - PROJECT_ID: zebra-doc-external + repoToken: ${{ secrets.GITHUB_TOKEN }} + firebaseServiceAccount: ${{ env.GCP_FIREBASE_SA }} + channelId: ${{ env.FIREBASE_CHANNEL }} + target: docs-external + projectId: ${{ vars.GCP_PROJECT }} + + build-docs-internal: + name: Build and Deploy Zebra Internal Docs + timeout-minutes: 45 + runs-on: ubuntu-latest + environment: docs + permissions: + checks: write + contents: 'read' + id-token: 'write' + pull-requests: write + steps: + - name: Checkout the source code + uses: actions/checkout@v3.5.3 + with: + persist-credentials: false + + - name: Install last version of Protoc + uses: arduino/setup-protoc@v2.0.0 + with: + version: '23.x' + repo-token: ${{ secrets.GITHUB_TOKEN }} + + # Setup Rust with beta toolchain and default profile (to include rust-docs) + - name: Setup Rust + run: | + curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --default-toolchain=beta --profile=default + + - uses: Swatinem/rust-cache@v2.6.0 - name: Build internal docs run: | - cargo doc --no-deps --workspace --all-features --document-private-items + cargo doc --no-deps --workspace --all-features --document-private-items --target-dir target/internal env: RUSTDOCFLAGS: '--html-in-header 
katex-header.html' + # Setup gcloud CLI + - name: Authenticate to Google Cloud + id: auth + uses: google-github-actions/auth@v1.1.1 + with: + retries: '3' + workload_identity_provider: '${{ vars.GCP_WIF }}' + service_account: '${{ vars.GCP_FIREBASE_SA }}' + + # TODO: remove this step after issue https://github.com/FirebaseExtended/action-hosting-deploy/issues/174 is fixed + - run: | + # shellcheck disable=SC2002 + echo "GCP_FIREBASE_SA=$(cat ${{ steps.auth.outputs.credentials_file_path }} | tr -d '\n')" >> "$GITHUB_ENV" + - name: Deploy internal docs to firebase - uses: w9jds/firebase-action@v12.4.0 + uses: FirebaseExtended/action-hosting-deploy@v0.7.1 with: - args: deploy - env: - FIREBASE_TOKEN: ${{ secrets.FIREBASE_TOKEN }} - PROJECT_ID: zebra-doc-internal-e9fd4 + repoToken: ${{ secrets.GITHUB_TOKEN }} + firebaseServiceAccount: ${{ env.GCP_FIREBASE_SA }} + channelId: ${{ env.FIREBASE_CHANNEL }} + target: docs-internal + projectId: ${{ vars.GCP_PROJECT }} diff --git a/.gitignore b/.gitignore index 07b32ac0abe..ef29d45439f 100644 --- a/.gitignore +++ b/.gitignore @@ -63,7 +63,8 @@ flycheck_*.el ### Firebase ### .idea **/node_modules/* -**/.firebaserc +# We need to check in the .firebaserc file because it contains the target names +# **/.firebaserc ### Firebase Patch ### .runtimeconfig.json diff --git a/book/firebase.json b/book/firebase.json deleted file mode 100644 index 6fe0e74e2c3..00000000000 --- a/book/firebase.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "hosting": { - "public": "book", - "ignore": [ - "firebase.json", - "**/.*", - "**/node_modules/**" - ], - "rewrites": [ - { - "source": "**", - "destination": "/index.html" - } - ] - } -} \ No newline at end of file diff --git a/firebase.json b/firebase.json index d69ad9a177b..7dd48adfe68 100644 --- a/firebase.json +++ b/firebase.json @@ -1,23 +1,63 @@ { - "hosting": { - "public": "target/doc", - "ignore": [ - "firebase.json", - "**/.*", - "**/node_modules/**" - ], - "rewrites": [ - { - "source": "**", - 
"destination": "/index.html" - } - ], - "redirects": [ - { - "source": "/", - "destination": "/zebrad", - "type": 301 - } - ] - } + "hosting": [ + { + "public": "target/external", + "target": "docs-external", + "ignore": [ + "firebase.json", + "**/.*", + "**/node_modules/**" + ], + "rewrites": [ + { + "source": "**", + "destination": "/index.html" + } + ], + "redirects": [ + { + "source": "/", + "destination": "/zebrad", + "type": 301 + } + ] + }, + { + "public": "target/internal", + "target": "docs-internal", + "ignore": [ + "firebase.json", + "**/.*", + "**/node_modules/**" + ], + "rewrites": [ + { + "source": "**", + "destination": "/index.html" + } + ], + "redirects": [ + { + "source": "/", + "destination": "/zebrad", + "type": 301 + } + ] + }, + { + "public": "target/book", + "target": "docs-book", + "ignore": [ + "firebase.json", + "**/.*", + "**/node_modules/**" + ], + "rewrites": [ + { + "source": "**", + "destination": "/index.html" + } + ] + } + ] } From b322748b66ab886a8ffa105dc08a852423041dbb Mon Sep 17 00:00:00 2001 From: Alfredo Garcia Date: Tue, 15 Aug 2023 15:48:50 -0300 Subject: [PATCH 263/265] add(rpc): note tree sizes to `getblock` api (#7278) * add the basics * add some docs, move code * upgrade compact formats to https://github.com/zcash/lightwalletd/blob/v0.4.15/walletrpc/compact_formats.proto * add a test for in sync chain * test changing to ecc lightwalletd * revert change of lightwalletd repo (already merged to main) * add debug log to see whats going on with the test * change log to tracing::info * remove log line --------- Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com> --- zebra-rpc/src/methods.rs | 93 +++++++++++++++++++ ...k_verbose_hash_verbosity_1@mainnet_10.snap | 3 +- ...k_verbose_hash_verbosity_1@testnet_10.snap | 3 +- ...ose_hash_verbosity_default@mainnet_10.snap | 3 +- ...ose_hash_verbosity_default@testnet_10.snap | 3 +- ...verbose_height_verbosity_1@mainnet_10.snap | 3 +- 
...verbose_height_verbosity_1@testnet_10.snap | 3 +- ...e_height_verbosity_default@mainnet_10.snap | 3 +- ...e_height_verbosity_default@testnet_10.snap | 3 +- zebra-rpc/src/methods/tests/vectors.rs | 9 ++ .../lightwalletd/proto/compact_formats.proto | 46 +++++---- .../common/lightwalletd/wallet_grpc_test.rs | 44 ++++++--- 12 files changed, 178 insertions(+), 38 deletions(-) diff --git a/zebra-rpc/src/methods.rs b/zebra-rpc/src/methods.rs index 263dbddf373..0f9ccd9906f 100644 --- a/zebra-rpc/src/methods.rs +++ b/zebra-rpc/src/methods.rs @@ -744,11 +744,68 @@ where // this needs a new state request for the height -> hash index let height = hash_or_height.height(); + // Sapling trees + // + // # Concurrency + // + // We look up by block hash so the hash, transaction IDs, and confirmations + // are consistent. + let request = zebra_state::ReadRequest::SaplingTree(hash.into()); + let response = state + .ready() + .and_then(|service| service.call(request)) + .await + .map_err(|error| Error { + code: ErrorCode::ServerError(0), + message: error.to_string(), + data: None, + })?; + + let sapling_note_commitment_tree_count = match response { + zebra_state::ReadResponse::SaplingTree(Some(nct)) => nct.count(), + zebra_state::ReadResponse::SaplingTree(None) => 0, + _ => unreachable!("unmatched response to a SaplingTree request"), + }; + + // Orchard trees + // + // # Concurrency + // + // We look up by block hash so the hash, transaction IDs, and confirmations + // are consistent. 
+ let request = zebra_state::ReadRequest::OrchardTree(hash.into()); + let response = state + .ready() + .and_then(|service| service.call(request)) + .await + .map_err(|error| Error { + code: ErrorCode::ServerError(0), + message: error.to_string(), + data: None, + })?; + + let orchard_note_commitment_tree_count = match response { + zebra_state::ReadResponse::OrchardTree(Some(nct)) => nct.count(), + zebra_state::ReadResponse::OrchardTree(None) => 0, + _ => unreachable!("unmatched response to a OrchardTree request"), + }; + + let sapling = SaplingTrees { + size: sapling_note_commitment_tree_count, + }; + + let orchard = OrchardTrees { + size: orchard_note_commitment_tree_count, + }; + + let trees = GetBlockTrees { sapling, orchard }; + Ok(GetBlock::Object { hash: GetBlockHash(hash), confirmations, height, tx, + trees, }) } else { Err(Error { @@ -1362,6 +1419,9 @@ pub enum GetBlock { // // TODO: use a typed Vec here tx: Vec, + + /// Information about the note commitment trees. + trees: GetBlockTrees, }, } @@ -1524,6 +1584,39 @@ impl GetRawTransaction { } } +/// Information about the sapling and orchard note commitment trees if any. +#[derive(Copy, Clone, Debug, Eq, PartialEq, serde::Deserialize, serde::Serialize)] +pub struct GetBlockTrees { + #[serde(skip_serializing_if = "SaplingTrees::is_empty")] + sapling: SaplingTrees, + #[serde(skip_serializing_if = "OrchardTrees::is_empty")] + orchard: OrchardTrees, +} + +/// Sapling note commitment tree information. +#[derive(Copy, Clone, Debug, Eq, PartialEq, serde::Deserialize, serde::Serialize)] +pub struct SaplingTrees { + size: u64, +} + +impl SaplingTrees { + fn is_empty(&self) -> bool { + self.size == 0 + } +} + +/// Orchard note commitment tree information. 
+#[derive(Copy, Clone, Debug, Eq, PartialEq, serde::Deserialize, serde::Serialize)] +pub struct OrchardTrees { + size: u64, +} + +impl OrchardTrees { + fn is_empty(&self) -> bool { + self.size == 0 + } +} + /// Check if provided height range is valid for address indexes. fn check_height_range(start: Height, end: Height, chain_height: Height) -> Result<()> { if start == Height(0) || end == Height(0) { diff --git a/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_hash_verbosity_1@mainnet_10.snap b/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_hash_verbosity_1@mainnet_10.snap index d4d6b540a83..6bed7d59cd2 100644 --- a/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_hash_verbosity_1@mainnet_10.snap +++ b/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_hash_verbosity_1@mainnet_10.snap @@ -7,5 +7,6 @@ expression: block "confirmations": 10, "tx": [ "851bf6fbf7a976327817c738c489d7fa657752445430922d94c983c0b9ed4609" - ] + ], + "trees": {} } diff --git a/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_hash_verbosity_1@testnet_10.snap b/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_hash_verbosity_1@testnet_10.snap index 393f918ebef..fe2c9527562 100644 --- a/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_hash_verbosity_1@testnet_10.snap +++ b/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_hash_verbosity_1@testnet_10.snap @@ -7,5 +7,6 @@ expression: block "confirmations": 10, "tx": [ "f37e9f691fffb635de0999491d906ee85ba40cd36dae9f6e5911a8277d7c5f75" - ] + ], + "trees": {} } diff --git a/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_hash_verbosity_default@mainnet_10.snap b/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_hash_verbosity_default@mainnet_10.snap index d4d6b540a83..6bed7d59cd2 100644 --- a/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_hash_verbosity_default@mainnet_10.snap +++ 
b/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_hash_verbosity_default@mainnet_10.snap @@ -7,5 +7,6 @@ expression: block "confirmations": 10, "tx": [ "851bf6fbf7a976327817c738c489d7fa657752445430922d94c983c0b9ed4609" - ] + ], + "trees": {} } diff --git a/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_hash_verbosity_default@testnet_10.snap b/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_hash_verbosity_default@testnet_10.snap index 393f918ebef..fe2c9527562 100644 --- a/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_hash_verbosity_default@testnet_10.snap +++ b/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_hash_verbosity_default@testnet_10.snap @@ -7,5 +7,6 @@ expression: block "confirmations": 10, "tx": [ "f37e9f691fffb635de0999491d906ee85ba40cd36dae9f6e5911a8277d7c5f75" - ] + ], + "trees": {} } diff --git a/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_height_verbosity_1@mainnet_10.snap b/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_height_verbosity_1@mainnet_10.snap index ad487d39140..3d66b2dffa2 100644 --- a/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_height_verbosity_1@mainnet_10.snap +++ b/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_height_verbosity_1@mainnet_10.snap @@ -8,5 +8,6 @@ expression: block "height": 1, "tx": [ "851bf6fbf7a976327817c738c489d7fa657752445430922d94c983c0b9ed4609" - ] + ], + "trees": {} } diff --git a/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_height_verbosity_1@testnet_10.snap b/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_height_verbosity_1@testnet_10.snap index 02469914e6d..f79a4283b50 100644 --- a/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_height_verbosity_1@testnet_10.snap +++ b/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_height_verbosity_1@testnet_10.snap @@ -8,5 +8,6 @@ expression: block "height": 1, "tx": [ "f37e9f691fffb635de0999491d906ee85ba40cd36dae9f6e5911a8277d7c5f75" - ] + ], + "trees": 
{} } diff --git a/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_height_verbosity_default@mainnet_10.snap b/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_height_verbosity_default@mainnet_10.snap index ad487d39140..3d66b2dffa2 100644 --- a/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_height_verbosity_default@mainnet_10.snap +++ b/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_height_verbosity_default@mainnet_10.snap @@ -8,5 +8,6 @@ expression: block "height": 1, "tx": [ "851bf6fbf7a976327817c738c489d7fa657752445430922d94c983c0b9ed4609" - ] + ], + "trees": {} } diff --git a/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_height_verbosity_default@testnet_10.snap b/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_height_verbosity_default@testnet_10.snap index 02469914e6d..f79a4283b50 100644 --- a/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_height_verbosity_default@testnet_10.snap +++ b/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_height_verbosity_default@testnet_10.snap @@ -8,5 +8,6 @@ expression: block "height": 1, "tx": [ "f37e9f691fffb635de0999491d906ee85ba40cd36dae9f6e5911a8277d7c5f75" - ] + ], + "trees": {} } diff --git a/zebra-rpc/src/methods/tests/vectors.rs b/zebra-rpc/src/methods/tests/vectors.rs index 8cb49e40c2e..6490b8c88dc 100644 --- a/zebra-rpc/src/methods/tests/vectors.rs +++ b/zebra-rpc/src/methods/tests/vectors.rs @@ -121,6 +121,11 @@ async fn rpc_getblock() { assert_eq!(get_block, expected_result); } + // Create empty note commitment tree information. 
+ let sapling = SaplingTrees { size: 0 }; + let orchard = OrchardTrees { size: 0 }; + let trees = GetBlockTrees { sapling, orchard }; + // Make height calls with verbosity=1 and check response for (i, block) in blocks.iter().enumerate() { let get_block = rpc @@ -139,6 +144,7 @@ async fn rpc_getblock() { .iter() .map(|tx| tx.hash().encode_hex()) .collect(), + trees, } ); } @@ -161,6 +167,7 @@ async fn rpc_getblock() { .iter() .map(|tx| tx.hash().encode_hex()) .collect(), + trees, } ); } @@ -183,6 +190,7 @@ async fn rpc_getblock() { .iter() .map(|tx| tx.hash().encode_hex()) .collect(), + trees, } ); } @@ -205,6 +213,7 @@ async fn rpc_getblock() { .iter() .map(|tx| tx.hash().encode_hex()) .collect(), + trees, } ); } diff --git a/zebrad/tests/common/lightwalletd/proto/compact_formats.proto b/zebrad/tests/common/lightwalletd/proto/compact_formats.proto index f2129f2cbf7..09df06d48be 100644 --- a/zebrad/tests/common/lightwalletd/proto/compact_formats.proto +++ b/zebrad/tests/common/lightwalletd/proto/compact_formats.proto @@ -1,4 +1,4 @@ -// Copyright (c) 2019-2020 The Zcash developers +// Copyright (c) 2019-2021 The Zcash developers // Distributed under the MIT software license, see the accompanying // file COPYING or https://www.opensource.org/licenses/mit-license.php . @@ -6,39 +6,50 @@ syntax = "proto3"; package cash.z.wallet.sdk.rpc; option go_package = "lightwalletd/walletrpc"; option swift_prefix = ""; + // Remember that proto3 fields are all optional. A field that is not present will be set to its zero value. // bytes fields of hashes are in canonical little-endian format. +// ChainMetadata represents information about the state of the chain as of a given block. 
+message ChainMetadata { + uint32 saplingCommitmentTreeSize = 1; // the size of the Sapling note commitment tree as of the end of this block + uint32 orchardCommitmentTreeSize = 2; // the size of the Orchard note commitment tree as of the end of this block +} + // CompactBlock is a packaging of ONLY the data from a block that's needed to: // 1. Detect a payment to your shielded Sapling address // 2. Detect a spend of your shielded Sapling notes // 3. Update your witnesses to generate new Sapling spend proofs. message CompactBlock { - uint32 protoVersion = 1; // the version of this wire format, for storage - uint64 height = 2; // the height of this block - bytes hash = 3; // the ID (hash) of this block, same as in block explorers - bytes prevHash = 4; // the ID (hash) of this block's predecessor - uint32 time = 5; // Unix epoch time when the block was mined - bytes header = 6; // (hash, prevHash, and time) OR (full header) - repeated CompactTx vtx = 7; // zero or more compact transactions from this block + uint32 protoVersion = 1; // the version of this wire format, for storage + uint64 height = 2; // the height of this block + bytes hash = 3; // the ID (hash) of this block, same as in block explorers + bytes prevHash = 4; // the ID (hash) of this block's predecessor + uint32 time = 5; // Unix epoch time when the block was mined + bytes header = 6; // (hash, prevHash, and time) OR (full header) + repeated CompactTx vtx = 7; // zero or more compact transactions from this block + ChainMetadata chainMetadata = 8; // information about the state of the chain as of this block } // CompactTx contains the minimum information for a wallet to know if this transaction // is relevant to it (either pays to it or spends from it) via shielded elements // only. This message will not encode a transparent-to-transparent transaction. 
message CompactTx { + // Index and hash will allow the receiver to call out to chain + // explorers or other data structures to retrieve more information + // about this transaction. uint64 index = 1; // the index within the full block bytes hash = 2; // the ID (hash) of this transaction, same as in block explorers // The transaction fee: present if server can provide. In the case of a // stateless server and a transaction with transparent inputs, this will be // unset because the calculation requires reference to prior transactions. - // in a pure-Sapling context, the fee will be calculable as: - // valueBalance + (sum(vPubNew) - sum(vPubOld) - sum(tOut)) + // If there are no transparent inputs, the fee will be calculable as: + // valueBalanceSapling + valueBalanceOrchard + sum(vPubNew) - sum(vPubOld) - sum(tOut) uint32 fee = 3; - repeated CompactSaplingSpend spends = 4; // inputs - repeated CompactSaplingOutput outputs = 5; // outputs + repeated CompactSaplingSpend spends = 4; + repeated CompactSaplingOutput outputs = 5; repeated CompactOrchardAction actions = 6; } @@ -48,11 +59,14 @@ message CompactSaplingSpend { bytes nf = 1; // nullifier (see the Zcash protocol specification) } -// output is a Sapling Output Description as described in section 7.4 of the -// Zcash protocol spec. Total size is 948. +// output encodes the `cmu` field, `ephemeralKey` field, and a 52-byte prefix of the +// `encCiphertext` field of a Sapling Output Description. These fields are described in +// section 7.4 of the Zcash protocol spec: +// https://zips.z.cash/protocol/protocol.pdf#outputencodingandconsensus +// Total size is 116 bytes. 
message CompactSaplingOutput { bytes cmu = 1; // note commitment u-coordinate - bytes epk = 2; // ephemeral public key + bytes ephemeralKey = 2; // ephemeral public key bytes ciphertext = 3; // first 52 bytes of ciphertext } @@ -62,5 +76,5 @@ message CompactOrchardAction { bytes nullifier = 1; // [32] The nullifier of the input note bytes cmx = 2; // [32] The x-coordinate of the note commitment for the output note bytes ephemeralKey = 3; // [32] An encoding of an ephemeral Pallas public key - bytes ciphertext = 4; // [52] The note plaintext component of the encCiphertext field + bytes ciphertext = 4; // [52] The first 52 bytes of the encCiphertext field } diff --git a/zebrad/tests/common/lightwalletd/wallet_grpc_test.rs b/zebrad/tests/common/lightwalletd/wallet_grpc_test.rs index 4b84e4e7753..2001f94f8f1 100644 --- a/zebrad/tests/common/lightwalletd/wallet_grpc_test.rs +++ b/zebrad/tests/common/lightwalletd/wallet_grpc_test.rs @@ -39,7 +39,7 @@ use color_eyre::eyre::Result; use zebra_chain::{ block::Block, parameters::Network, - parameters::NetworkUpgrade::{self, Canopy}, + parameters::NetworkUpgrade::{Nu5, Sapling}, serialization::ZcashDeserializeInto, }; @@ -145,27 +145,43 @@ pub async fn run() -> Result<()> { .await? .into_inner(); - // As we are using a pretty much synchronized blockchain, we can assume the tip is above the Canopy network upgrade - assert!(block_tip.height > Canopy.activation_height(network).unwrap().0 as u64); + // Get `Sapling` activation height. + let sapling_activation_height = Sapling.activation_height(network).unwrap().0 as u64; - // `lightwalletd` only supports post-Sapling blocks, so we begin at the - // Sapling activation height. 
- let sapling_activation_height = NetworkUpgrade::Sapling - .activation_height(network) - .unwrap() - .0 as u64; + // As we are using a pretty much synchronized blockchain, we can assume the tip is above the Nu5 network upgrade + assert!(block_tip.height > Nu5.activation_height(network).unwrap().0 as u64); - // Call `GetBlock` with block 1 height - let block_one = rpc_client + // The first block in the mainnet that has sapling and orchard information. + let block_with_trees = 1687107; + + // Call `GetBlock` with `block_with_trees`. + let get_block_response = rpc_client .get_block(BlockId { - height: sapling_activation_height, + height: block_with_trees, hash: vec![], }) .await? .into_inner(); - // Make sure we got block 1 back - assert_eq!(block_one.height, sapling_activation_height); + // Make sure we got block `block_with_trees` back + assert_eq!(get_block_response.height, block_with_trees); + + // Testing the `trees` field of `GetBlock`. + assert_eq!( + get_block_response + .chain_metadata + .clone() + .unwrap() + .sapling_commitment_tree_size, + 1170439 + ); + assert_eq!( + get_block_response + .chain_metadata + .unwrap() + .orchard_commitment_tree_size, + 2 + ); // Call `GetBlockRange` with the range starting at block 1 up to block 10 let mut block_range = rpc_client From d8168db3e1e2507839495f789b14f2907283934d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 15 Aug 2023 18:49:21 +0000 Subject: [PATCH 264/265] build(deps): bump the test group with 1 update (#7301) Bumps the test group with 1 update: [tempfile](https://github.com/Stebalien/tempfile). - [Changelog](https://github.com/Stebalien/tempfile/blob/master/CHANGELOG.md) - [Commits](https://github.com/Stebalien/tempfile/compare/v3.7.0...v3.7.1) --- updated-dependencies: - dependency-name: tempfile dependency-type: direct:production update-type: version-update:semver-patch dependency-group: test ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- zebra-network/Cargo.toml | 2 +- zebra-state/Cargo.toml | 2 +- zebra-test/Cargo.toml | 2 +- zebrad/Cargo.toml | 2 +- 5 files changed, 6 insertions(+), 6 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ef3b771d9ce..0fe7059d5c2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4200,9 +4200,9 @@ checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" [[package]] name = "tempfile" -version = "3.7.0" +version = "3.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5486094ee78b2e5038a6382ed7645bc084dc2ec433426ca4c3cb61e2007b8998" +checksum = "dc02fddf48964c42031a0b3fe0428320ecf3a73c401040fc0096f97794310651" dependencies = [ "cfg-if 1.0.0", "fastrand", diff --git a/zebra-network/Cargo.toml b/zebra-network/Cargo.toml index 2cdb5cbad97..268c7c39352 100644 --- a/zebra-network/Cargo.toml +++ b/zebra-network/Cargo.toml @@ -57,7 +57,7 @@ rand = "0.8.5" rayon = "1.7.0" regex = "1.9.3" serde = { version = "1.0.179", features = ["serde_derive"] } -tempfile = "3.7.0" +tempfile = "3.7.1" thiserror = "1.0.44" futures = "0.3.28" diff --git a/zebra-state/Cargo.toml b/zebra-state/Cargo.toml index 0827053fa59..51e929bbcf4 100644 --- a/zebra-state/Cargo.toml +++ b/zebra-state/Cargo.toml @@ -58,7 +58,7 @@ rlimit = "0.10.1" rocksdb = { version = "0.21.0", default-features = false, features = ["lz4"] } semver = "1.0.18" serde = { version = "1.0.179", features = ["serde_derive"] } -tempfile = "3.7.0" +tempfile = "3.7.1" thiserror = "1.0.44" rayon = "1.7.0" diff --git a/zebra-test/Cargo.toml b/zebra-test/Cargo.toml index 24de90b03ff..7544d4793cf 100644 --- a/zebra-test/Cargo.toml +++ b/zebra-test/Cargo.toml @@ -43,4 +43,4 @@ tracing-error = "0.2.0" tracing = "0.1.37" [dev-dependencies] -tempfile = "3.7.0" +tempfile = "3.7.1" diff --git a/zebrad/Cargo.toml b/zebrad/Cargo.toml index 
924636e57a9..dcc32e20409 100644 --- a/zebrad/Cargo.toml +++ b/zebrad/Cargo.toml @@ -237,7 +237,7 @@ regex = "1.9.3" # zebra-rpc needs the preserve_order feature, it also makes test results more stable serde_json = { version = "1.0.104", features = ["preserve_order"] } -tempfile = "3.7.0" +tempfile = "3.7.1" hyper = { version = "0.14.27", features = ["http1", "http2", "server"]} tracing-test = { version = "0.2.4", features = ["no-env-filter"] } From 80726e248b1056c573d20c730c70cf2337224533 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 15 Aug 2023 18:50:05 +0000 Subject: [PATCH 265/265] build(deps): bump the app group with 1 update (#7325) Bumps the app group with 1 update: [clap](https://github.com/clap-rs/clap). - [Release notes](https://github.com/clap-rs/clap/releases) - [Changelog](https://github.com/clap-rs/clap/blob/master/CHANGELOG.md) - [Commits](https://github.com/clap-rs/clap/compare/v4.3.19...v4.3.21) --- updated-dependencies: - dependency-name: clap dependency-type: direct:production update-type: version-update:semver-patch dependency-group: app ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 14 +++++++------- zebrad/Cargo.toml | 2 +- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 0fe7059d5c2..84225549cd1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -12,7 +12,7 @@ dependencies = [ "arc-swap", "backtrace", "canonical-path", - "clap 4.3.19", + "clap 4.3.21", "color-eyre", "fs-err", "once_cell", @@ -744,9 +744,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.3.19" +version = "4.3.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5fd304a20bff958a57f04c4e96a2e7594cc4490a0e809cbd48bb6437edaa452d" +checksum = "c27cdf28c0f604ba3f512b0c9a409f8de8513e4816705deb0498b627e7c3a3fd" dependencies = [ "clap_builder", "clap_derive", @@ -755,9 +755,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.3.19" +version = "4.3.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01c6a3f08f1fe5662a35cfe393aec09c4df95f60ee93b7556505260f75eee9e1" +checksum = "08a9f1ab5e9f01a9b81f202e8562eb9a10de70abf9eaeac1be465c28b75aa4aa" dependencies = [ "anstream", "anstyle", @@ -916,7 +916,7 @@ dependencies = [ "anes", "cast", "ciborium", - "clap 4.3.19", + "clap 4.3.21", "criterion-plot", "is-terminal", "itertools 0.10.5", @@ -5751,7 +5751,7 @@ dependencies = [ "abscissa_core", "atty", "chrono", - "clap 4.3.19", + "clap 4.3.21", "color-eyre", "console-subscriber", "dirs", diff --git a/zebrad/Cargo.toml b/zebrad/Cargo.toml index dcc32e20409..27af2edd065 100644 --- a/zebrad/Cargo.toml +++ b/zebrad/Cargo.toml @@ -153,7 +153,7 @@ zebra-state = { path = "../zebra-state", version = "1.0.0-beta.28" } zebra-utils = { path = "../zebra-utils", version = "1.0.0-beta.28", optional = true } abscissa_core = "0.7.0" -clap = { version = "4.3.19", features = ["cargo"] } +clap = { version = "4.3.21", features = ["cargo"] } chrono = { version = 
"0.4.26", default-features = false, features = ["clock", "std"] } humantime-serde = "1.1.1" indexmap = "2.0.0"