diff --git a/.cargo/config.toml b/.cargo/config.toml index 68a0d7b552dc..8573f582e258 100644 --- a/.cargo/config.toml +++ b/.cargo/config.toml @@ -9,6 +9,7 @@ rustdocflags = [ CC_x86_64_unknown_linux_musl = { value = ".cargo/musl-gcc", force = true, relative = true } CXX_x86_64_unknown_linux_musl = { value = ".cargo/musl-g++", force = true, relative = true } CARGO_WORKSPACE_ROOT_DIR = { value = "", relative = true } +SQLX_OFFLINE = "true" [net] retry = 5 diff --git a/.github/workflows/build-publish-eth-rpc.yml b/.github/workflows/build-publish-eth-rpc.yml index 3aa1624096df..a98b3881a145 100644 --- a/.github/workflows/build-publish-eth-rpc.yml +++ b/.github/workflows/build-publish-eth-rpc.yml @@ -12,7 +12,8 @@ concurrency: cancel-in-progress: true env: - IMAGE_NAME: "docker.io/paritypr/eth-rpc" + ETH_RPC_IMAGE_NAME: "docker.io/paritypr/eth-rpc" + ETH_INDEXER_IMAGE_NAME: "docker.io/paritypr/eth-indexer" jobs: set-variables: @@ -34,7 +35,7 @@ jobs: echo "set VERSION=${VERSION}" build_docker: - name: Build docker image + name: Build docker images runs-on: parity-large needs: [set-variables] env: @@ -43,17 +44,26 @@ jobs: - name: Check out the repo uses: actions/checkout@v4 - - name: Build Docker image + - name: Build eth-rpc Docker image uses: docker/build-push-action@v6 with: context: . - file: ./substrate/frame/revive/rpc/Dockerfile + file: ./substrate/frame/revive/rpc/dockerfiles/eth-rpc/Dockerfile push: false tags: | - ${{ env.IMAGE_NAME }}:${{ env.VERSION }} + ${{ env.ETH_RPC_IMAGE_NAME }}:${{ env.VERSION }} + + - name: Build eth-indexer Docker image + uses: docker/build-push-action@v6 + with: + context: . + file: ./substrate/frame/revive/rpc/dockerfiles/eth-indexer/Dockerfile + push: false + tags: | + ${{ env.ETH_INDEXER_IMAGE_NAME }}:${{ env.VERSION }} build_push_docker: - name: Build and push docker image + name: Build and push docker images runs-on: parity-large if: github.ref == 'refs/heads/master' needs: [set-variables] @@ -69,11 +79,20 @@ jobs: username: ${{ secrets.PARITYPR_DOCKERHUB_USERNAME }} password: ${{ secrets.PARITYPR_DOCKERHUB_PASSWORD }} - - name: Build Docker image + - name: Build eth-rpc Docker image + uses: docker/build-push-action@v6 + with: + context: . + file: ./substrate/frame/revive/rpc/dockerfiles/eth-rpc/Dockerfile + push: true + tags: | + ${{ env.ETH_RPC_IMAGE_NAME }}:${{ env.VERSION }} + + - name: Build eth-indexer Docker image uses: docker/build-push-action@v6 with: context: . - file: ./substrate/frame/revive/rpc/Dockerfile + file: ./substrate/frame/revive/rpc/dockerfiles/eth-indexer/Dockerfile push: true tags: | - ${{ env.IMAGE_NAME }}:${{ env.VERSION }} + ${{ env.ETH_INDEXER_IMAGE_NAME }}:${{ env.VERSION }} diff --git a/.github/workflows/checks-quick.yml b/.github/workflows/checks-quick.yml index 4c26b85a6303..1a8813833def 100644 --- a/.github/workflows/checks-quick.yml +++ b/.github/workflows/checks-quick.yml @@ -138,7 +138,7 @@ jobs: # Fixes "detected dubious ownership" error in the ci git config --global --add safe.directory '*' python3 scripts/generate-umbrella.py --sdk . 
--version 0.1.0 - cargo +nightly fmt --all + cargo +nightly fmt -p polkadot-sdk if [ -n "$(git status --porcelain)" ]; then cat <( slot_now, + relay_slot, timestamp, block_hash, included_block, diff --git a/cumulus/client/consensus/aura/src/collators/mod.rs b/cumulus/client/consensus/aura/src/collators/mod.rs index 89070607fbab..031fa963ba6a 100644 --- a/cumulus/client/consensus/aura/src/collators/mod.rs +++ b/cumulus/client/consensus/aura/src/collators/mod.rs @@ -34,7 +34,7 @@ use polkadot_primitives::{ ValidationCodeHash, }; use sc_consensus_aura::{standalone as aura_internal, AuraApi}; -use sp_api::ProvideRuntimeApi; +use sp_api::{ApiExt, ProvideRuntimeApi}; use sp_core::Pair; use sp_keystore::KeystorePtr; use sp_timestamp::Timestamp; @@ -160,7 +160,8 @@ async fn cores_scheduled_for_para( // Checks if we own the slot at the given block and whether there // is space in the unincluded segment. async fn can_build_upon( - slot: Slot, + para_slot: Slot, + relay_slot: Slot, timestamp: Timestamp, parent_hash: Block::Hash, included_block: Block::Hash, @@ -169,25 +170,28 @@ async fn can_build_upon( ) -> Option> where Client: ProvideRuntimeApi, - Client::Api: AuraApi + AuraUnincludedSegmentApi, + Client::Api: AuraApi + AuraUnincludedSegmentApi + ApiExt, P: Pair, P::Public: Codec, P::Signature: Codec, { let runtime_api = client.runtime_api(); let authorities = runtime_api.authorities(parent_hash).ok()?; - let author_pub = aura_internal::claim_slot::
<P>
(slot, &authorities, keystore).await?; + let author_pub = aura_internal::claim_slot::<P>
(para_slot, &authorities, keystore).await?; - // Here we lean on the property that building on an empty unincluded segment must always - // be legal. Skipping the runtime API query here allows us to seamlessly run this - // collator against chains which have not yet upgraded their runtime. - if parent_hash != included_block && - !runtime_api.can_build_upon(parent_hash, included_block, slot).ok()? - { - return None - } + let Ok(Some(api_version)) = + runtime_api.api_version::<dyn AuraUnincludedSegmentApi<Block>>(parent_hash) + else { + return (parent_hash == included_block) + .then(|| SlotClaim::unchecked::<P>
(author_pub, para_slot, timestamp)); + }; + + let slot = if api_version > 1 { relay_slot } else { para_slot }; - Some(SlotClaim::unchecked::<P>
(author_pub, slot, timestamp)) + runtime_api + .can_build_upon(parent_hash, included_block, slot) + .ok()? + .then(|| SlotClaim::unchecked::<P>
(author_pub, para_slot, timestamp)) } /// Use [`cumulus_client_consensus_common::find_potential_parents`] to find parachain blocks that diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs b/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs index 41751f1db530..48287555dea6 100644 --- a/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs +++ b/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs @@ -23,7 +23,7 @@ use cumulus_primitives_aura::AuraUnincludedSegmentApi; use cumulus_primitives_core::{GetCoreSelectorApi, PersistedValidationData}; use cumulus_relay_chain_interface::RelayChainInterface; -use polkadot_primitives::Id as ParaId; +use polkadot_primitives::{Block as RelayBlock, Id as ParaId}; use futures::prelude::*; use sc_client_api::{backend::AuxStore, BlockBackend, BlockOf, UsageProvider}; @@ -302,8 +302,17 @@ where // on-chain data. collator.collator_service().check_block_status(parent_hash, &parent_header); + let Ok(relay_slot) = + sc_consensus_babe::find_pre_digest::(relay_parent_header) + .map(|babe_pre_digest| babe_pre_digest.slot()) + else { + tracing::error!(target: crate::LOG_TARGET, "Relay chain does not contain babe slot. This should never happen."); + continue; + }; + let slot_claim = match crate::collators::can_build_upon::<_, _, P>( para_slot.slot, + relay_slot, para_slot.timestamp, parent_hash, included_block, diff --git a/cumulus/client/parachain-inherent/src/mock.rs b/cumulus/client/parachain-inherent/src/mock.rs index e08aca932564..8dbc6ace0f06 100644 --- a/cumulus/client/parachain-inherent/src/mock.rs +++ b/cumulus/client/parachain-inherent/src/mock.rs @@ -17,8 +17,9 @@ use crate::{ParachainInherentData, INHERENT_IDENTIFIER}; use codec::Decode; use cumulus_primitives_core::{ - relay_chain, relay_chain::UpgradeGoAhead, InboundDownwardMessage, InboundHrmpMessage, ParaId, - PersistedValidationData, + relay_chain, + relay_chain::{Slot, UpgradeGoAhead}, + InboundDownwardMessage, InboundHrmpMessage, ParaId, PersistedValidationData, }; use cumulus_primitives_parachain_inherent::MessageQueueChain; use cumulus_test_relay_sproof_builder::RelayStateSproofBuilder; @@ -28,9 +29,6 @@ use sp_inherents::{InherentData, InherentDataProvider}; use sp_runtime::traits::Block; use std::collections::BTreeMap; -/// Relay chain slot duration, in milliseconds. -pub const RELAY_CHAIN_SLOT_DURATION_MILLIS: u32 = 6000; - /// Inherent data provider that supplies mocked validation data. /// /// This is useful when running a node that is not actually backed by any relay chain. 
@@ -175,8 +173,7 @@ impl> InherentDataProvider // Calculate the mocked relay block based on the current para block let relay_parent_number = self.relay_offset + self.relay_blocks_per_para_block * self.current_para_block; - sproof_builder.current_slot = - ((relay_parent_number / RELAY_CHAIN_SLOT_DURATION_MILLIS) as u64).into(); + sproof_builder.current_slot = Slot::from(relay_parent_number as u64); sproof_builder.upgrade_go_ahead = self.upgrade_go_ahead; // Process the downward messages and set up the correct head diff --git a/cumulus/pallets/aura-ext/Cargo.toml b/cumulus/pallets/aura-ext/Cargo.toml index fcda79f1d5c1..82638de71aa1 100644 --- a/cumulus/pallets/aura-ext/Cargo.toml +++ b/cumulus/pallets/aura-ext/Cargo.toml @@ -28,9 +28,15 @@ sp-runtime = { workspace = true } cumulus-pallet-parachain-system = { workspace = true } [dev-dependencies] - # Cumulus cumulus-pallet-parachain-system = { workspace = true, default-features = true } +cumulus-primitives-core = { workspace = true, default-features = true } +cumulus-test-relay-sproof-builder = { workspace = true, default-features = true } + +# Substrate +sp-core = { workspace = true, default-features = true } +sp-io = { workspace = true, default-features = true } +sp-version = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/cumulus/pallets/aura-ext/src/consensus_hook.rs b/cumulus/pallets/aura-ext/src/consensus_hook.rs index c1a8568bdd83..56966aa0c8f8 100644 --- a/cumulus/pallets/aura-ext/src/consensus_hook.rs +++ b/cumulus/pallets/aura-ext/src/consensus_hook.rs @@ -18,7 +18,6 @@ //! block velocity. //! //! The velocity `V` refers to the rate of block processing by the relay chain. - use super::{pallet, Aura}; use core::{marker::PhantomData, num::NonZeroU32}; use cumulus_pallet_parachain_system::{ @@ -54,8 +53,23 @@ where let velocity = V.max(1); let relay_chain_slot = state_proof.read_slot().expect("failed to read relay chain slot"); - let (slot, authored) = - pallet::SlotInfo::<T>::get().expect("slot info is inserted on block initialization"); + let (relay_chain_slot, authored_in_relay) = match pallet::RelaySlotInfo::<T>::get() { + Some((slot, authored)) if slot == relay_chain_slot => (slot, authored), + Some((slot, _)) if slot < relay_chain_slot => (relay_chain_slot, 0), + Some((slot, _)) => { + panic!("Slot moved backwards: stored_slot={slot:?}, relay_chain_slot={relay_chain_slot:?}") + }, + None => (relay_chain_slot, 0), + }; + + // We need to allow one additional block to be built to fill the unincluded segment. + if authored_in_relay > velocity { + panic!("authored blocks limit is reached for the slot: relay_chain_slot={relay_chain_slot:?}, authored={authored_in_relay:?}, velocity={velocity:?}"); + } + + pallet::RelaySlotInfo::<T>::put((relay_chain_slot, authored_in_relay + 1)); + + let para_slot = pallet_aura::CurrentSlot::<T>::get(); // Convert relay chain timestamp. let relay_chain_timestamp = @@ -67,19 +81,16 @@ where // Check that we are not too far in the future. Since we expect `V` parachain blocks // during the relay chain slot, we can allow for `V` parachain slots into the future. 
- if *slot > *para_slot_from_relay + u64::from(velocity) { + if *para_slot > *para_slot_from_relay + u64::from(velocity) { panic!( - "Parachain slot is too far in the future: parachain_slot: {:?}, derived_from_relay_slot: {:?} velocity: {:?}", - slot, + "Parachain slot is too far in the future: parachain_slot={:?}, derived_from_relay_slot={:?} velocity={:?}, relay_chain_slot={:?}", + para_slot, para_slot_from_relay, - velocity + velocity, + relay_chain_slot ); } - // We need to allow authoring multiple blocks in the same slot. - if slot != para_slot_from_relay && authored > velocity { - panic!("authored blocks limit is reached for the slot") - } let weight = T::DbWeight::get().reads(1); ( @@ -110,7 +121,7 @@ impl< /// is more recent than the included block itself. pub fn can_build_upon(included_hash: T::Hash, new_slot: Slot) -> bool { let velocity = V.max(1); - let (last_slot, authored_so_far) = match pallet::SlotInfo::<T>::get() { + let (last_slot, authored_so_far) = match pallet::RelaySlotInfo::<T>::get() { None => return true, Some(x) => x, }; @@ -123,11 +134,8 @@ impl< return false } - // TODO: This logic needs to be adjusted. - // It checks that we have not authored more than `V + 1` blocks in the slot. - // As a slot however, we take the parachain slot here. Velocity should - // be measured in relation to the relay chain slot. - // https://github.com/paritytech/polkadot-sdk/issues/3967 + // Check that we have not authored more than `V + 1` parachain blocks in the current relay + // chain slot. if last_slot == new_slot { authored_so_far < velocity + 1 } else { diff --git a/cumulus/pallets/aura-ext/src/lib.rs b/cumulus/pallets/aura-ext/src/lib.rs index dc854eb82018..19c2634ca708 100644 --- a/cumulus/pallets/aura-ext/src/lib.rs +++ b/cumulus/pallets/aura-ext/src/lib.rs @@ -40,6 +40,9 @@ use sp_consensus_aura::{digests::CompatibleDigestItem, Slot}; use sp_runtime::traits::{Block as BlockT, Header as HeaderT}; pub mod consensus_hook; +pub mod migration; +mod test; + pub use consensus_hook::FixedVelocityConsensusHook; type Aura<T> = pallet_aura::Pallet<T>; @@ -57,6 +60,7 @@ pub mod pallet { pub trait Config: pallet_aura::Config + frame_system::Config {} #[pallet::pallet] + #[pallet::storage_version(migration::STORAGE_VERSION)] pub struct Pallet<T>(_); #[pallet::hooks] @@ -70,20 +74,7 @@ pub mod pallet { // Fetch the authorities once to get them into the storage proof of the PoV. Authorities::<T>::get(); - let new_slot = pallet_aura::CurrentSlot::<T>::get(); - - let (new_slot, authored) = match SlotInfo::<T>::get() { - Some((slot, authored)) if slot == new_slot => (slot, authored + 1), - Some((slot, _)) if slot < new_slot => (new_slot, 1), - Some(..) => { - panic!("slot moved backwards") - }, - None => (new_slot, 1), - }; - - SlotInfo::<T>::put((new_slot, authored)); - - T::DbWeight::get().reads_writes(4, 2) + T::DbWeight::get().reads_writes(1, 0) } } @@ -99,11 +90,12 @@ pub mod pallet { ValueQuery, >; - /// Current slot paired with a number of authored blocks. + /// Current relay chain slot paired with a number of authored blocks. /// - /// Updated on each block initialization. + /// This is updated in [`FixedVelocityConsensusHook::on_state_proof`] with the current relay + /// chain slot as provided by the relay chain state proof. 
#[pallet::storage] - pub(crate) type SlotInfo<T: Config> = StorageValue<_, (Slot, u32), OptionQuery>; + pub(crate) type RelaySlotInfo<T: Config> = StorageValue<_, (Slot, u32), OptionQuery>; #[pallet::genesis_config] #[derive(frame_support::DefaultNoBound)] diff --git a/cumulus/pallets/aura-ext/src/migration.rs b/cumulus/pallets/aura-ext/src/migration.rs new file mode 100644 index 000000000000..b580c19fc733 --- /dev/null +++ b/cumulus/pallets/aura-ext/src/migration.rs @@ -0,0 +1,74 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see <http://www.gnu.org/licenses/>. +extern crate alloc; + +use crate::{Config, Pallet}; +#[cfg(feature = "try-runtime")] +use alloc::vec::Vec; +use frame_support::{migrations::VersionedMigration, pallet_prelude::StorageVersion}; + +/// The in-code storage version. +pub const STORAGE_VERSION: StorageVersion = StorageVersion::new(1); + +mod v0 { + use super::*; + use frame_support::{pallet_prelude::OptionQuery, storage_alias}; + use sp_consensus_aura::Slot; + + /// Current slot paired with a number of authored blocks. + /// + /// Updated on each block initialization. + #[storage_alias] + pub(super) type SlotInfo<T: Config> = StorageValue<Pallet<T>, (Slot, u32), OptionQuery>; +} +mod v1 { + use super::*; + use frame_support::{pallet_prelude::*, traits::UncheckedOnRuntimeUpgrade}; + + pub struct UncheckedMigrationToV1<T: Config>(PhantomData<T>); + + impl<T: Config> UncheckedOnRuntimeUpgrade for UncheckedMigrationToV1<T> { + fn on_runtime_upgrade() -> Weight { + let mut weight: Weight = Weight::zero(); + weight += migrate::<T>(); + weight + } + + #[cfg(feature = "try-runtime")] + fn pre_upgrade() -> Result<Vec<u8>, sp_runtime::TryRuntimeError> { + Ok(Vec::new()) + } + #[cfg(feature = "try-runtime")] + fn post_upgrade(_state: Vec<u8>) -> Result<(), sp_runtime::TryRuntimeError> { + ensure!(!v0::SlotInfo::<T>::exists(), "SlotInfo should not exist"); + Ok(()) + } + } + + pub fn migrate<T: Config>() -> Weight { + v0::SlotInfo::<T>::kill(); + T::DbWeight::get().writes(1) + } +} + +/// Migrate `V0` to `V1`. +pub type MigrateV0ToV1<T> = VersionedMigration< + 0, + 1, + v1::UncheckedMigrationToV1<T>, + Pallet<T>, + <T as frame_system::Config>::DbWeight, +>; diff --git a/cumulus/pallets/aura-ext/src/test.rs b/cumulus/pallets/aura-ext/src/test.rs new file mode 100644 index 000000000000..b0099381e682 --- /dev/null +++ b/cumulus/pallets/aura-ext/src/test.rs @@ -0,0 +1,338 @@ +// Copyright Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. 
+ +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +#![cfg(test)] +extern crate alloc; + +use super::*; + +use core::num::NonZeroU32; +use cumulus_pallet_parachain_system::{ + consensus_hook::ExpectParentIncluded, AnyRelayNumber, DefaultCoreSelector, ParachainSetCode, +}; +use cumulus_primitives_core::ParaId; +use frame_support::{ + derive_impl, + pallet_prelude::ConstU32, + parameter_types, + traits::{ConstBool, ConstU64, EnqueueWithOrigin}, +}; +use sp_io::TestExternalities; +use sp_version::RuntimeVersion; + +type Block = frame_system::mocking::MockBlock; + +frame_support::construct_runtime!( + pub enum Test { + System: frame_system, + ParachainSystem: cumulus_pallet_parachain_system, + Aura: pallet_aura, + AuraExt: crate, + } +); + +parameter_types! { + pub Version: RuntimeVersion = RuntimeVersion { + spec_name: "test".into(), + impl_name: "system-test".into(), + authoring_version: 1, + spec_version: 1, + impl_version: 1, + apis: sp_version::create_apis_vec!([]), + transaction_version: 1, + system_version: 1, + }; +} + +#[derive_impl(frame_system::config_preludes::TestDefaultConfig)] +impl frame_system::Config for Test { + type Block = Block; + type Version = Version; + type OnSetCode = ParachainSetCode; + type RuntimeEvent = (); +} + +impl crate::Config for Test {} + +impl pallet_aura::Config for Test { + type AuthorityId = sp_consensus_aura::sr25519::AuthorityId; + type MaxAuthorities = ConstU32<100_000>; + type DisabledValidators = (); + type AllowMultipleBlocksPerSlot = ConstBool; + type SlotDuration = ConstU64<6000>; +} + +impl pallet_timestamp::Config for Test { + type Moment = u64; + type OnTimestampSet = (); + type MinimumPeriod = (); + type WeightInfo = (); +} + +impl cumulus_pallet_parachain_system::Config for Test { + type WeightInfo = (); + type RuntimeEvent = (); + type OnSystemEvent = (); + type SelfParaId = (); + type OutboundXcmpMessageSource = (); + // Ignore all DMP messages by enqueueing them into `()`: + type DmpQueue = EnqueueWithOrigin<(), sp_core::ConstU8<0>>; + type ReservedDmpWeight = (); + type XcmpMessageHandler = (); + type ReservedXcmpWeight = (); + type CheckAssociatedRelayNumber = AnyRelayNumber; + type ConsensusHook = ExpectParentIncluded; + type SelectCore = DefaultCoreSelector; +} + +#[cfg(test)] +mod test { + use crate::test::*; + use cumulus_pallet_parachain_system::{ + Ancestor, ConsensusHook, RelayChainStateProof, UsedBandwidth, + }; + use sp_core::H256; + + fn set_ancestors() { + let mut ancestors = Vec::new(); + for i in 0..3 { + let mut ancestor = Ancestor::new_unchecked(UsedBandwidth::default(), None); + ancestor.replace_para_head_hash(H256::repeat_byte(i + 1)); + ancestors.push(ancestor); + } + cumulus_pallet_parachain_system::UnincludedSegment::::put(ancestors); + } + + pub fn new_test_ext(para_slot: u64) -> sp_io::TestExternalities { + let mut ext = TestExternalities::new_empty(); + ext.execute_with(|| { + set_ancestors(); + // Set initial parachain slot + pallet_aura::CurrentSlot::::put(Slot::from(para_slot)); + }); + ext + } + + fn set_relay_slot(slot: u64, authored: u32) { + RelaySlotInfo::::put((Slot::from(slot), authored)) + } + + fn relay_chain_state_proof(relay_slot: u64) -> RelayChainStateProof { + let mut builder = cumulus_test_relay_sproof_builder::RelayStateSproofBuilder::default(); + builder.current_slot = relay_slot.into(); + + let (hash, state_proof) = builder.into_state_root_and_proof(); + + RelayChainStateProof::new(ParaId::from(200), hash, state_proof) + 
.expect("Should be able to construct state proof.") + } + + fn assert_slot_info(expected_slot: u64, expected_authored: u32) { + let (slot, authored) = pallet::RelaySlotInfo::::get().unwrap(); + assert_eq!(slot, Slot::from(expected_slot), "Slot stored in RelaySlotInfo is incorrect."); + assert_eq!( + authored, expected_authored, + "Number of authored blocks stored in RelaySlotInfo is incorrect." + ); + } + + #[test] + fn test_velocity() { + type Hook = FixedVelocityConsensusHook; + + new_test_ext(1).execute_with(|| { + let state_proof = relay_chain_state_proof(10); + let (_, capacity) = Hook::on_state_proof(&state_proof); + assert_eq!(capacity, NonZeroU32::new(1).unwrap().into()); + assert_slot_info(10, 1); + + let (_, capacity) = Hook::on_state_proof(&state_proof); + assert_eq!(capacity, NonZeroU32::new(1).unwrap().into()); + assert_slot_info(10, 2); + }); + } + + #[test] + #[should_panic(expected = "authored blocks limit is reached for the slot")] + fn test_exceeding_velocity_limit() { + const VELOCITY: u32 = 2; + type Hook = FixedVelocityConsensusHook; + + new_test_ext(1).execute_with(|| { + let state_proof = relay_chain_state_proof(10); + for authored in 0..=VELOCITY + 1 { + Hook::on_state_proof(&state_proof); + assert_slot_info(10, authored + 1); + } + }); + } + + #[test] + fn test_para_slot_calculated_from_slot_duration() { + const VELOCITY: u32 = 2; + type Hook = FixedVelocityConsensusHook; + + new_test_ext(6).execute_with(|| { + let state_proof = relay_chain_state_proof(10); + Hook::on_state_proof(&state_proof); + + let para_slot = Slot::from(7); + pallet_aura::CurrentSlot::::put(para_slot); + Hook::on_state_proof(&state_proof); + }); + } + + #[test] + fn test_velocity_at_least_one() { + // Even though this is 0, one block should always be allowed. 
+ const VELOCITY: u32 = 0; + type Hook = FixedVelocityConsensusHook; + + new_test_ext(6).execute_with(|| { + let state_proof = relay_chain_state_proof(10); + Hook::on_state_proof(&state_proof); + }); + } + + #[test] + #[should_panic( + expected = "Parachain slot is too far in the future: parachain_slot=Slot(8), derived_from_relay_slot=Slot(5) velocity=2" + )] + fn test_para_slot_calculated_from_slot_duration_2() { + const VELOCITY: u32 = 2; + type Hook = FixedVelocityConsensusHook; + + new_test_ext(8).execute_with(|| { + let state_proof = relay_chain_state_proof(10); + let (_, _) = Hook::on_state_proof(&state_proof); + }); + } + + #[test] + fn test_velocity_resets_on_new_relay_slot() { + const VELOCITY: u32 = 2; + type Hook = FixedVelocityConsensusHook; + + new_test_ext(1).execute_with(|| { + let state_proof = relay_chain_state_proof(10); + for authored in 0..=VELOCITY { + Hook::on_state_proof(&state_proof); + assert_slot_info(10, authored + 1); + } + + let state_proof = relay_chain_state_proof(11); + for authored in 0..=VELOCITY { + Hook::on_state_proof(&state_proof); + assert_slot_info(11, authored + 1); + } + }); + } + + #[test] + #[should_panic( + expected = "Slot moved backwards: stored_slot=Slot(10), relay_chain_slot=Slot(9)" + )] + fn test_backward_relay_slot_not_tolerated() { + type Hook = FixedVelocityConsensusHook; + + new_test_ext(1).execute_with(|| { + let state_proof = relay_chain_state_proof(10); + Hook::on_state_proof(&state_proof); + assert_slot_info(10, 1); + + let state_proof = relay_chain_state_proof(9); + Hook::on_state_proof(&state_proof); + }); + } + + #[test] + #[should_panic( + expected = "Parachain slot is too far in the future: parachain_slot=Slot(13), derived_from_relay_slot=Slot(10) velocity=2" + )] + fn test_future_parachain_slot_errors() { + type Hook = FixedVelocityConsensusHook; + + new_test_ext(13).execute_with(|| { + let state_proof = relay_chain_state_proof(10); + Hook::on_state_proof(&state_proof); + }); + } + + #[test] + fn test_can_build_upon_true_when_empty() { + const VELOCITY: u32 = 2; + type Hook = FixedVelocityConsensusHook; + + new_test_ext(1).execute_with(|| { + let hash = H256::repeat_byte(0x1); + assert!(Hook::can_build_upon(hash, Slot::from(1))); + }); + } + + #[test] + fn test_can_build_upon_respects_velocity() { + const VELOCITY: u32 = 2; + type Hook = FixedVelocityConsensusHook; + + new_test_ext(1).execute_with(|| { + let hash = H256::repeat_byte(0x1); + let relay_slot = Slot::from(10); + + set_relay_slot(10, VELOCITY - 1); + assert!(Hook::can_build_upon(hash, relay_slot)); + + set_relay_slot(10, VELOCITY); + assert!(Hook::can_build_upon(hash, relay_slot)); + + set_relay_slot(10, VELOCITY + 1); + // Velocity too high + assert!(!Hook::can_build_upon(hash, relay_slot)); + }); + } + + #[test] + fn test_can_build_upon_slot_can_not_decrease() { + const VELOCITY: u32 = 2; + type Hook = FixedVelocityConsensusHook; + + new_test_ext(1).execute_with(|| { + let hash = H256::repeat_byte(0x1); + + set_relay_slot(10, VELOCITY); + // Slot moves backwards + assert!(!Hook::can_build_upon(hash, Slot::from(9))); + }); + } + + #[test] + fn test_can_build_upon_unincluded_segment_size() { + const VELOCITY: u32 = 2; + type Hook = FixedVelocityConsensusHook; + + new_test_ext(1).execute_with(|| { + let relay_slot = Slot::from(10); + + set_relay_slot(10, VELOCITY); + // Size after included is two, we can not build + let hash = H256::repeat_byte(0x1); + assert!(!Hook::can_build_upon(hash, relay_slot)); + + // Size after included is one, we can build + let hash = 
H256::repeat_byte(0x2); + assert!(Hook::can_build_upon(hash, relay_slot)); + }); + } +} diff --git a/cumulus/pallets/dmp-queue/src/tests.rs b/cumulus/pallets/dmp-queue/src/tests.rs index 70d542ea2ed2..368a1c0b4364 100644 --- a/cumulus/pallets/dmp-queue/src/tests.rs +++ b/cumulus/pallets/dmp-queue/src/tests.rs @@ -21,11 +21,7 @@ use super::{migration::*, mock::*}; use crate::*; -use frame_support::{ - pallet_prelude::*, - traits::{OnFinalize, OnIdle, OnInitialize}, - StorageNoopGuard, -}; +use frame_support::{pallet_prelude::*, traits::OnIdle, StorageNoopGuard}; #[test] fn migration_works() { @@ -183,14 +179,12 @@ fn migration_too_long_ignored() { } fn run_to_block(n: u64) { - assert!(n > System::block_number(), "Cannot go back in time"); - - while System::block_number() < n { - AllPalletsWithSystem::on_finalize(System::block_number()); - System::set_block_number(System::block_number() + 1); - AllPalletsWithSystem::on_initialize(System::block_number()); - AllPalletsWithSystem::on_idle(System::block_number(), Weight::MAX); - } + System::run_to_block_with::( + n, + frame_system::RunToBlockHooks::default().after_initialize(|bn| { + AllPalletsWithSystem::on_idle(bn, Weight::MAX); + }), + ); } fn assert_only_event(e: Event) { diff --git a/cumulus/pallets/parachain-system/src/consensus_hook.rs b/cumulus/pallets/parachain-system/src/consensus_hook.rs index 3062396a4e78..6d65bdc77186 100644 --- a/cumulus/pallets/parachain-system/src/consensus_hook.rs +++ b/cumulus/pallets/parachain-system/src/consensus_hook.rs @@ -22,7 +22,7 @@ use core::num::NonZeroU32; use frame_support::weights::Weight; /// The possible capacity of the unincluded segment. -#[derive(Clone)] +#[derive(Clone, Debug, PartialEq)] pub struct UnincludedSegmentCapacity(UnincludedSegmentCapacityInner); impl UnincludedSegmentCapacity { @@ -41,7 +41,7 @@ impl UnincludedSegmentCapacity { } } -#[derive(Clone)] +#[derive(Clone, Debug, PartialEq)] pub(crate) enum UnincludedSegmentCapacityInner { ExpectParentIncluded, Value(NonZeroU32), diff --git a/cumulus/pallets/parachain-system/src/lib.rs b/cumulus/pallets/parachain-system/src/lib.rs index 0fa759357f65..6857b08e66b7 100644 --- a/cumulus/pallets/parachain-system/src/lib.rs +++ b/cumulus/pallets/parachain-system/src/lib.rs @@ -80,8 +80,7 @@ pub mod relay_state_snapshot; pub mod validate_block; use unincluded_segment::{ - Ancestor, HrmpChannelUpdate, HrmpWatermarkUpdate, OutboundBandwidthLimits, SegmentTracker, - UsedBandwidth, + HrmpChannelUpdate, HrmpWatermarkUpdate, OutboundBandwidthLimits, SegmentTracker, }; pub use consensus_hook::{ConsensusHook, ExpectParentIncluded}; @@ -109,6 +108,7 @@ pub use consensus_hook::{ConsensusHook, ExpectParentIncluded}; /// ``` pub use cumulus_pallet_parachain_system_proc_macro::register_validate_block; pub use relay_state_snapshot::{MessagingStateSnapshot, RelayChainStateProof}; +pub use unincluded_segment::{Ancestor, UsedBandwidth}; pub use pallet::*; diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs index 1db152e39fd9..db9a8201ebbe 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs @@ -1050,6 +1050,7 @@ pub type Migrations = ( >, // permanent pallet_xcm::migration::MigrateToLatestXcmVersion, + cumulus_pallet_aura_ext::migration::MigrateV0ToV1, ); parameter_types! 
{ diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs index 5fb495e4e8cf..cfc150ce5d6f 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs @@ -341,7 +341,6 @@ pub type LocalAndForeignAssets = fungibles::UnionOf< xcm::v5::Location, AccountId, >; - /// Union fungibles implementation for [`LocalAndForeignAssets`] and `Balances`. pub type NativeAndAssets = fungible::UnionOf< Balances, @@ -981,6 +980,7 @@ impl pallet_revive::Config for Runtime { type Xcm = pallet_xcm::Pallet; type ChainId = ConstU64<420_420_421>; type NativeToEthRatio = ConstU32<1_000_000>; // 10^(18 - 12) Eth is 10^18, Native is 10^12. + type EthGasEncoder = (); } impl TryFrom for pallet_revive::Call { @@ -1128,6 +1128,7 @@ pub type Migrations = ( >, // permanent pallet_xcm::migration::MigrateToLatestXcmVersion, + cumulus_pallet_aura_ext::migration::MigrateV0ToV1, ); /// Asset Hub Westend has some undecodable storage, delete it. diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs index 35af034310d9..67bc06a9321e 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs @@ -184,6 +184,7 @@ pub type Migrations = ( pallet_bridge_relayers::migration::v1::MigrationToV1, // permanent pallet_xcm::migration::MigrateToLatestXcmVersion, + cumulus_pallet_aura_ext::migration::MigrateV0ToV1, ); parameter_types! { diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs index 2c2e01b4d21d..3824a4e9a7cb 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs @@ -171,6 +171,7 @@ pub type Migrations = ( bridge_to_ethereum_config::migrations::MigrationForXcmV5, // permanent pallet_xcm::migration::MigrateToLatestXcmVersion, + cumulus_pallet_aura_ext::migration::MigrateV0ToV1, ); parameter_types! { diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs index e9adc4d1eae7..5eafc2960cc8 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs @@ -770,6 +770,7 @@ type Migrations = ( pallet_core_fellowship::migration::MigrateV0ToV1, // unreleased pallet_core_fellowship::migration::MigrateV0ToV1, + cumulus_pallet_aura_ext::migration::MigrateV0ToV1, ); /// Executive: handles dispatch to the various modules. 
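The runtime diffs above and below all register the new `cumulus_pallet_aura_ext` migration in their `Migrations` tuple. For any other runtime built on `cumulus-pallet-aura-ext`, the wiring follows the same pattern; a minimal sketch, assuming the usual `Runtime`, `Block`, and `AllPalletsWithSystem` aliases of a FRAME runtime (illustrative, not copied from one of the diffs):

```rust
// Hypothetical runtime snippet mirroring the registrations shown in this PR.
pub type Migrations = (
    // ... other migrations ...
    // One-off migration: kills the retired `SlotInfo` storage value of
    // `cumulus-pallet-aura-ext` and bumps the pallet's storage version from 0 to 1.
    cumulus_pallet_aura_ext::migration::MigrateV0ToV1<Runtime>,
);

/// Executive: handles dispatch to the various modules.
pub type Executive = frame_executive::Executive<
    Runtime,
    Block,
    frame_system::ChainContext<Runtime>,
    Runtime,
    AllPalletsWithSystem,
    Migrations, // run by `frame_executive` on runtime upgrade
>;
```

Because `MigrateV0ToV1` is a `VersionedMigration`, it only executes while the on-chain storage version is still 0 and becomes a cheap no-op afterwards, which is why it can stay registered permanently.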
diff --git a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs index 3348a635df01..eaaaf0a9a9a7 100644 --- a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs @@ -118,6 +118,7 @@ pub type Migrations = ( cumulus_pallet_xcmp_queue::migration::v5::MigrateV4ToV5, // permanent pallet_xcm::migration::MigrateToLatestXcmVersion, + cumulus_pallet_aura_ext::migration::MigrateV0ToV1, ); type EventRecord = frame_system::EventRecord< diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs index e9171c79afae..622a40e1d8dc 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs @@ -129,6 +129,7 @@ pub type Migrations = ( pallet_broker::migration::MigrateV3ToV4, // permanent pallet_xcm::migration::MigrateToLatestXcmVersion, + cumulus_pallet_aura_ext::migration::MigrateV0ToV1, ); /// Executive: handles dispatch to the various modules. diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs index 975856b3b6ff..7312c9c1639d 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs @@ -129,6 +129,7 @@ pub type Migrations = ( pallet_broker::migration::MigrateV3ToV4, // permanent pallet_xcm::migration::MigrateToLatestXcmVersion, + cumulus_pallet_aura_ext::migration::MigrateV0ToV1, ); /// Executive: handles dispatch to the various modules. diff --git a/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs b/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs index ffdd86c500e5..cb0282b17a6c 100644 --- a/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs @@ -116,6 +116,7 @@ pub type Migrations = ( cumulus_pallet_xcmp_queue::migration::v5::MigrateV4ToV5, // permanent pallet_xcm::migration::MigrateToLatestXcmVersion, + cumulus_pallet_aura_ext::migration::MigrateV0ToV1, ); /// Executive: handles dispatch to the various modules. diff --git a/cumulus/parachains/runtimes/people/people-westend/src/lib.rs b/cumulus/parachains/runtimes/people/people-westend/src/lib.rs index ee6b0db55b91..050256dd4f6a 100644 --- a/cumulus/parachains/runtimes/people/people-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/people/people-westend/src/lib.rs @@ -115,6 +115,7 @@ pub type Migrations = ( pallet_collator_selection::migration::v2::MigrationToV2, // permanent pallet_xcm::migration::MigrateToLatestXcmVersion, + cumulus_pallet_aura_ext::migration::MigrateV0ToV1, ); /// Executive: handles dispatch to the various modules. diff --git a/cumulus/primitives/aura/src/lib.rs b/cumulus/primitives/aura/src/lib.rs index aeeee5f8bafa..4e7d7dc3e79d 100644 --- a/cumulus/primitives/aura/src/lib.rs +++ b/cumulus/primitives/aura/src/lib.rs @@ -34,10 +34,14 @@ sp_api::decl_runtime_apis! { /// When the unincluded segment is short, Aura chains will allow authors to create multiple /// blocks per slot in order to build a backlog. When it is saturated, this API will limit /// the amount of blocks that can be created. + /// + /// Changes: + /// - Version 2: Update to `can_build_upon` to take a relay chain `Slot` instead of a parachain `Slot`. 
+ #[api_version(2)] pub trait AuraUnincludedSegmentApi { /// Whether it is legal to extend the chain, assuming the given block is the most /// recently included one as-of the relay parent that will be built against, and - /// the given slot. + /// the given relay chain slot. /// /// This should be consistent with the logic the runtime uses when validating blocks to /// avoid issues. diff --git a/cumulus/xcm/xcm-emulator/src/lib.rs b/cumulus/xcm/xcm-emulator/src/lib.rs index ff14b747973c..d9b1e7fd9d04 100644 --- a/cumulus/xcm/xcm-emulator/src/lib.rs +++ b/cumulus/xcm/xcm-emulator/src/lib.rs @@ -1118,6 +1118,7 @@ macro_rules! decl_test_networks { ) -> $crate::ParachainInherentData { let mut sproof = $crate::RelayStateSproofBuilder::default(); sproof.para_id = para_id.into(); + sproof.current_slot = $crate::polkadot_primitives::Slot::from(relay_parent_number as u64); // egress channel let e_index = sproof.hrmp_egress_channel_index.get_or_insert_with(Vec::new); diff --git a/docs/contributor/prdoc.md b/docs/contributor/prdoc.md index 1f6252425e69..b3f7a7e94f0c 100644 --- a/docs/contributor/prdoc.md +++ b/docs/contributor/prdoc.md @@ -81,9 +81,6 @@ picked if no other applies. The `None` option is equivalent to the `R0-silent` l level. Experimental and private APIs are exempt from bumping and can be broken at any time. Please read the [Crate Section](../RELEASE.md) of the RELEASE doc about them. -> **Note**: There is currently no CI in place to sanity check this information, but should be added -> soon. - ### Example For example when you modified two crates and record the changes: @@ -106,3 +103,21 @@ you do not need to bump a crate that had a SemVer breaking change only from re-e crate with a breaking change. `minor` and `patch` bumps do not need to be inherited, since `cargo` will automatically update them to the latest compatible version. + +### Overwrite CI check + +The `check-semver` CI check performs sanity checks based on the provided `PRDoc` and the mentioned +crate version bumps. The tooling is not perfect and it may recommend incorrect version bumps. +The CI check can be forced to accept the provided version bump. This can be done like this: + +```yaml +crates: + - name: frame-example + bump: major + validate: false + - name: frame-example-pallet + bump: minor +``` + +By putting `validate: false` for `frame-example`, the version bump is ignored by the tooling. For +`frame-example-pallet` the version bump is still validated by the CI check. diff --git a/docs/sdk/Cargo.toml b/docs/sdk/Cargo.toml index f526c07796ea..4d83e2045ab0 100644 --- a/docs/sdk/Cargo.toml +++ b/docs/sdk/Cargo.toml @@ -110,6 +110,7 @@ sp-offchain = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } sp-runtime-interface = { workspace = true, default-features = true } sp-std = { workspace = true, default-features = true } +sp-storage = { workspace = true, default-features = true } sp-tracing = { workspace = true, default-features = true } sp-version = { workspace = true, default-features = true } sp-weights = { workspace = true, default-features = true } diff --git a/docs/sdk/src/reference_docs/mod.rs b/docs/sdk/src/reference_docs/mod.rs index e47eece784c4..7ad8a37241bf 100644 --- a/docs/sdk/src/reference_docs/mod.rs +++ b/docs/sdk/src/reference_docs/mod.rs @@ -111,3 +111,6 @@ pub mod custom_runtime_api_rpc; /// The [`polkadot-omni-node`](https://crates.io/crates/polkadot-omni-node) and its related binaries. 
pub mod omni_node; + +/// Learn about the state in Substrate. +pub mod state; diff --git a/docs/sdk/src/reference_docs/state.rs b/docs/sdk/src/reference_docs/state.rs new file mode 100644 index 000000000000..a8138caebf1e --- /dev/null +++ b/docs/sdk/src/reference_docs/state.rs @@ -0,0 +1,12 @@ +//! # State +//! +//! The state is abstracted as a key-value-like database. Every item that +//! needs to be persisted by the [State Transition +//! Function](crate::reference_docs::blockchain_state_machines) is written to the state. +//! +//! ## Special keys +//! +//! The key-value pairs in the state are represented as byte sequences. The node +//! doesn't know how to interpret most of the key-value pairs. However, there exist some +//! special keys and their values that are known to the node, the so-called +//! [`well-known-keys`](sp_storage::well_known_keys). diff --git a/polkadot/node/core/approval-voting/src/approval_checking.rs b/polkadot/node/core/approval-voting/src/approval_checking.rs index 3b7262a46826..c7f38619ea1f 100644 --- a/polkadot/node/core/approval-voting/src/approval_checking.rs +++ b/polkadot/node/core/approval-voting/src/approval_checking.rs @@ -712,13 +712,13 @@ mod tests { } .into(); - approval_entry.import_assignment(0, ValidatorIndex(0), block_tick); - approval_entry.import_assignment(0, ValidatorIndex(1), block_tick); + approval_entry.import_assignment(0, ValidatorIndex(0), block_tick, false); + approval_entry.import_assignment(0, ValidatorIndex(1), block_tick, false); - approval_entry.import_assignment(1, ValidatorIndex(2), block_tick + 1); - approval_entry.import_assignment(1, ValidatorIndex(3), block_tick + 1); + approval_entry.import_assignment(1, ValidatorIndex(2), block_tick + 1, false); + approval_entry.import_assignment(1, ValidatorIndex(3), block_tick + 1, false); - approval_entry.import_assignment(2, ValidatorIndex(4), block_tick + 2); + approval_entry.import_assignment(2, ValidatorIndex(4), block_tick + 2, false); let approvals = bitvec![u8, BitOrderLsb0; 1; 5]; @@ -757,8 +757,10 @@ mod tests { } .into(); - approval_entry.import_assignment(0, ValidatorIndex(0), block_tick); - approval_entry.import_assignment(1, ValidatorIndex(2), block_tick); + approval_entry.import_assignment(0, ValidatorIndex(0), block_tick, false); + approval_entry.import_assignment(0, ValidatorIndex(0), block_tick, true); + approval_entry.import_assignment(1, ValidatorIndex(2), block_tick, false); + approval_entry.import_assignment(1, ValidatorIndex(2), block_tick, true); let approvals = bitvec![u8, BitOrderLsb0; 0; 10]; @@ -798,10 +800,10 @@ mod tests { } .into(); - approval_entry.import_assignment(0, ValidatorIndex(0), block_tick); - approval_entry.import_assignment(0, ValidatorIndex(1), block_tick); + approval_entry.import_assignment(0, ValidatorIndex(0), block_tick, false); + approval_entry.import_assignment(0, ValidatorIndex(1), block_tick, false); - approval_entry.import_assignment(1, ValidatorIndex(2), block_tick); + approval_entry.import_assignment(1, ValidatorIndex(2), block_tick, false); let mut approvals = bitvec![u8, BitOrderLsb0; 0; 10]; approvals.set(0, true); @@ -844,11 +846,11 @@ mod tests { } .into(); - approval_entry.import_assignment(0, ValidatorIndex(0), block_tick); - approval_entry.import_assignment(0, ValidatorIndex(1), block_tick); + approval_entry.import_assignment(0, ValidatorIndex(0), block_tick, false); + approval_entry.import_assignment(0, ValidatorIndex(1), block_tick, false); - approval_entry.import_assignment(1, ValidatorIndex(2), block_tick); - 
approval_entry.import_assignment(1, ValidatorIndex(3), block_tick); + approval_entry.import_assignment(1, ValidatorIndex(2), block_tick, false); + approval_entry.import_assignment(1, ValidatorIndex(3), block_tick, false); let mut approvals = bitvec![u8, BitOrderLsb0; 0; n_validators]; approvals.set(0, true); @@ -913,14 +915,24 @@ mod tests { } .into(); - approval_entry.import_assignment(0, ValidatorIndex(0), block_tick); - approval_entry.import_assignment(0, ValidatorIndex(1), block_tick); + approval_entry.import_assignment(0, ValidatorIndex(0), block_tick, false); + approval_entry.import_assignment(0, ValidatorIndex(1), block_tick, false); - approval_entry.import_assignment(1, ValidatorIndex(2), block_tick + 1); - approval_entry.import_assignment(1, ValidatorIndex(3), block_tick + 1); + approval_entry.import_assignment(1, ValidatorIndex(2), block_tick + 1, false); + approval_entry.import_assignment(1, ValidatorIndex(3), block_tick + 1, false); - approval_entry.import_assignment(2, ValidatorIndex(4), block_tick + no_show_duration + 2); - approval_entry.import_assignment(2, ValidatorIndex(5), block_tick + no_show_duration + 2); + approval_entry.import_assignment( + 2, + ValidatorIndex(4), + block_tick + no_show_duration + 2, + false, + ); + approval_entry.import_assignment( + 2, + ValidatorIndex(5), + block_tick + no_show_duration + 2, + false, + ); let mut approvals = bitvec![u8, BitOrderLsb0; 0; n_validators]; approvals.set(0, true); @@ -1007,14 +1019,24 @@ mod tests { } .into(); - approval_entry.import_assignment(0, ValidatorIndex(0), block_tick); - approval_entry.import_assignment(0, ValidatorIndex(1), block_tick); + approval_entry.import_assignment(0, ValidatorIndex(0), block_tick, false); + approval_entry.import_assignment(0, ValidatorIndex(1), block_tick, false); - approval_entry.import_assignment(1, ValidatorIndex(2), block_tick + 1); - approval_entry.import_assignment(1, ValidatorIndex(3), block_tick + 1); + approval_entry.import_assignment(1, ValidatorIndex(2), block_tick + 1, false); + approval_entry.import_assignment(1, ValidatorIndex(3), block_tick + 1, false); - approval_entry.import_assignment(2, ValidatorIndex(4), block_tick + no_show_duration + 2); - approval_entry.import_assignment(2, ValidatorIndex(5), block_tick + no_show_duration + 2); + approval_entry.import_assignment( + 2, + ValidatorIndex(4), + block_tick + no_show_duration + 2, + false, + ); + approval_entry.import_assignment( + 2, + ValidatorIndex(5), + block_tick + no_show_duration + 2, + false, + ); let mut approvals = bitvec![u8, BitOrderLsb0; 0; n_validators]; approvals.set(0, true); @@ -1066,7 +1088,7 @@ mod tests { }, ); - approval_entry.import_assignment(3, ValidatorIndex(6), block_tick); + approval_entry.import_assignment(3, ValidatorIndex(6), block_tick, false); approvals.set(6, true); let tranche_now = no_show_duration as DelayTranche + 3; @@ -1176,7 +1198,7 @@ mod tests { // Populate the requested tranches. The assignments aren't inspected in // this test. for &t in &test_tranche { - approval_entry.import_assignment(t, ValidatorIndex(0), 0) + approval_entry.import_assignment(t, ValidatorIndex(0), 0, false); } let filled_tranches = filled_tranche_iterator(approval_entry.tranches()); diff --git a/polkadot/node/core/approval-voting/src/lib.rs b/polkadot/node/core/approval-voting/src/lib.rs index 7cea22d1a6a7..2deca5a1aba8 100644 --- a/polkadot/node/core/approval-voting/src/lib.rs +++ b/polkadot/node/core/approval-voting/src/lib.rs @@ -21,6 +21,7 @@ //! of others. 
It uses this information to determine when candidates and blocks have //! been sufficiently approved to finalize. +use futures_timer::Delay; use polkadot_node_primitives::{ approval::{ v1::{BlockApprovalMeta, DelayTranche}, @@ -122,12 +123,25 @@ const APPROVAL_CHECKING_TIMEOUT: Duration = Duration::from_secs(120); const WAIT_FOR_SIGS_TIMEOUT: Duration = Duration::from_millis(500); const APPROVAL_CACHE_SIZE: u32 = 1024; +/// The maximum number of times we retry to approve a block if is still needed. +const MAX_APPROVAL_RETRIES: u32 = 16; + const APPROVAL_DELAY: Tick = 2; pub(crate) const LOG_TARGET: &str = "parachain::approval-voting"; // The max number of ticks we delay sending the approval after we are ready to issue the approval const MAX_APPROVAL_COALESCE_WAIT_TICKS: Tick = 12; +// If the node restarted and the tranche has passed without the assignment +// being trigger, we won't trigger the assignment at restart because we don't have +// an wakeup schedule for it. +// The solution, is to always schedule a wake up after the restart and let the +// process_wakeup to decide if the assignment needs to be triggered. +// We need to have a delay after restart to give time to the node to catch up with +// messages and not trigger its assignment unnecessarily, because it hasn't seen +// the assignments from the other validators. +const RESTART_WAKEUP_DELAY: Tick = 12; + /// Configuration for the approval voting subsystem #[derive(Debug, Clone)] pub struct Config { @@ -165,6 +179,10 @@ pub struct ApprovalVotingSubsystem { metrics: Metrics, clock: Arc, spawner: Arc, + /// The maximum time we retry to approve a block if it is still needed and PoV fetch failed. + max_approval_retries: u32, + /// The backoff before we retry the approval. + retry_backoff: Duration, } #[derive(Clone)] @@ -493,6 +511,8 @@ impl ApprovalVotingSubsystem { metrics, Arc::new(SystemClock {}), spawner, + MAX_APPROVAL_RETRIES, + APPROVAL_CHECKING_TIMEOUT / 2, ) } @@ -505,6 +525,8 @@ impl ApprovalVotingSubsystem { metrics: Metrics, clock: Arc, spawner: Arc, + max_approval_retries: u32, + retry_backoff: Duration, ) -> Self { ApprovalVotingSubsystem { keystore, @@ -515,6 +537,8 @@ impl ApprovalVotingSubsystem { metrics, clock, spawner, + max_approval_retries, + retry_backoff, } } @@ -706,18 +730,53 @@ enum ApprovalOutcome { TimedOut, } +#[derive(Clone)] +struct RetryApprovalInfo { + candidate: CandidateReceipt, + backing_group: GroupIndex, + executor_params: ExecutorParams, + core_index: Option, + session_index: SessionIndex, + attempts_remaining: u32, + backoff: Duration, +} + struct ApprovalState { validator_index: ValidatorIndex, candidate_hash: CandidateHash, approval_outcome: ApprovalOutcome, + retry_info: Option, } impl ApprovalState { fn approved(validator_index: ValidatorIndex, candidate_hash: CandidateHash) -> Self { - Self { validator_index, candidate_hash, approval_outcome: ApprovalOutcome::Approved } + Self { + validator_index, + candidate_hash, + approval_outcome: ApprovalOutcome::Approved, + retry_info: None, + } } fn failed(validator_index: ValidatorIndex, candidate_hash: CandidateHash) -> Self { - Self { validator_index, candidate_hash, approval_outcome: ApprovalOutcome::Failed } + Self { + validator_index, + candidate_hash, + approval_outcome: ApprovalOutcome::Failed, + retry_info: None, + } + } + + fn failed_with_retry( + validator_index: ValidatorIndex, + candidate_hash: CandidateHash, + retry_info: Option, + ) -> Self { + Self { + validator_index, + candidate_hash, + approval_outcome: ApprovalOutcome::Failed, 
+ retry_info, + } } } @@ -757,6 +816,7 @@ impl CurrentlyCheckingSet { candidate_hash, validator_index, approval_outcome: ApprovalOutcome::TimedOut, + retry_info: None, }, Some(approval_state) => approval_state, } @@ -1271,18 +1331,19 @@ where validator_index, candidate_hash, approval_outcome, + retry_info, } ) = approval_state; if matches!(approval_outcome, ApprovalOutcome::Approved) { let mut approvals: Vec = relay_block_hashes - .into_iter() + .iter() .map(|block_hash| Action::IssueApproval( candidate_hash, ApprovalVoteRequest { validator_index, - block_hash, + block_hash: *block_hash, }, ) ) @@ -1290,6 +1351,43 @@ where actions.append(&mut approvals); } + if let Some(retry_info) = retry_info { + for block_hash in relay_block_hashes { + if overlayed_db.load_block_entry(&block_hash).map(|block_info| block_info.is_some()).unwrap_or(false) { + let sender = to_other_subsystems.clone(); + let spawn_handle = subsystem.spawner.clone(); + let metrics = subsystem.metrics.clone(); + let retry_info = retry_info.clone(); + let executor_params = retry_info.executor_params.clone(); + let candidate = retry_info.candidate.clone(); + + currently_checking_set + .insert_relay_block_hash( + candidate_hash, + validator_index, + block_hash, + async move { + launch_approval( + sender, + spawn_handle, + metrics, + retry_info.session_index, + candidate, + validator_index, + block_hash, + retry_info.backing_group, + executor_params, + retry_info.core_index, + retry_info, + ) + .await + }, + ) + .await?; + } + } + } + actions }, (block_hash, validator_index) = delayed_approvals_timers.select_next_some() => { @@ -1340,6 +1438,8 @@ where &mut approvals_cache, &mut subsystem.mode, actions, + subsystem.max_approval_retries, + subsystem.retry_backoff, ) .await? { @@ -1389,6 +1489,8 @@ pub async fn start_approval_worker< metrics, clock, spawner, + MAX_APPROVAL_RETRIES, + APPROVAL_CHECKING_TIMEOUT / 2, ); let backend = DbBackend::new(db.clone(), approval_voting.db_config); let spawner = approval_voting.spawner.clone(); @@ -1456,6 +1558,8 @@ async fn handle_actions< approvals_cache: &mut LruMap, mode: &mut Mode, actions: Vec, + max_approval_retries: u32, + retry_backoff: Duration, ) -> SubsystemResult { let mut conclude = false; let mut actions_iter = actions.into_iter(); @@ -1542,6 +1646,16 @@ async fn handle_actions< let sender = sender.clone(); let spawn_handle = spawn_handle.clone(); + let retry = RetryApprovalInfo { + candidate: candidate.clone(), + backing_group, + executor_params: executor_params.clone(), + core_index, + session_index: session, + attempts_remaining: max_approval_retries, + backoff: retry_backoff, + }; + currently_checking_set .insert_relay_block_hash( candidate_hash, @@ -1559,6 +1673,7 @@ async fn handle_actions< backing_group, executor_params, core_index, + retry, ) .await }, @@ -1732,7 +1847,20 @@ async fn distribution_messages_for_activation { match approval_entry.local_statements() { - (None, None) | (None, Some(_)) => {}, // second is impossible case. + (None, None) => + if approval_entry + .our_assignment() + .map(|assignment| !assignment.triggered()) + .unwrap_or(false) + { + actions.push(Action::ScheduleWakeup { + block_hash, + block_number: block_entry.block_number(), + candidate_hash: *candidate_hash, + tick: state.clock.tick_now() + RESTART_WAKEUP_DELAY, + }) + }, + (None, Some(_)) => {}, // second is impossible case. 
(Some(assignment), None) => { let claimed_core_indices = get_core_indices_on_startup(&assignment.cert().kind, *core_index); @@ -2680,8 +2808,15 @@ where Vec::new(), )), }; - is_duplicate &= approval_entry.is_assigned(assignment.validator); - approval_entry.import_assignment(tranche, assignment.validator, tick_now); + + let is_duplicate_for_candidate = approval_entry.is_assigned(assignment.validator); + is_duplicate &= is_duplicate_for_candidate; + approval_entry.import_assignment( + tranche, + assignment.validator, + tick_now, + is_duplicate_for_candidate, + ); // We've imported a new assignment, so we need to schedule a wake-up for when that might // no-show. @@ -3329,6 +3464,7 @@ async fn launch_approval< backing_group: GroupIndex, executor_params: ExecutorParams, core_index: Option, + retry: RetryApprovalInfo, ) -> SubsystemResult> { let (a_tx, a_rx) = oneshot::channel(); let (code_tx, code_rx) = oneshot::channel(); @@ -3360,6 +3496,7 @@ async fn launch_approval< let candidate_hash = candidate.hash(); let para_id = candidate.descriptor.para_id(); + let mut next_retry = None; gum::trace!(target: LOG_TARGET, ?candidate_hash, ?para_id, "Recovering data."); let timer = metrics.time_recover_and_approve(); @@ -3388,7 +3525,6 @@ async fn launch_approval< let background = async move { // Force the move of the timer into the background task. let _timer = timer; - let available_data = match a_rx.await { Err(_) => return ApprovalState::failed(validator_index, candidate_hash), Ok(Ok(a)) => a, @@ -3399,10 +3535,27 @@ async fn launch_approval< target: LOG_TARGET, ?para_id, ?candidate_hash, + attempts_remaining = retry.attempts_remaining, "Data unavailable for candidate {:?}", (candidate_hash, candidate.descriptor.para_id()), ); - // do nothing. we'll just be a no-show and that'll cause others to rise up. + // Availability could fail if we did not discover much of the network, so + // let's back off and order the subsystem to retry at a later point if the + // approval is still needed, because no-show wasn't covered yet. + if retry.attempts_remaining > 0 { + Delay::new(retry.backoff).await; + next_retry = Some(RetryApprovalInfo { + candidate, + backing_group, + executor_params, + core_index, + session_index, + attempts_remaining: retry.attempts_remaining - 1, + backoff: retry.backoff, + }); + } else { + next_retry = None; + } metrics_guard.take().on_approval_unavailable(); }, &RecoveryError::ChannelClosed => { @@ -3433,7 +3586,7 @@ async fn launch_approval< metrics_guard.take().on_approval_invalid(); }, } - return ApprovalState::failed(validator_index, candidate_hash) + return ApprovalState::failed_with_retry(validator_index, candidate_hash, next_retry) }, }; diff --git a/polkadot/node/core/approval-voting/src/persisted_entries.rs b/polkadot/node/core/approval-voting/src/persisted_entries.rs index a5d42d9fd6e6..14c678913dc3 100644 --- a/polkadot/node/core/approval-voting/src/persisted_entries.rs +++ b/polkadot/node/core/approval-voting/src/persisted_entries.rs @@ -172,7 +172,7 @@ impl ApprovalEntry { }); our.map(|a| { - self.import_assignment(a.tranche(), a.validator_index(), tick_now); + self.import_assignment(a.tranche(), a.validator_index(), tick_now, false); (a.cert().clone(), a.validator_index(), a.tranche()) }) @@ -197,6 +197,7 @@ impl ApprovalEntry { tranche: DelayTranche, validator_index: ValidatorIndex, tick_now: Tick, + is_duplicate: bool, ) { // linear search probably faster than binary. not many tranches typically. 
let idx = match self.tranches.iter().position(|t| t.tranche >= tranche) { @@ -213,8 +214,15 @@ impl ApprovalEntry { self.tranches.len() - 1 }, }; - - self.tranches[idx].assignments.push((validator_index, tick_now)); + // At restart we might have duplicate assignments because approval-distribution is not + // persistent across restarts, so avoid adding duplicates. + // We already know whether we have seen an assignment from this validator, and since + // this function is on the hot path, we rely on !is_duplicate to tell us whether the + // assignment is already present in the vector, which saves us from iterating through + // the tranches. + if !is_duplicate { + self.tranches[idx].assignments.push((validator_index, tick_now)); + } self.assigned_validators.set(validator_index.0 as _, true); } diff --git a/polkadot/node/core/approval-voting/src/tests.rs b/polkadot/node/core/approval-voting/src/tests.rs index be569a1de3ec..9fe716833b88 100644 --- a/polkadot/node/core/approval-voting/src/tests.rs +++ b/polkadot/node/core/approval-voting/src/tests.rs @@ -78,6 +78,9 @@ const SLOT_DURATION_MILLIS: u64 = 5000; const TIMEOUT: Duration = Duration::from_millis(2000); +const NUM_APPROVAL_RETRIES: u32 = 3; +const RETRY_BACKOFF: Duration = Duration::from_millis(300); + #[derive(Clone)] struct TestSyncOracle { flag: Arc, } @@ -573,6 +576,8 @@ fn test_harness>( Metrics::default(), clock.clone(), Arc::new(SpawnGlue(pool)), + NUM_APPROVAL_RETRIES, + RETRY_BACKOFF, ), assignment_criteria, backend, @@ -3202,6 +3207,20 @@ async fn recover_available_data(virtual_overseer: &mut VirtualOverseer) { ); } +async fn recover_available_data_failure(virtual_overseer: &mut VirtualOverseer) { + let available_data = RecoveryError::Unavailable; + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::AvailabilityRecovery( + AvailabilityRecoveryMessage::RecoverAvailableData(_, _, _, _, tx) + ) => { + tx.send(Err(available_data)).unwrap(); + }, + "overseer did not receive recover available data message", + ); +} + struct TriggersAssignmentConfig { our_assigned_tranche: DelayTranche, assign_validator_tranche: F1, @@ -4791,6 +4810,133 @@ fn subsystem_relaunches_approval_work_on_restart() { }); } +/// Test that we retry the approval of a candidate on availability failure, up to max retries.
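+/// One candidate exhausts all of its retries, while the other succeeds only on the last +/// retry, so a single approval is expected to be distributed in the end.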
+#[test] +fn subsystem_relaunches_approval_work_on_availability_failure() { + let assignment_criteria = Box::new(MockAssignmentCriteria( + || { + let mut assignments = HashMap::new(); + + let _ = assignments.insert( + CoreIndex(0), + approval_db::v2::OurAssignment { + cert: garbage_assignment_cert_v2(AssignmentCertKindV2::RelayVRFModuloCompact { + core_bitfield: vec![CoreIndex(0), CoreIndex(2)].try_into().unwrap(), + }), + tranche: 0, + validator_index: ValidatorIndex(0), + triggered: false, + } + .into(), + ); + + let _ = assignments.insert( + CoreIndex(1), + approval_db::v2::OurAssignment { + cert: garbage_assignment_cert_v2(AssignmentCertKindV2::RelayVRFDelay { + core_index: CoreIndex(1), + }), + tranche: 0, + validator_index: ValidatorIndex(0), + triggered: false, + } + .into(), + ); + assignments + }, + |_| Ok(0), + )); + let config = HarnessConfigBuilder::default().assignment_criteria(assignment_criteria).build(); + let store = config.backend(); + + test_harness(config, |test_harness| async move { + let TestHarness { mut virtual_overseer, clock, sync_oracle_handle } = test_harness; + + setup_overseer_with_blocks_with_two_assignments_triggered( + &mut virtual_overseer, + store, + &clock, + sync_oracle_handle, + ) + .await; + + // We have two candidates: for one we are going to fail the availability for up to + // max_retries, and for the other we are going to succeed on the last retry, so we + // should see the approval being distributed. + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::ApprovalDistribution(ApprovalDistributionMessage::DistributeAssignment( + _, + _, + )) => { + } + ); + + recover_available_data_failure(&mut virtual_overseer).await; + fetch_validation_code(&mut virtual_overseer).await; + + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::ApprovalDistribution(ApprovalDistributionMessage::DistributeAssignment( + _, + _ + )) => { + } + ); + + recover_available_data_failure(&mut virtual_overseer).await; + fetch_validation_code(&mut virtual_overseer).await; + + recover_available_data_failure(&mut virtual_overseer).await; + fetch_validation_code(&mut virtual_overseer).await; + + recover_available_data_failure(&mut virtual_overseer).await; + fetch_validation_code(&mut virtual_overseer).await; + + recover_available_data_failure(&mut virtual_overseer).await; + fetch_validation_code(&mut virtual_overseer).await; + + recover_available_data_failure(&mut virtual_overseer).await; + fetch_validation_code(&mut virtual_overseer).await; + + recover_available_data_failure(&mut virtual_overseer).await; + fetch_validation_code(&mut virtual_overseer).await; + + recover_available_data(&mut virtual_overseer).await; + fetch_validation_code(&mut virtual_overseer).await; + + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::CandidateValidation(CandidateValidationMessage::ValidateFromExhaustive { + exec_kind, + response_sender, + ..
+ }) if exec_kind == PvfExecKind::Approval => { + response_sender.send(Ok(ValidationResult::Valid(Default::default(), Default::default()))) + .unwrap(); + } + ); + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request(_, RuntimeApiRequest::ApprovalVotingParams(_, sender))) => { + let _ = sender.send(Ok(ApprovalVotingParams { + max_approval_coalesce_count: 1, + })); + } + ); + + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::ApprovalDistribution(ApprovalDistributionMessage::DistributeApproval(_)) + ); + + // Assert that there are no more messages being sent by the subsystem + assert!(overseer_recv(&mut virtual_overseer).timeout(TIMEOUT / 2).await.is_none()); + + virtual_overseer + }); +} + // Test that cached approvals, which are candidates that we approved but we didn't issue // the signature yet because we want to coalesce it with more candidate are sent after restart. #[test] @@ -5234,6 +5380,252 @@ fn subsystem_sends_assignment_approval_in_correct_order_on_approval_restart() { }); } +// Test that if the subsystem missed the triggering of some tranches because it was not running, +// it launches the missed assignments on restart. +#[test] +fn subsystem_launches_missed_assignments_on_restart() { + let test_tranche = 20; + let assignment_criteria = Box::new(MockAssignmentCriteria( + move || { + let mut assignments = HashMap::new(); + let _ = assignments.insert( + CoreIndex(0), + approval_db::v2::OurAssignment { + cert: garbage_assignment_cert_v2(AssignmentCertKindV2::RelayVRFDelay { + core_index: CoreIndex(0), + }), + tranche: test_tranche, + validator_index: ValidatorIndex(0), + triggered: false, + } + .into(), + ); + + assignments + }, + |_| Ok(0), + )); + let config = HarnessConfigBuilder::default().assignment_criteria(assignment_criteria).build(); + let store = config.backend(); + let store_clone = config.backend(); + + test_harness(config, |test_harness| async move { + let TestHarness { mut virtual_overseer, clock, sync_oracle_handle } = test_harness; + + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::ChainApi(ChainApiMessage::FinalizedBlockNumber(rx)) => { + rx.send(Ok(0)).unwrap(); + } + ); + + let block_hash = Hash::repeat_byte(0x01); + let fork_block_hash = Hash::repeat_byte(0x02); + let candidate_commitments = CandidateCommitments::default(); + let mut candidate_receipt = dummy_candidate_receipt_v2(block_hash); + candidate_receipt.commitments_hash = candidate_commitments.hash(); + let candidate_hash = candidate_receipt.hash(); + let slot = Slot::from(1); + let (chain_builder, _session_info) = build_chain_with_two_blocks_with_one_candidate_each( + block_hash, + fork_block_hash, + slot, + sync_oracle_handle, + candidate_receipt, + ) + .await; + chain_builder.build(&mut virtual_overseer).await; + + assert!(!clock.inner.lock().current_wakeup_is(1)); + clock.inner.lock().wakeup_all(1); + + assert!(clock.inner.lock().current_wakeup_is(slot_to_tick(slot) + test_tranche as u64)); + clock.inner.lock().wakeup_all(slot_to_tick(slot)); + + futures_timer::Delay::new(Duration::from_millis(200)).await; + + clock.inner.lock().wakeup_all(slot_to_tick(slot + 2)); + + assert_eq!(clock.inner.lock().wakeups.len(), 0); + + futures_timer::Delay::new(Duration::from_millis(200)).await; + + let candidate_entry = store.load_candidate_entry(&candidate_hash).unwrap().unwrap(); + let our_assignment = + candidate_entry.approval_entry(&block_hash).unwrap().our_assignment().unwrap();
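+ // The tranche of our assignment was never reached while the subsystem was running, so + // the assignment stored in the database must still be untriggered at this point; the + // restarted subsystem below is expected to pick it up.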
+ assert!(!our_assignment.triggered()); + + // Assignment is not triggered because its tranche has not been reached. + virtual_overseer + }); + + // Restart a new approval voting subsystem with the same database, with major syncing set + // to true until the last leaf. + let config = HarnessConfigBuilder::default().backend(store_clone).major_syncing(true).build(); + + test_harness(config, |test_harness| async move { + let TestHarness { mut virtual_overseer, clock, sync_oracle_handle } = test_harness; + let slot = Slot::from(1); + // 1. Set the clock to a tick past the tranche where the assignment should be + // triggered. + clock.inner.lock().set_tick(slot_to_tick(slot) + 2 * test_tranche as u64); + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::ChainApi(ChainApiMessage::FinalizedBlockNumber(rx)) => { + rx.send(Ok(0)).unwrap(); + } + ); + + let block_hash = Hash::repeat_byte(0x01); + let fork_block_hash = Hash::repeat_byte(0x02); + let candidate_commitments = CandidateCommitments::default(); + let mut candidate_receipt = dummy_candidate_receipt_v2(block_hash); + candidate_receipt.commitments_hash = candidate_commitments.hash(); + let (chain_builder, session_info) = build_chain_with_two_blocks_with_one_candidate_each( + block_hash, + fork_block_hash, + slot, + sync_oracle_handle, + candidate_receipt, + ) + .await; + + chain_builder.build(&mut virtual_overseer).await; + + futures_timer::Delay::new(Duration::from_millis(2000)).await; + + // Once major syncing ends, approval voting should send all the necessary messages for a + // candidate to be approved. + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::ApprovalDistribution(ApprovalDistributionMessage::NewBlocks( + _, + )) => { + } + ); + + clock + .inner + .lock() + .wakeup_all(slot_to_tick(slot) + 2 * test_tranche as u64 + RESTART_WAKEUP_DELAY - 1); + + // Subsystem should not send any messages because the assignment is not triggered yet. + assert!(overseer_recv(&mut virtual_overseer).timeout(TIMEOUT / 2).await.is_none()); + + // Set the clock to the tick where the assignment should be triggered. + clock + .inner + .lock() + .wakeup_all(slot_to_tick(slot) + 2 * test_tranche as u64 + RESTART_WAKEUP_DELAY); + + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request( + _, + RuntimeApiRequest::SessionInfo(_, si_tx), + ) + ) => { + si_tx.send(Ok(Some(session_info.clone()))).unwrap(); + } + ); + + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request( + _, + RuntimeApiRequest::SessionExecutorParams(_, si_tx), + ) + ) => { + // Make sure the SessionExecutorParams calls are not made for the leaf (but for its relay parent) + si_tx.send(Ok(Some(ExecutorParams::default()))).unwrap(); + } + ); + + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(_, RuntimeApiRequest::NodeFeatures(_, si_tx), ) + ) => { + si_tx.send(Ok(NodeFeatures::EMPTY)).unwrap(); + } + ); + + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::ApprovalDistribution(ApprovalDistributionMessage::DistributeAssignment( + _, + _, + )) => { + } + ); + + // Guarantees the approval work has been relaunched.
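+ // The expected sequence is: availability recovery, validation code fetch, candidate + // validation and, finally, approval distribution.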
+ recover_available_data(&mut virtual_overseer).await; + fetch_validation_code(&mut virtual_overseer).await; + + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::CandidateValidation(CandidateValidationMessage::ValidateFromExhaustive { + exec_kind, + response_sender, + .. + }) if exec_kind == PvfExecKind::Approval => { + response_sender.send(Ok(ValidationResult::Valid(Default::default(), Default::default()))) + .unwrap(); + } + ); + + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request(_, RuntimeApiRequest::ApprovalVotingParams(_, sender))) => { + let _ = sender.send(Ok(ApprovalVotingParams { + max_approval_coalesce_count: 1, + })); + } + ); + + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::ApprovalDistribution(ApprovalDistributionMessage::DistributeApproval(_)) + ); + + clock + .inner + .lock() + .wakeup_all(slot_to_tick(slot) + 2 * test_tranche as u64 + RESTART_WAKEUP_DELAY); + + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::ApprovalDistribution(ApprovalDistributionMessage::DistributeAssignment( + _, + _, + )) => { + } + ); + + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request(_, RuntimeApiRequest::ApprovalVotingParams(_, sender))) => { + let _ = sender.send(Ok(ApprovalVotingParams { + max_approval_coalesce_count: 1, + })); + } + ); + + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::ApprovalDistribution(ApprovalDistributionMessage::DistributeApproval(_)) + ); + + // Assert that there are no more messages being sent by the subsystem + assert!(overseer_recv(&mut virtual_overseer).timeout(TIMEOUT / 2).await.is_none()); + + virtual_overseer + }); +} + // Test we correctly update the timer when we mark the beginning of gathering assignments. #[test] fn test_gathering_assignments_statements() { diff --git a/polkadot/node/core/candidate-validation/src/lib.rs b/polkadot/node/core/candidate-validation/src/lib.rs index 25614349486e..2a4643031bf8 100644 --- a/polkadot/node/core/candidate-validation/src/lib.rs +++ b/polkadot/node/core/candidate-validation/src/lib.rs @@ -912,15 +912,10 @@ async fn validate_candidate_exhaustive( // invalid. Ok(ValidationResult::Invalid(InvalidCandidate::CommitmentsHashMismatch)) } else { - let core_index = candidate_receipt.descriptor.core_index(); - - match (core_index, exec_kind) { + match exec_kind { // Core selectors are optional for V2 descriptors, but we still check the // descriptor core index. 
- ( - Some(_core_index), - PvfExecKind::Backing(_) | PvfExecKind::BackingSystemParas(_), - ) => { + PvfExecKind::Backing(_) | PvfExecKind::BackingSystemParas(_) => { let Some(claim_queue) = maybe_claim_queue else { let error = "cannot fetch the claim queue from the runtime"; gum::warn!( @@ -937,9 +932,9 @@ async fn validate_candidate_exhaustive( { gum::warn!( target: LOG_TARGET, - ?err, candidate_hash = ?candidate_receipt.hash(), - "Candidate core index is invalid", + "Candidate core index is invalid: {}", + err ); return Ok(ValidationResult::Invalid( InvalidCandidate::InvalidCoreIndex, @@ -947,7 +942,7 @@ async fn validate_candidate_exhaustive( } }, // No checks for approvals and disputes - (_, _) => {}, + _ => {}, } Ok(ValidationResult::Valid( diff --git a/polkadot/node/core/candidate-validation/src/tests.rs b/polkadot/node/core/candidate-validation/src/tests.rs index 98e34a1cb4c1..795d7c93f8a7 100644 --- a/polkadot/node/core/candidate-validation/src/tests.rs +++ b/polkadot/node/core/candidate-validation/src/tests.rs @@ -30,8 +30,8 @@ use polkadot_node_subsystem_util::reexports::SubsystemContext; use polkadot_overseer::ActivatedLeaf; use polkadot_primitives::{ vstaging::{ - CandidateDescriptorV2, ClaimQueueOffset, CoreSelector, MutateDescriptorV2, UMPSignal, - UMP_SEPARATOR, + CandidateDescriptorV2, CandidateDescriptorVersion, ClaimQueueOffset, CoreSelector, + MutateDescriptorV2, UMPSignal, UMP_SEPARATOR, }, CandidateDescriptor, CoreIndex, GroupIndex, HeadData, Id as ParaId, OccupiedCoreAssumption, SessionInfo, UpwardMessage, ValidatorId, @@ -851,7 +851,7 @@ fn invalid_session_or_core_index() { )) .unwrap(); - // Validation doesn't fail for approvals, core/session index is not checked. + // Validation doesn't fail for disputes, core/session index is not checked. assert_matches!(v, ValidationResult::Valid(outputs, used_validation_data) => { assert_eq!(outputs.head_data, HeadData(vec![1, 1, 1])); assert_eq!(outputs.upward_messages, commitments.upward_messages); @@ -911,6 +911,69 @@ fn invalid_session_or_core_index() { assert_eq!(outputs.hrmp_watermark, 0); assert_eq!(used_validation_data, validation_data); }); + + // Test that a v1 candidate that outputs the core selector UMP signal is invalid. + let descriptor_v1 = make_valid_candidate_descriptor( + ParaId::from(1_u32), + dummy_hash(), + dummy_hash(), + pov.hash(), + validation_code.hash(), + validation_result.head_data.hash(), + dummy_hash(), + sp_keyring::Sr25519Keyring::Ferdie, + ); + let descriptor: CandidateDescriptorV2 = descriptor_v1.into(); + + perform_basic_checks(&descriptor, validation_data.max_pov_size, &pov, &validation_code.hash()) + .unwrap(); + assert_eq!(descriptor.version(), CandidateDescriptorVersion::V1); + let candidate_receipt = CandidateReceipt { descriptor, commitments_hash: commitments.hash() }; + + for exec_kind in + [PvfExecKind::Backing(dummy_hash()), PvfExecKind::BackingSystemParas(dummy_hash())] + { + let result = executor::block_on(validate_candidate_exhaustive( + Some(1), + MockValidateCandidateBackend::with_hardcoded_result(Ok(validation_result.clone())), + validation_data.clone(), + validation_code.clone(), + candidate_receipt.clone(), + Arc::new(pov.clone()), + ExecutorParams::default(), + exec_kind, + &Default::default(), + Some(Default::default()), + )) + .unwrap(); + assert_matches!(result, ValidationResult::Invalid(InvalidCandidate::InvalidCoreIndex)); + } + + // Validation doesn't fail for approvals and disputes, core/session index is not checked. 
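+ // The same v1 receipt with a core selector UMP signal is accepted for these exec kinds, + // because the core index checks only run on the backing paths.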
+ for exec_kind in [PvfExecKind::Approval, PvfExecKind::Dispute] { + let v = executor::block_on(validate_candidate_exhaustive( + Some(1), + MockValidateCandidateBackend::with_hardcoded_result(Ok(validation_result.clone())), + validation_data.clone(), + validation_code.clone(), + candidate_receipt.clone(), + Arc::new(pov.clone()), + ExecutorParams::default(), + exec_kind, + &Default::default(), + Default::default(), + )) + .unwrap(); + + assert_matches!(v, ValidationResult::Valid(outputs, used_validation_data) => { + assert_eq!(outputs.head_data, HeadData(vec![1, 1, 1])); + assert_eq!(outputs.upward_messages, commitments.upward_messages); + assert_eq!(outputs.horizontal_messages, Vec::new()); + assert_eq!(outputs.new_validation_code, Some(vec![2, 2, 2].into())); + assert_eq!(outputs.hrmp_watermark, 0); + assert_eq!(used_validation_data, validation_data); + }); + } } #[test] @@ -1407,7 +1470,7 @@ fn compressed_code_works() { ExecutorParams::default(), PvfExecKind::Backing(dummy_hash()), &Default::default(), - Default::default(), + Some(Default::default()), )); assert_matches!(v, Ok(ValidationResult::Valid(_, _))); diff --git a/polkadot/node/service/src/lib.rs b/polkadot/node/service/src/lib.rs index 227bc5253994..820cce8d083a 100644 --- a/polkadot/node/service/src/lib.rs +++ b/polkadot/node/service/src/lib.rs @@ -944,14 +944,9 @@ pub fn new_full< secure_validator_mode, prep_worker_path, exec_worker_path, - pvf_execute_workers_max_num: execute_workers_max_num.unwrap_or_else( - || match config.chain_spec.identify_chain() { - // The intention is to use this logic for gradual increasing from 2 to 4 - // of this configuration chain by chain until it reaches production chain. - Chain::Polkadot | Chain::Kusama => 2, - Chain::Rococo | Chain::Westend | Chain::Unknown => 4, - }, - ), + // The default number of execution workers is 4 because the reference hardware has + // 8 cores, and this accounts for 50% of that CPU capacity. + pvf_execute_workers_max_num: execute_workers_max_num.unwrap_or(4), pvf_prepare_workers_soft_max_num: prepare_workers_soft_max_num.unwrap_or(1), pvf_prepare_workers_hard_max_num: prepare_workers_hard_max_num.unwrap_or(2), }) diff --git a/polkadot/node/subsystem-bench/src/lib/approval/mod.rs b/polkadot/node/subsystem-bench/src/lib/approval/mod.rs index 1b20960a3f8a..5f1689cb226b 100644 --- a/polkadot/node/subsystem-bench/src/lib/approval/mod.rs +++ b/polkadot/node/subsystem-bench/src/lib/approval/mod.rs @@ -891,6 +891,8 @@ fn build_overseer( state.approval_voting_parallel_metrics.approval_voting_metrics(), Arc::new(system_clock.clone()), Arc::new(SpawnGlue(spawn_task_handle.clone())), + 1, + Duration::from_secs(1), ); let approval_distribution = ApprovalDistribution::new_with_clock( diff --git a/polkadot/primitives/src/vstaging/mod.rs b/polkadot/primitives/src/vstaging/mod.rs index 271f78efe090..c52f3539c3e5 100644 --- a/polkadot/primitives/src/vstaging/mod.rs +++ b/polkadot/primitives/src/vstaging/mod.rs @@ -505,6 +505,10 @@ pub enum CommittedCandidateReceiptError { /// Currenly only one such message is allowed. #[cfg_attr(feature = "std", error("Too many UMP signals"))] TooManyUMPSignals, + /// If the parachain runtime started sending core selectors, v1 descriptors are no longer + /// allowed. + #[cfg_attr(feature = "std", error("Version 1 receipt does not support core selectors"))] + CoreSelectorWithV1Decriptor, } macro_rules! 
impl_getter { @@ -603,15 +607,25 @@ impl CommittedCandidateReceiptV2 { &self, cores_per_para: &TransposedClaimQueue, ) -> Result<(), CommittedCandidateReceiptError> { + let maybe_core_selector = self.commitments.core_selector()?; + match self.descriptor.version() { - // Don't check v1 descriptors. - CandidateDescriptorVersion::V1 => return Ok(()), + CandidateDescriptorVersion::V1 => { + // If the parachain runtime started sending core selectors, v1 descriptors are no + // longer allowed. + if maybe_core_selector.is_some() { + return Err(CommittedCandidateReceiptError::CoreSelectorWithV1Decriptor) + } else { + // Nothing else to check for v1 descriptors. + return Ok(()) + } + }, CandidateDescriptorVersion::V2 => {}, CandidateDescriptorVersion::Unknown => return Err(CommittedCandidateReceiptError::UnknownVersion(self.descriptor.version)), } - let (maybe_core_index_selector, cq_offset) = self.commitments.core_selector()?.map_or_else( + let (maybe_core_index_selector, cq_offset) = maybe_core_selector.map_or_else( || (None, ClaimQueueOffset(DEFAULT_CLAIM_QUEUE_OFFSET)), |(sel, off)| (Some(sel), off), ); @@ -1207,8 +1221,7 @@ mod tests { assert_eq!(new_ccr.hash(), v2_ccr.hash()); } - // Only check descriptor `core_index` field of v2 descriptors. If it is v1, that field - // will be garbage. + // V1 descriptors are forbidden once the parachain runtime started sending UMP signals. #[test] fn test_v1_descriptors_with_ump_signal() { let mut ccr = dummy_old_committed_candidate_receipt(); @@ -1234,9 +1247,12 @@ mod tests { cq.insert(CoreIndex(0), vec![v1_ccr.descriptor.para_id()].into()); cq.insert(CoreIndex(1), vec![v1_ccr.descriptor.para_id()].into()); - assert!(v1_ccr.check_core_index(&transpose_claim_queue(cq)).is_ok()); - assert_eq!(v1_ccr.descriptor.core_index(), None); + + assert_eq!( + v1_ccr.check_core_index(&transpose_claim_queue(cq)), + Err(CommittedCandidateReceiptError::CoreSelectorWithV1Decriptor) + ); } #[test] diff --git a/polkadot/runtime/common/src/assigned_slots/mod.rs b/polkadot/runtime/common/src/assigned_slots/mod.rs index 65942c127b1c..dea29f53cad4 100644 --- a/polkadot/runtime/common/src/assigned_slots/mod.rs +++ b/polkadot/runtime/common/src/assigned_slots/mod.rs @@ -788,39 +788,14 @@ mod tests { t.into() } - fn run_to_block(n: BlockNumber) { - while System::block_number() < n { - let mut block = System::block_number(); - // on_finalize hooks - AssignedSlots::on_finalize(block); - Slots::on_finalize(block); - Parachains::on_finalize(block); - ParasShared::on_finalize(block); - Configuration::on_finalize(block); - Balances::on_finalize(block); - System::on_finalize(block); - // Set next block - System::set_block_number(block + 1); - block = System::block_number(); - // on_initialize hooks - System::on_initialize(block); - Balances::on_initialize(block); - Configuration::on_initialize(block); - ParasShared::on_initialize(block); - Parachains::on_initialize(block); - Slots::on_initialize(block); - AssignedSlots::on_initialize(block); - } - } - #[test] fn basic_setup_works() { new_test_ext().execute_with(|| { - run_to_block(1); + System::run_to_block::(1); assert_eq!(AssignedSlots::current_lease_period_index(), 0); assert_eq!(Slots::deposit_held(1.into(), &1), 0); - run_to_block(3); + System::run_to_block::(3); assert_eq!(AssignedSlots::current_lease_period_index(), 1); }); } @@ -828,7 +803,7 @@ mod tests { #[test] fn assign_perm_slot_fails_for_unknown_para() { new_test_ext().execute_with(|| { - run_to_block(1); + System::run_to_block::(1); assert_noop!( 
AssignedSlots::assign_perm_parachain_slot( @@ -843,7 +818,7 @@ mod tests { #[test] fn assign_perm_slot_fails_for_invalid_origin() { new_test_ext().execute_with(|| { - run_to_block(1); + System::run_to_block::(1); assert_noop!( AssignedSlots::assign_perm_parachain_slot( @@ -858,7 +833,7 @@ mod tests { #[test] fn assign_perm_slot_fails_when_not_parathread() { new_test_ext().execute_with(|| { - run_to_block(1); + System::run_to_block::(1); assert_ok!(TestRegistrar::::register( 1, @@ -881,7 +856,7 @@ mod tests { #[test] fn assign_perm_slot_fails_when_existing_lease() { new_test_ext().execute_with(|| { - run_to_block(1); + System::run_to_block::(1); assert_ok!(TestRegistrar::::register( 1, @@ -920,7 +895,7 @@ mod tests { #[test] fn assign_perm_slot_fails_when_max_perm_slots_exceeded() { new_test_ext().execute_with(|| { - run_to_block(1); + System::run_to_block::(1); assert_ok!(TestRegistrar::::register( 1, @@ -967,7 +942,7 @@ mod tests { fn assign_perm_slot_succeeds_for_parathread() { new_test_ext().execute_with(|| { let mut block = 1; - run_to_block(block); + System::run_to_block::(block); assert_ok!(TestRegistrar::::register( 1, ParaId::from(1_u32), @@ -1000,7 +975,7 @@ mod tests { assert_eq!(Slots::already_leased(ParaId::from(1_u32), 0, 2), true); block += 1; - run_to_block(block); + System::run_to_block::(block); } // Para lease ended, downgraded back to parathread (on-demand parachain) @@ -1012,7 +987,7 @@ mod tests { #[test] fn assign_temp_slot_fails_for_unknown_para() { new_test_ext().execute_with(|| { - run_to_block(1); + System::run_to_block::(1); assert_noop!( AssignedSlots::assign_temp_parachain_slot( @@ -1028,7 +1003,7 @@ mod tests { #[test] fn assign_temp_slot_fails_for_invalid_origin() { new_test_ext().execute_with(|| { - run_to_block(1); + System::run_to_block::(1); assert_noop!( AssignedSlots::assign_temp_parachain_slot( @@ -1044,7 +1019,7 @@ mod tests { #[test] fn assign_temp_slot_fails_when_not_parathread() { new_test_ext().execute_with(|| { - run_to_block(1); + System::run_to_block::(1); assert_ok!(TestRegistrar::::register( 1, @@ -1068,7 +1043,7 @@ mod tests { #[test] fn assign_temp_slot_fails_when_existing_lease() { new_test_ext().execute_with(|| { - run_to_block(1); + System::run_to_block::(1); assert_ok!(TestRegistrar::::register( 1, @@ -1109,7 +1084,7 @@ mod tests { #[test] fn assign_temp_slot_fails_when_max_temp_slots_exceeded() { new_test_ext().execute_with(|| { - run_to_block(1); + System::run_to_block::(1); // Register 6 paras & a temp slot for each for n in 0..=5 { @@ -1151,7 +1126,7 @@ mod tests { fn assign_temp_slot_succeeds_for_single_parathread() { new_test_ext().execute_with(|| { let mut block = 1; - run_to_block(block); + System::run_to_block::(block); assert_ok!(TestRegistrar::::register( 1, ParaId::from(1_u32), @@ -1195,7 +1170,7 @@ mod tests { assert_eq!(Slots::already_leased(ParaId::from(1_u32), 0, 1), true); block += 1; - run_to_block(block); + System::run_to_block::(block); } // Block 6 @@ -1210,7 +1185,7 @@ mod tests { // Block 12 // Para should get a turn after TemporarySlotLeasePeriodLength * LeasePeriod blocks - run_to_block(12); + System::run_to_block::(12); println!("block #{}", block); println!("lease period #{}", AssignedSlots::current_lease_period_index()); println!("lease {:?}", slots::Leases::::get(ParaId::from(1_u32))); @@ -1225,7 +1200,7 @@ mod tests { fn assign_temp_slot_succeeds_for_multiple_parathreads() { new_test_ext().execute_with(|| { // Block 1, Period 0 - run_to_block(1); + System::run_to_block::(1); // Register 6 paras & a temp 
slot for each // (3 slots in current lease period, 3 in the next one) @@ -1251,7 +1226,7 @@ mod tests { // Block 1-5, Period 0-1 for n in 1..=5 { if n > 1 { - run_to_block(n); + System::run_to_block::(n); } assert_eq!(TestRegistrar::::is_parachain(ParaId::from(0)), true); assert_eq!(TestRegistrar::::is_parachain(ParaId::from(1_u32)), false); @@ -1264,7 +1239,7 @@ mod tests { // Block 6-11, Period 2-3 for n in 6..=11 { - run_to_block(n); + System::run_to_block::(n); assert_eq!(TestRegistrar::::is_parachain(ParaId::from(0)), false); assert_eq!(TestRegistrar::::is_parachain(ParaId::from(1_u32)), true); assert_eq!(TestRegistrar::::is_parachain(ParaId::from(2_u32)), false); @@ -1276,7 +1251,7 @@ mod tests { // Block 12-17, Period 4-5 for n in 12..=17 { - run_to_block(n); + System::run_to_block::(n); assert_eq!(TestRegistrar::::is_parachain(ParaId::from(0)), false); assert_eq!(TestRegistrar::::is_parachain(ParaId::from(1_u32)), false); assert_eq!(TestRegistrar::::is_parachain(ParaId::from(2_u32)), false); @@ -1288,7 +1263,7 @@ mod tests { // Block 18-23, Period 6-7 for n in 18..=23 { - run_to_block(n); + System::run_to_block::(n); assert_eq!(TestRegistrar::::is_parachain(ParaId::from(0)), true); assert_eq!(TestRegistrar::::is_parachain(ParaId::from(1_u32)), false); assert_eq!(TestRegistrar::::is_parachain(ParaId::from(2_u32)), true); @@ -1300,7 +1275,7 @@ mod tests { // Block 24-29, Period 8-9 for n in 24..=29 { - run_to_block(n); + System::run_to_block::(n); assert_eq!(TestRegistrar::::is_parachain(ParaId::from(0)), false); assert_eq!(TestRegistrar::::is_parachain(ParaId::from(1_u32)), true); assert_eq!(TestRegistrar::::is_parachain(ParaId::from(2_u32)), false); @@ -1312,7 +1287,7 @@ mod tests { // Block 30-35, Period 10-11 for n in 30..=35 { - run_to_block(n); + System::run_to_block::(n); assert_eq!(TestRegistrar::::is_parachain(ParaId::from(0)), false); assert_eq!(TestRegistrar::::is_parachain(ParaId::from(1_u32)), false); assert_eq!(TestRegistrar::::is_parachain(ParaId::from(2_u32)), false); @@ -1327,7 +1302,7 @@ mod tests { #[test] fn unassign_slot_fails_for_unknown_para() { new_test_ext().execute_with(|| { - run_to_block(1); + System::run_to_block::(1); assert_noop!( AssignedSlots::unassign_parachain_slot(RuntimeOrigin::root(), ParaId::from(1_u32),), @@ -1339,7 +1314,7 @@ mod tests { #[test] fn unassign_slot_fails_for_invalid_origin() { new_test_ext().execute_with(|| { - run_to_block(1); + System::run_to_block::(1); assert_noop!( AssignedSlots::assign_perm_parachain_slot( @@ -1354,7 +1329,7 @@ mod tests { #[test] fn unassign_perm_slot_succeeds() { new_test_ext().execute_with(|| { - run_to_block(1); + System::run_to_block::(1); assert_ok!(TestRegistrar::::register( 1, @@ -1386,7 +1361,7 @@ mod tests { #[test] fn unassign_temp_slot_succeeds() { new_test_ext().execute_with(|| { - run_to_block(1); + System::run_to_block::(1); assert_ok!(TestRegistrar::::register( 1, @@ -1419,7 +1394,7 @@ mod tests { #[test] fn set_max_permanent_slots_fails_for_no_root_origin() { new_test_ext().execute_with(|| { - run_to_block(1); + System::run_to_block::(1); assert_noop!( AssignedSlots::set_max_permanent_slots(RuntimeOrigin::signed(1), 5), @@ -1430,7 +1405,7 @@ mod tests { #[test] fn set_max_permanent_slots_succeeds() { new_test_ext().execute_with(|| { - run_to_block(1); + System::run_to_block::(1); assert_eq!(MaxPermanentSlots::::get(), 2); assert_ok!(AssignedSlots::set_max_permanent_slots(RuntimeOrigin::root(), 10),); @@ -1441,7 +1416,7 @@ mod tests { #[test] fn 
set_max_temporary_slots_fails_for_no_root_origin() { new_test_ext().execute_with(|| { - run_to_block(1); + System::run_to_block::(1); assert_noop!( AssignedSlots::set_max_temporary_slots(RuntimeOrigin::signed(1), 5), @@ -1452,7 +1427,7 @@ mod tests { #[test] fn set_max_temporary_slots_succeeds() { new_test_ext().execute_with(|| { - run_to_block(1); + System::run_to_block::(1); assert_eq!(MaxTemporarySlots::::get(), 6); assert_ok!(AssignedSlots::set_max_temporary_slots(RuntimeOrigin::root(), 12),); diff --git a/polkadot/runtime/common/src/auctions/mock.rs b/polkadot/runtime/common/src/auctions/mock.rs index 9fe19e579cfa..e0365d363ca2 100644 --- a/polkadot/runtime/common/src/auctions/mock.rs +++ b/polkadot/runtime/common/src/auctions/mock.rs @@ -20,8 +20,7 @@ use super::*; use crate::{auctions, mock::TestRegistrar}; use frame_support::{ - assert_ok, derive_impl, ord_parameter_types, parameter_types, - traits::{EitherOfDiverse, OnFinalize, OnInitialize}, + assert_ok, derive_impl, ord_parameter_types, parameter_types, traits::EitherOfDiverse, }; use frame_system::{EnsureRoot, EnsureSignedBy}; use pallet_balances; @@ -244,15 +243,3 @@ pub fn new_test_ext() -> sp_io::TestExternalities { }); ext } - -pub fn run_to_block(n: BlockNumber) { - while System::block_number() < n { - Auctions::on_finalize(System::block_number()); - Balances::on_finalize(System::block_number()); - System::on_finalize(System::block_number()); - System::set_block_number(System::block_number() + 1); - System::on_initialize(System::block_number()); - Balances::on_initialize(System::block_number()); - Auctions::on_initialize(System::block_number()); - } -} diff --git a/polkadot/runtime/common/src/auctions/tests.rs b/polkadot/runtime/common/src/auctions/tests.rs index 07574eeb295d..26e2ac47df84 100644 --- a/polkadot/runtime/common/src/auctions/tests.rs +++ b/polkadot/runtime/common/src/auctions/tests.rs @@ -36,7 +36,7 @@ fn basic_setup_works() { AuctionStatus::::NotStarted ); - run_to_block(10); + System::run_to_block::(10); assert_eq!(AuctionCounter::::get(), 0); assert_eq!(TestLeaser::deposit_held(0u32.into(), &1), 0); @@ -50,7 +50,7 @@ fn basic_setup_works() { #[test] fn can_start_auction() { new_test_ext().execute_with(|| { - run_to_block(1); + System::run_to_block::(1); assert_noop!(Auctions::new_auction(RuntimeOrigin::signed(1), 5, 1), BadOrigin); assert_ok!(Auctions::new_auction(RuntimeOrigin::signed(6), 5, 1)); @@ -66,7 +66,7 @@ fn can_start_auction() { #[test] fn bidding_works() { new_test_ext().execute_with(|| { - run_to_block(1); + System::run_to_block::(1); assert_ok!(Auctions::new_auction(RuntimeOrigin::signed(6), 5, 1)); assert_ok!(Auctions::bid(RuntimeOrigin::signed(1), 0.into(), 1, 1, 4, 5)); @@ -82,7 +82,7 @@ fn bidding_works() { #[test] fn under_bidding_works() { new_test_ext().execute_with(|| { - run_to_block(1); + System::run_to_block::(1); assert_ok!(Auctions::new_auction(RuntimeOrigin::signed(6), 5, 1)); assert_ok!(Auctions::bid(RuntimeOrigin::signed(1), 0.into(), 1, 1, 4, 5)); @@ -96,7 +96,7 @@ fn under_bidding_works() { #[test] fn over_bidding_works() { new_test_ext().execute_with(|| { - run_to_block(1); + System::run_to_block::(1); assert_ok!(Auctions::new_auction(RuntimeOrigin::signed(6), 5, 1)); assert_ok!(Auctions::bid(RuntimeOrigin::signed(1), 0.into(), 1, 1, 4, 5)); assert_ok!(Auctions::bid(RuntimeOrigin::signed(2), 0.into(), 1, 1, 4, 6)); @@ -115,7 +115,7 @@ fn over_bidding_works() { #[test] fn auction_proceeds_correctly() { new_test_ext().execute_with(|| { - run_to_block(1); + 
System::run_to_block::(1); assert_ok!(Auctions::new_auction(RuntimeOrigin::signed(6), 5, 1)); @@ -125,49 +125,49 @@ fn auction_proceeds_correctly() { AuctionStatus::::StartingPeriod ); - run_to_block(2); + System::run_to_block::(2); assert_eq!( Auctions::auction_status(System::block_number()), AuctionStatus::::StartingPeriod ); - run_to_block(3); + System::run_to_block::(3); assert_eq!( Auctions::auction_status(System::block_number()), AuctionStatus::::StartingPeriod ); - run_to_block(4); + System::run_to_block::(4); assert_eq!( Auctions::auction_status(System::block_number()), AuctionStatus::::StartingPeriod ); - run_to_block(5); + System::run_to_block::(5); assert_eq!( Auctions::auction_status(System::block_number()), AuctionStatus::::StartingPeriod ); - run_to_block(6); + System::run_to_block::(6); assert_eq!( Auctions::auction_status(System::block_number()), AuctionStatus::::EndingPeriod(0, 0) ); - run_to_block(7); + System::run_to_block::(7); assert_eq!( Auctions::auction_status(System::block_number()), AuctionStatus::::EndingPeriod(1, 0) ); - run_to_block(8); + System::run_to_block::(8); assert_eq!( Auctions::auction_status(System::block_number()), AuctionStatus::::EndingPeriod(2, 0) ); - run_to_block(9); + System::run_to_block::(9); assert_eq!( Auctions::auction_status(System::block_number()), AuctionStatus::::NotStarted @@ -178,12 +178,12 @@ fn auction_proceeds_correctly() { #[test] fn can_win_auction() { new_test_ext().execute_with(|| { - run_to_block(1); + System::run_to_block::(1); assert_ok!(Auctions::new_auction(RuntimeOrigin::signed(6), 5, 1)); assert_ok!(Auctions::bid(RuntimeOrigin::signed(1), 0.into(), 1, 1, 4, 1)); assert_eq!(Balances::reserved_balance(1), 1); assert_eq!(Balances::free_balance(1), 9); - run_to_block(9); + System::run_to_block::(9); assert_eq!( leases(), @@ -201,7 +201,7 @@ fn can_win_auction() { #[test] fn can_win_auction_with_late_randomness() { new_test_ext().execute_with(|| { - run_to_block(1); + System::run_to_block::(1); assert_ok!(Auctions::new_auction(RuntimeOrigin::signed(6), 5, 1)); assert_ok!(Auctions::bid(RuntimeOrigin::signed(1), 0.into(), 1, 1, 4, 1)); assert_eq!(Balances::reserved_balance(1), 1); @@ -210,7 +210,7 @@ fn can_win_auction_with_late_randomness() { Auctions::auction_status(System::block_number()), AuctionStatus::::StartingPeriod ); - run_to_block(8); + System::run_to_block::(8); // Auction has not yet ended. assert_eq!(leases(), vec![]); assert_eq!( @@ -222,7 +222,7 @@ fn can_win_auction_with_late_randomness() { set_last_random(H256::zero(), 8); // Auction definitely ended now, but we don't know exactly when in the last 3 blocks yet // since no randomness available yet. - run_to_block(9); + System::run_to_block::(9); // Auction has now ended... But auction winner still not yet decided, so no leases yet. assert_eq!( Auctions::auction_status(System::block_number()), @@ -233,7 +233,7 @@ fn can_win_auction_with_late_randomness() { // Random seed now updated to a value known at block 9, when the auction ended. This // means that the winner can now be chosen. 
set_last_random(H256::zero(), 9); - run_to_block(10); + System::run_to_block::(10); // Auction ended and winner selected assert_eq!( Auctions::auction_status(System::block_number()), @@ -255,10 +255,10 @@ fn can_win_auction_with_late_randomness() { #[test] fn can_win_incomplete_auction() { new_test_ext().execute_with(|| { - run_to_block(1); + System::run_to_block::(1); assert_ok!(Auctions::new_auction(RuntimeOrigin::signed(6), 5, 1)); assert_ok!(Auctions::bid(RuntimeOrigin::signed(1), 0.into(), 1, 4, 4, 5)); - run_to_block(9); + System::run_to_block::(9); assert_eq!(leases(), vec![((0.into(), 4), LeaseData { leaser: 1, amount: 5 }),]); assert_eq!(TestLeaser::deposit_held(0.into(), &1), 5); @@ -268,13 +268,13 @@ fn can_win_incomplete_auction() { #[test] fn should_choose_best_combination() { new_test_ext().execute_with(|| { - run_to_block(1); + System::run_to_block::(1); assert_ok!(Auctions::new_auction(RuntimeOrigin::signed(6), 5, 1)); assert_ok!(Auctions::bid(RuntimeOrigin::signed(1), 0.into(), 1, 1, 1, 1)); assert_ok!(Auctions::bid(RuntimeOrigin::signed(2), 0.into(), 1, 2, 3, 4)); assert_ok!(Auctions::bid(RuntimeOrigin::signed(3), 0.into(), 1, 4, 4, 2)); assert_ok!(Auctions::bid(RuntimeOrigin::signed(1), 1.into(), 1, 1, 4, 2)); - run_to_block(9); + System::run_to_block::(9); assert_eq!( leases(), @@ -295,7 +295,7 @@ fn should_choose_best_combination() { #[test] fn gap_bid_works() { new_test_ext().execute_with(|| { - run_to_block(1); + System::run_to_block::(1); assert_ok!(Auctions::new_auction(RuntimeOrigin::signed(6), 5, 1)); // User 1 will make a bid for period 1 and 4 for the same Para 0 @@ -314,7 +314,7 @@ fn gap_bid_works() { assert_eq!(Balances::reserved_balance(3), 3); // End the auction. - run_to_block(9); + System::run_to_block::(9); assert_eq!( leases(), @@ -334,11 +334,11 @@ fn gap_bid_works() { #[test] fn deposit_credit_should_work() { new_test_ext().execute_with(|| { - run_to_block(1); + System::run_to_block::(1); assert_ok!(Auctions::new_auction(RuntimeOrigin::signed(6), 5, 1)); assert_ok!(Auctions::bid(RuntimeOrigin::signed(1), 0.into(), 1, 1, 1, 5)); assert_eq!(Balances::reserved_balance(1), 5); - run_to_block(10); + System::run_to_block::(10); assert_eq!(leases(), vec![((0.into(), 1), LeaseData { leaser: 1, amount: 5 }),]); assert_eq!(TestLeaser::deposit_held(0.into(), &1), 5); @@ -347,7 +347,7 @@ fn deposit_credit_should_work() { assert_ok!(Auctions::bid(RuntimeOrigin::signed(1), 0.into(), 2, 2, 2, 6)); // Only 1 reserved since we have a deposit credit of 5. 
assert_eq!(Balances::reserved_balance(1), 1); - run_to_block(20); + System::run_to_block::(20); assert_eq!( leases(), @@ -363,11 +363,11 @@ fn deposit_credit_should_work() { #[test] fn deposit_credit_on_alt_para_should_not_count() { new_test_ext().execute_with(|| { - run_to_block(1); + System::run_to_block::(1); assert_ok!(Auctions::new_auction(RuntimeOrigin::signed(6), 5, 1)); assert_ok!(Auctions::bid(RuntimeOrigin::signed(1), 0.into(), 1, 1, 1, 5)); assert_eq!(Balances::reserved_balance(1), 5); - run_to_block(10); + System::run_to_block::(10); assert_eq!(leases(), vec![((0.into(), 1), LeaseData { leaser: 1, amount: 5 }),]); assert_eq!(TestLeaser::deposit_held(0.into(), &1), 5); @@ -376,7 +376,7 @@ fn deposit_credit_on_alt_para_should_not_count() { assert_ok!(Auctions::bid(RuntimeOrigin::signed(1), 1.into(), 2, 2, 2, 6)); // 6 reserved since we are bidding on a new para; only works because we don't assert_eq!(Balances::reserved_balance(1), 6); - run_to_block(20); + System::run_to_block::(20); assert_eq!( leases(), @@ -393,12 +393,12 @@ fn deposit_credit_on_alt_para_should_not_count() { #[test] fn multiple_bids_work_pre_ending() { new_test_ext().execute_with(|| { - run_to_block(1); + System::run_to_block::(1); assert_ok!(Auctions::new_auction(RuntimeOrigin::signed(6), 5, 1)); for i in 1..6u64 { - run_to_block(i as _); + System::run_to_block::(i as _); assert_ok!(Auctions::bid(RuntimeOrigin::signed(i), 0.into(), 1, 1, 4, i)); for j in 1..6 { assert_eq!(Balances::reserved_balance(j), if j == i { j } else { 0 }); @@ -406,7 +406,7 @@ fn multiple_bids_work_pre_ending() { } } - run_to_block(9); + System::run_to_block::(9); assert_eq!( leases(), vec![ @@ -422,12 +422,12 @@ fn multiple_bids_work_pre_ending() { #[test] fn multiple_bids_work_post_ending() { new_test_ext().execute_with(|| { - run_to_block(1); + System::run_to_block::(1); assert_ok!(Auctions::new_auction(RuntimeOrigin::signed(6), 0, 1)); for i in 1..6u64 { - run_to_block(((i - 1) / 2 + 1) as _); + System::run_to_block::(((i - 1) / 2 + 1) as _); assert_ok!(Auctions::bid(RuntimeOrigin::signed(i), 0.into(), 1, 1, 4, i)); for j in 1..6 { assert_eq!(Balances::reserved_balance(j), if j <= i { j } else { 0 }); @@ -438,7 +438,7 @@ fn multiple_bids_work_post_ending() { assert_eq!(ReservedAmounts::::get((i, ParaId::from(0))).unwrap(), i); } - run_to_block(5); + System::run_to_block::(5); assert_eq!( leases(), (1..=4) @@ -501,7 +501,7 @@ fn calculate_winners_works() { #[test] fn lower_bids_are_correctly_refunded() { new_test_ext().execute_with(|| { - run_to_block(1); + System::run_to_block::(1); assert_ok!(Auctions::new_auction(RuntimeOrigin::signed(6), 1, 1)); let para_1 = ParaId::from(1_u32); let para_2 = ParaId::from(2_u32); @@ -527,7 +527,7 @@ fn initialize_winners_in_ending_period_works() { new_test_ext().execute_with(|| { let ed: u64 = ::ExistentialDeposit::get(); assert_eq!(ed, 1); - run_to_block(1); + System::run_to_block::(1); assert_ok!(Auctions::new_auction(RuntimeOrigin::signed(6), 9, 1)); let para_1 = ParaId::from(1_u32); let para_2 = ParaId::from(2_u32); @@ -546,20 +546,20 @@ fn initialize_winners_in_ending_period_works() { winning[SlotRange::TwoThree as u8 as usize] = Some((2, para_2, 19)); assert_eq!(Winning::::get(0), Some(winning)); - run_to_block(9); + System::run_to_block::(9); assert_eq!( Auctions::auction_status(System::block_number()), AuctionStatus::::StartingPeriod ); - run_to_block(10); + System::run_to_block::(10); assert_eq!( Auctions::auction_status(System::block_number()), AuctionStatus::::EndingPeriod(0, 0) ); 
assert_eq!(Winning::::get(0), Some(winning)); - run_to_block(11); + System::run_to_block::(11); assert_eq!( Auctions::auction_status(System::block_number()), AuctionStatus::::EndingPeriod(1, 0) @@ -567,7 +567,7 @@ fn initialize_winners_in_ending_period_works() { assert_eq!(Winning::::get(1), Some(winning)); assert_ok!(Auctions::bid(RuntimeOrigin::signed(3), para_3, 1, 3, 4, 29)); - run_to_block(12); + System::run_to_block::(12); assert_eq!( Auctions::auction_status(System::block_number()), AuctionStatus::::EndingPeriod(2, 0) @@ -580,7 +580,7 @@ fn initialize_winners_in_ending_period_works() { #[test] fn handle_bid_requires_registered_para() { new_test_ext().execute_with(|| { - run_to_block(1); + System::run_to_block::(1); assert_ok!(Auctions::new_auction(RuntimeOrigin::signed(6), 5, 1)); assert_noop!( Auctions::bid(RuntimeOrigin::signed(1), 1337.into(), 1, 1, 4, 1), @@ -599,12 +599,12 @@ fn handle_bid_requires_registered_para() { #[test] fn handle_bid_checks_existing_lease_periods() { new_test_ext().execute_with(|| { - run_to_block(1); + System::run_to_block::(1); assert_ok!(Auctions::new_auction(RuntimeOrigin::signed(6), 5, 1)); assert_ok!(Auctions::bid(RuntimeOrigin::signed(1), 0.into(), 1, 2, 3, 1)); assert_eq!(Balances::reserved_balance(1), 1); assert_eq!(Balances::free_balance(1), 9); - run_to_block(9); + System::run_to_block::(9); assert_eq!( leases(), @@ -644,7 +644,7 @@ fn less_winning_samples_work() { EndingPeriod::set(30); SampleLength::set(10); - run_to_block(1); + System::run_to_block::(1); assert_ok!(Auctions::new_auction(RuntimeOrigin::signed(6), 9, 11)); let para_1 = ParaId::from(1_u32); let para_2 = ParaId::from(2_u32); @@ -663,13 +663,13 @@ fn less_winning_samples_work() { winning[SlotRange::TwoThree as u8 as usize] = Some((2, para_2, 19)); assert_eq!(Winning::::get(0), Some(winning)); - run_to_block(9); + System::run_to_block::(9); assert_eq!( Auctions::auction_status(System::block_number()), AuctionStatus::::StartingPeriod ); - run_to_block(10); + System::run_to_block::(10); assert_eq!( Auctions::auction_status(System::block_number()), AuctionStatus::::EndingPeriod(0, 0) @@ -681,19 +681,19 @@ fn less_winning_samples_work() { winning[SlotRange::ThreeThree as u8 as usize] = Some((3, para_3, 29)); assert_eq!(Winning::::get(0), Some(winning)); - run_to_block(20); + System::run_to_block::(20); assert_eq!( Auctions::auction_status(System::block_number()), AuctionStatus::::EndingPeriod(1, 0) ); assert_eq!(Winning::::get(1), Some(winning)); - run_to_block(25); + System::run_to_block::(25); // Overbid mid sample assert_ok!(Auctions::bid(RuntimeOrigin::signed(3), para_3, 1, 13, 14, 29)); winning[SlotRange::TwoThree as u8 as usize] = Some((3, para_3, 29)); assert_eq!(Winning::::get(1), Some(winning)); - run_to_block(30); + System::run_to_block::(30); assert_eq!( Auctions::auction_status(System::block_number()), AuctionStatus::::EndingPeriod(2, 0) @@ -701,7 +701,7 @@ fn less_winning_samples_work() { assert_eq!(Winning::::get(2), Some(winning)); set_last_random(H256::from([254; 32]), 40); - run_to_block(40); + System::run_to_block::(40); // Auction ended and winner selected assert_eq!( Auctions::auction_status(System::block_number()), @@ -729,71 +729,71 @@ fn auction_status_works() { AuctionStatus::::NotStarted ); - run_to_block(1); + System::run_to_block::(1); assert_ok!(Auctions::new_auction(RuntimeOrigin::signed(6), 9, 11)); - run_to_block(9); + System::run_to_block::(9); assert_eq!( Auctions::auction_status(System::block_number()), AuctionStatus::::StartingPeriod ); - 
run_to_block(10); + System::run_to_block::(10); assert_eq!( Auctions::auction_status(System::block_number()), AuctionStatus::::EndingPeriod(0, 0) ); - run_to_block(11); + System::run_to_block::(11); assert_eq!( Auctions::auction_status(System::block_number()), AuctionStatus::::EndingPeriod(0, 1) ); - run_to_block(19); + System::run_to_block::(19); assert_eq!( Auctions::auction_status(System::block_number()), AuctionStatus::::EndingPeriod(0, 9) ); - run_to_block(20); + System::run_to_block::(20); assert_eq!( Auctions::auction_status(System::block_number()), AuctionStatus::::EndingPeriod(1, 0) ); - run_to_block(25); + System::run_to_block::(25); assert_eq!( Auctions::auction_status(System::block_number()), AuctionStatus::::EndingPeriod(1, 5) ); - run_to_block(30); + System::run_to_block::(30); assert_eq!( Auctions::auction_status(System::block_number()), AuctionStatus::::EndingPeriod(2, 0) ); - run_to_block(39); + System::run_to_block::(39); assert_eq!( Auctions::auction_status(System::block_number()), AuctionStatus::::EndingPeriod(2, 9) ); - run_to_block(40); + System::run_to_block::(40); assert_eq!( Auctions::auction_status(System::block_number()), AuctionStatus::::VrfDelay(0) ); - run_to_block(44); + System::run_to_block::(44); assert_eq!( Auctions::auction_status(System::block_number()), AuctionStatus::::VrfDelay(4) ); set_last_random(dummy_hash(), 45); - run_to_block(45); + System::run_to_block::(45); assert_eq!( Auctions::auction_status(System::block_number()), AuctionStatus::::NotStarted @@ -804,7 +804,7 @@ fn auction_status_works() { #[test] fn can_cancel_auction() { new_test_ext().execute_with(|| { - run_to_block(1); + System::run_to_block::(1); assert_ok!(Auctions::new_auction(RuntimeOrigin::signed(6), 5, 1)); assert_ok!(Auctions::bid(RuntimeOrigin::signed(1), 0.into(), 1, 1, 4, 1)); assert_eq!(Balances::reserved_balance(1), 1); diff --git a/polkadot/runtime/common/src/crowdloan/mod.rs b/polkadot/runtime/common/src/crowdloan/mod.rs index 8cf288197e3d..f8b3169407e8 100644 --- a/polkadot/runtime/common/src/crowdloan/mod.rs +++ b/polkadot/runtime/common/src/crowdloan/mod.rs @@ -858,10 +858,7 @@ mod crypto { mod tests { use super::*; - use frame_support::{ - assert_noop, assert_ok, derive_impl, parameter_types, - traits::{OnFinalize, OnInitialize}, - }; + use frame_support::{assert_noop, assert_ok, derive_impl, parameter_types}; use polkadot_primitives::Id as ParaId; use sp_core::H256; use std::{cell::RefCell, collections::BTreeMap, sync::Arc}; @@ -1111,18 +1108,6 @@ mod tests { unreachable!() } - fn run_to_block(n: u64) { - while System::block_number() < n { - Crowdloan::on_finalize(System::block_number()); - Balances::on_finalize(System::block_number()); - System::on_finalize(System::block_number()); - System::set_block_number(System::block_number() + 1); - System::on_initialize(System::block_number()); - Balances::on_initialize(System::block_number()); - Crowdloan::on_initialize(System::block_number()); - } - } - fn last_event() -> RuntimeEvent { System::events().pop().expect("RuntimeEvent expected").event } @@ -1426,7 +1411,7 @@ mod tests { ); // Move past end date - run_to_block(10); + System::run_to_block::(10); // Cannot contribute to ended fund assert_noop!( @@ -1451,7 +1436,7 @@ mod tests { // crowdloan that has starting period 1. 
let para_3 = new_para(); assert_ok!(Crowdloan::create(RuntimeOrigin::signed(1), para_3, 1000, 1, 4, 40, None)); - run_to_block(40); + System::run_to_block::(40); let now = System::block_number(); assert_eq!(TestAuctioneer::lease_period_index(now).unwrap().0, 2); assert_noop!( @@ -1483,12 +1468,12 @@ mod tests { None )); - run_to_block(8); + System::run_to_block::(8); // Can def contribute when auction is running. assert!(TestAuctioneer::auction_status(System::block_number()).is_ending().is_some()); assert_ok!(Crowdloan::contribute(RuntimeOrigin::signed(2), para, 250, None)); - run_to_block(10); + System::run_to_block::(10); // Can't contribute when auction is in the VRF delay period. assert!(TestAuctioneer::auction_status(System::block_number()).is_vrf()); assert_noop!( @@ -1496,7 +1481,7 @@ mod tests { Error::::VrfDelayInProgress ); - run_to_block(15); + System::run_to_block::(15); // Its fine to contribute when no auction is running. assert!(!TestAuctioneer::auction_status(System::block_number()).is_in_progress()); assert_ok!(Crowdloan::contribute(RuntimeOrigin::signed(2), para, 250, None)); @@ -1526,15 +1511,15 @@ mod tests { let bidder = Crowdloan::fund_account_id(index); // Fund crowdloan - run_to_block(1); + System::run_to_block::(1); assert_ok!(Crowdloan::contribute(RuntimeOrigin::signed(2), para, 100, None)); - run_to_block(3); + System::run_to_block::(3); assert_ok!(Crowdloan::contribute(RuntimeOrigin::signed(3), para, 150, None)); - run_to_block(5); + System::run_to_block::(5); assert_ok!(Crowdloan::contribute(RuntimeOrigin::signed(4), para, 200, None)); - run_to_block(8); + System::run_to_block::(8); assert_ok!(Crowdloan::contribute(RuntimeOrigin::signed(2), para, 250, None)); - run_to_block(10); + System::run_to_block::(10); assert_eq!( bids(), @@ -1561,7 +1546,7 @@ mod tests { assert_ok!(Crowdloan::contribute(RuntimeOrigin::signed(2), para, 100, None)); assert_ok!(Crowdloan::contribute(RuntimeOrigin::signed(3), para, 50, None)); - run_to_block(10); + System::run_to_block::(10); let account_id = Crowdloan::fund_account_id(index); // para has no reserved funds, indicating it did not win the auction. 
assert_eq!(Balances::reserved_balance(&account_id), 0); @@ -1591,7 +1576,7 @@ mod tests { assert_ok!(Crowdloan::create(RuntimeOrigin::signed(1), para, 1000, 1, 1, 9, None)); assert_ok!(Crowdloan::contribute(RuntimeOrigin::signed(2), para, 100, None)); - run_to_block(10); + System::run_to_block::(10); let account_id = Crowdloan::fund_account_id(index); // user sends the crowdloan funds trying to make an accounting error @@ -1636,7 +1621,7 @@ mod tests { ); // Move to the end of the crowdloan - run_to_block(10); + System::run_to_block::(10); assert_ok!(Crowdloan::refund(RuntimeOrigin::signed(1337), para)); // Funds are returned @@ -1671,7 +1656,7 @@ mod tests { assert_eq!(Balances::free_balance(account_id), 21000); // Move to the end of the crowdloan - run_to_block(10); + System::run_to_block::(10); assert_ok!(Crowdloan::refund(RuntimeOrigin::signed(1337), para)); assert_eq!( last_event(), @@ -1705,7 +1690,7 @@ mod tests { assert_ok!(Crowdloan::contribute(RuntimeOrigin::signed(2), para, 100, None)); assert_ok!(Crowdloan::contribute(RuntimeOrigin::signed(3), para, 50, None)); - run_to_block(10); + System::run_to_block::(10); // All funds are refunded assert_ok!(Crowdloan::refund(RuntimeOrigin::signed(2), para)); @@ -1730,7 +1715,7 @@ mod tests { assert_ok!(Crowdloan::contribute(RuntimeOrigin::signed(2), para, 100, None)); assert_ok!(Crowdloan::contribute(RuntimeOrigin::signed(3), para, 50, None)); - run_to_block(10); + System::run_to_block::(10); // We test the historic case where crowdloan accounts only have one provider: { @@ -1770,7 +1755,7 @@ mod tests { Error::::NotReadyToDissolve ); - run_to_block(10); + System::run_to_block::(10); set_winner(para, 1, true); // Can't dissolve when it won. assert_noop!( @@ -1815,13 +1800,13 @@ mod tests { // simulate the reserving of para's funds. this actually happens in the Slots pallet. assert_ok!(Balances::reserve(&account_id, 149)); - run_to_block(19); + System::run_to_block::(19); assert_noop!( Crowdloan::withdraw(RuntimeOrigin::signed(2), 2, para), Error::::BidOrLeaseActive ); - run_to_block(20); + System::run_to_block::(20); // simulate the unreserving of para's funds, now that the lease expired. this actually // happens in the Slots pallet. 
Balances::unreserve(&account_id, 150); @@ -1949,7 +1934,7 @@ mod tests { Error::::NoContributions ); assert_ok!(Crowdloan::contribute(RuntimeOrigin::signed(2), para_1, 100, None)); - run_to_block(6); + System::run_to_block::(6); assert_ok!(Crowdloan::poke(RuntimeOrigin::signed(1), para_1)); assert_eq!(crowdloan::NewRaise::::get(), vec![para_1]); assert_noop!( diff --git a/polkadot/runtime/common/src/integration_tests.rs b/polkadot/runtime/common/src/integration_tests.rs index 8a76a138305e..bb4ad8b75065 100644 --- a/polkadot/runtime/common/src/integration_tests.rs +++ b/polkadot/runtime/common/src/integration_tests.rs @@ -28,7 +28,7 @@ use alloc::sync::Arc; use codec::Encode; use frame_support::{ assert_noop, assert_ok, derive_impl, parameter_types, - traits::{ConstU32, Currency, OnFinalize, OnInitialize}, + traits::{ConstU32, Currency}, weights::Weight, PalletId, }; @@ -377,14 +377,12 @@ fn add_blocks(n: u32) { } fn run_to_block(n: u32) { - assert!(System::block_number() < n); - while System::block_number() < n { - let block_number = System::block_number(); - AllPalletsWithSystem::on_finalize(block_number); - System::set_block_number(block_number + 1); - maybe_new_session(block_number + 1); - AllPalletsWithSystem::on_initialize(block_number + 1); - } + System::run_to_block_with::( + n, + frame_system::RunToBlockHooks::default().before_initialize(|bn| { + maybe_new_session(bn); + }), + ); } fn run_to_session(n: u32) { diff --git a/polkadot/runtime/common/src/paras_registrar/mock.rs b/polkadot/runtime/common/src/paras_registrar/mock.rs index 1627fd70365d..07b8fbca5189 100644 --- a/polkadot/runtime/common/src/paras_registrar/mock.rs +++ b/polkadot/runtime/common/src/paras_registrar/mock.rs @@ -20,10 +20,7 @@ use super::*; use crate::paras_registrar; use alloc::collections::btree_map::BTreeMap; -use frame_support::{ - derive_impl, parameter_types, - traits::{OnFinalize, OnInitialize}, -}; +use frame_support::{derive_impl, parameter_types}; use frame_system::limits; use polkadot_primitives::{Balance, BlockNumber, MAX_CODE_SIZE}; use polkadot_runtime_parachains::{configuration, origin, shared}; @@ -205,26 +202,21 @@ pub const VALIDATORS: &[Sr25519Keyring] = &[ pub fn run_to_block(n: BlockNumber) { // NOTE that this function only simulates modules of interest. Depending on new pallet may // require adding it here. - assert!(System::block_number() < n); - while System::block_number() < n { - let b = System::block_number(); - - if System::block_number() > 1 { - System::on_finalize(System::block_number()); - } - // Session change every 3 blocks. - if (b + 1) % BLOCKS_PER_SESSION == 0 { - let session_index = shared::CurrentSessionIndex::::get() + 1; - let validators_pub_keys = VALIDATORS.iter().map(|v| v.public().into()).collect(); - - shared::Pallet::::set_session_index(session_index); - shared::Pallet::::set_active_validators_ascending(validators_pub_keys); - - Parachains::test_on_new_session(); - } - System::set_block_number(b + 1); - System::on_initialize(System::block_number()); - } + System::run_to_block_with::( + n, + frame_system::RunToBlockHooks::default().before_finalize(|bn| { + // Session change every 3 blocks. 
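+ // Checking against (bn + 1) keeps the session rotation on the same block boundaries + // as the manual loop this hook replaces.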
+ if (bn + 1) % BLOCKS_PER_SESSION == 0 { + let session_index = shared::CurrentSessionIndex::::get() + 1; + let validators_pub_keys = VALIDATORS.iter().map(|v| v.public().into()).collect(); + + shared::Pallet::::set_session_index(session_index); + shared::Pallet::::set_active_validators_ascending(validators_pub_keys); + + Parachains::test_on_new_session(); + } + }), + ); } pub fn run_to_session(n: BlockNumber) { diff --git a/polkadot/runtime/common/src/slots/mod.rs b/polkadot/runtime/common/src/slots/mod.rs index 333f14c6608a..59a1f1870b2d 100644 --- a/polkadot/runtime/common/src/slots/mod.rs +++ b/polkadot/runtime/common/src/slots/mod.rs @@ -584,28 +584,16 @@ mod tests { t.into() } - fn run_to_block(n: BlockNumber) { - while System::block_number() < n { - Slots::on_finalize(System::block_number()); - Balances::on_finalize(System::block_number()); - System::on_finalize(System::block_number()); - System::set_block_number(System::block_number() + 1); - System::on_initialize(System::block_number()); - Balances::on_initialize(System::block_number()); - Slots::on_initialize(System::block_number()); - } - } - #[test] fn basic_setup_works() { new_test_ext().execute_with(|| { - run_to_block(1); + System::run_to_block::(1); assert_eq!(Slots::lease_period_length(), (10, 0)); let now = System::block_number(); assert_eq!(Slots::lease_period_index(now).unwrap().0, 0); assert_eq!(Slots::deposit_held(1.into(), &1), 0); - run_to_block(10); + System::run_to_block::(10); let now = System::block_number(); assert_eq!(Slots::lease_period_index(now).unwrap().0, 1); }); @@ -614,7 +602,7 @@ mod tests { #[test] fn lease_lifecycle_works() { new_test_ext().execute_with(|| { - run_to_block(1); + System::run_to_block::(1); assert_ok!(TestRegistrar::::register( 1, @@ -627,11 +615,11 @@ mod tests { assert_eq!(Slots::deposit_held(1.into(), &1), 1); assert_eq!(Balances::reserved_balance(1), 1); - run_to_block(19); + System::run_to_block::(19); assert_eq!(Slots::deposit_held(1.into(), &1), 1); assert_eq!(Balances::reserved_balance(1), 1); - run_to_block(20); + System::run_to_block::(20); assert_eq!(Slots::deposit_held(1.into(), &1), 0); assert_eq!(Balances::reserved_balance(1), 0); @@ -645,7 +633,7 @@ mod tests { #[test] fn lease_interrupted_lifecycle_works() { new_test_ext().execute_with(|| { - run_to_block(1); + System::run_to_block::(1); assert_ok!(TestRegistrar::::register( 1, @@ -657,19 +645,19 @@ mod tests { assert_ok!(Slots::lease_out(1.into(), &1, 6, 1, 1)); assert_ok!(Slots::lease_out(1.into(), &1, 4, 3, 1)); - run_to_block(19); + System::run_to_block::(19); assert_eq!(Slots::deposit_held(1.into(), &1), 6); assert_eq!(Balances::reserved_balance(1), 6); - run_to_block(20); + System::run_to_block::(20); assert_eq!(Slots::deposit_held(1.into(), &1), 4); assert_eq!(Balances::reserved_balance(1), 4); - run_to_block(39); + System::run_to_block::(39); assert_eq!(Slots::deposit_held(1.into(), &1), 4); assert_eq!(Balances::reserved_balance(1), 4); - run_to_block(40); + System::run_to_block::(40); assert_eq!(Slots::deposit_held(1.into(), &1), 0); assert_eq!(Balances::reserved_balance(1), 0); @@ -688,7 +676,7 @@ mod tests { #[test] fn lease_relayed_lifecycle_works() { new_test_ext().execute_with(|| { - run_to_block(1); + System::run_to_block::(1); assert_ok!(TestRegistrar::::register( 1, @@ -704,25 +692,25 @@ mod tests { assert_eq!(Slots::deposit_held(1.into(), &2), 4); assert_eq!(Balances::reserved_balance(2), 4); - run_to_block(19); + System::run_to_block::(19); assert_eq!(Slots::deposit_held(1.into(), &1), 6); 
assert_eq!(Balances::reserved_balance(1), 6); assert_eq!(Slots::deposit_held(1.into(), &2), 4); assert_eq!(Balances::reserved_balance(2), 4); - run_to_block(20); + System::run_to_block::(20); assert_eq!(Slots::deposit_held(1.into(), &1), 0); assert_eq!(Balances::reserved_balance(1), 0); assert_eq!(Slots::deposit_held(1.into(), &2), 4); assert_eq!(Balances::reserved_balance(2), 4); - run_to_block(29); + System::run_to_block::(29); assert_eq!(Slots::deposit_held(1.into(), &1), 0); assert_eq!(Balances::reserved_balance(1), 0); assert_eq!(Slots::deposit_held(1.into(), &2), 4); assert_eq!(Balances::reserved_balance(2), 4); - run_to_block(30); + System::run_to_block::(30); assert_eq!(Slots::deposit_held(1.into(), &1), 0); assert_eq!(Balances::reserved_balance(1), 0); assert_eq!(Slots::deposit_held(1.into(), &2), 0); @@ -738,7 +726,7 @@ mod tests { #[test] fn lease_deposit_increase_works() { new_test_ext().execute_with(|| { - run_to_block(1); + System::run_to_block::(1); assert_ok!(TestRegistrar::::register( 1, @@ -755,11 +743,11 @@ mod tests { assert_eq!(Slots::deposit_held(1.into(), &1), 6); assert_eq!(Balances::reserved_balance(1), 6); - run_to_block(29); + System::run_to_block::(29); assert_eq!(Slots::deposit_held(1.into(), &1), 6); assert_eq!(Balances::reserved_balance(1), 6); - run_to_block(30); + System::run_to_block::(30); assert_eq!(Slots::deposit_held(1.into(), &1), 0); assert_eq!(Balances::reserved_balance(1), 0); @@ -773,7 +761,7 @@ mod tests { #[test] fn lease_deposit_decrease_works() { new_test_ext().execute_with(|| { - run_to_block(1); + System::run_to_block::(1); assert_ok!(TestRegistrar::::register( 1, @@ -790,19 +778,19 @@ mod tests { assert_eq!(Slots::deposit_held(1.into(), &1), 6); assert_eq!(Balances::reserved_balance(1), 6); - run_to_block(19); + System::run_to_block::(19); assert_eq!(Slots::deposit_held(1.into(), &1), 6); assert_eq!(Balances::reserved_balance(1), 6); - run_to_block(20); + System::run_to_block::(20); assert_eq!(Slots::deposit_held(1.into(), &1), 4); assert_eq!(Balances::reserved_balance(1), 4); - run_to_block(29); + System::run_to_block::(29); assert_eq!(Slots::deposit_held(1.into(), &1), 4); assert_eq!(Balances::reserved_balance(1), 4); - run_to_block(30); + System::run_to_block::(30); assert_eq!(Slots::deposit_held(1.into(), &1), 0); assert_eq!(Balances::reserved_balance(1), 0); @@ -816,7 +804,7 @@ mod tests { #[test] fn clear_all_leases_works() { new_test_ext().execute_with(|| { - run_to_block(1); + System::run_to_block::(1); assert_ok!(TestRegistrar::::register( 1, @@ -852,7 +840,7 @@ mod tests { #[test] fn lease_out_current_lease_period() { new_test_ext().execute_with(|| { - run_to_block(1); + System::run_to_block::(1); assert_ok!(TestRegistrar::::register( 1, @@ -867,7 +855,7 @@ mod tests { dummy_validation_code() )); - run_to_block(20); + System::run_to_block::(20); let now = System::block_number(); assert_eq!(Slots::lease_period_index(now).unwrap().0, 2); // Can't lease from the past @@ -884,7 +872,7 @@ mod tests { #[test] fn trigger_onboard_works() { new_test_ext().execute_with(|| { - run_to_block(1); + System::run_to_block::(1); assert_ok!(TestRegistrar::::register( 1, ParaId::from(1_u32), diff --git a/polkadot/runtime/rococo/src/lib.rs b/polkadot/runtime/rococo/src/lib.rs index cab4394eb5a8..b3f2a0033278 100644 --- a/polkadot/runtime/rococo/src/lib.rs +++ b/polkadot/runtime/rococo/src/lib.rs @@ -1885,7 +1885,8 @@ sp_api::impl_runtime_apis! 
{ } fn query_weight_to_asset_fee(weight: Weight, asset: VersionedAssetId) -> Result { - match asset.try_as::() { + let latest_asset_id: Result = asset.clone().try_into(); + match latest_asset_id { Ok(asset_id) if asset_id.0 == xcm_config::TokenLocation::get() => { // for native token Ok(WeightToFee::weight_to_fee(&weight)) @@ -2276,7 +2277,7 @@ sp_api::impl_runtime_apis! { } fn current_set_id() -> fg_primitives::SetId { - Grandpa::current_set_id() + pallet_grandpa::CurrentSetId::::get() } fn submit_report_equivocation_unsigned_extrinsic( diff --git a/polkadot/runtime/test-runtime/src/lib.rs b/polkadot/runtime/test-runtime/src/lib.rs index 82564d5c278c..4f9ba8d8508c 100644 --- a/polkadot/runtime/test-runtime/src/lib.rs +++ b/polkadot/runtime/test-runtime/src/lib.rs @@ -1186,7 +1186,7 @@ sp_api::impl_runtime_apis! { } fn current_set_id() -> fg_primitives::SetId { - Grandpa::current_set_id() + pallet_grandpa::CurrentSetId::::get() } fn submit_report_equivocation_unsigned_extrinsic( diff --git a/polkadot/runtime/westend/src/lib.rs b/polkadot/runtime/westend/src/lib.rs index 166f3fc42eef..58d2bdcb7c7d 100644 --- a/polkadot/runtime/westend/src/lib.rs +++ b/polkadot/runtime/westend/src/lib.rs @@ -2300,7 +2300,7 @@ sp_api::impl_runtime_apis! { } fn current_set_id() -> fg_primitives::SetId { - Grandpa::current_set_id() + pallet_grandpa::CurrentSetId::::get() } fn submit_report_equivocation_unsigned_extrinsic( @@ -2445,7 +2445,8 @@ sp_api::impl_runtime_apis! { } fn query_weight_to_asset_fee(weight: Weight, asset: VersionedAssetId) -> Result { - match asset.try_as::() { + let latest_asset_id: Result = asset.clone().try_into(); + match latest_asset_id { Ok(asset_id) if asset_id.0 == xcm_config::TokenLocation::get() => { // for native token Ok(WeightToFee::weight_to_fee(&weight)) diff --git a/polkadot/xcm/xcm-builder/src/lib.rs b/polkadot/xcm/xcm-builder/src/lib.rs index 3d68d8ed16ae..e23412a97ebc 100644 --- a/polkadot/xcm/xcm-builder/src/lib.rs +++ b/polkadot/xcm/xcm-builder/src/lib.rs @@ -132,11 +132,13 @@ pub use routing::{ mod transactional; pub use transactional::FrameTransactionalProcessor; +#[allow(deprecated)] +pub use universal_exports::UnpaidLocalExporter; mod universal_exports; pub use universal_exports::{ ensure_is_remote, BridgeBlobDispatcher, BridgeMessage, DispatchBlob, DispatchBlobError, - ExporterFor, HaulBlob, HaulBlobError, HaulBlobExporter, NetworkExportTable, - NetworkExportTableItem, SovereignPaidRemoteExporter, UnpaidLocalExporter, UnpaidRemoteExporter, + ExporterFor, HaulBlob, HaulBlobError, HaulBlobExporter, LocalExporter, NetworkExportTable, + NetworkExportTableItem, SovereignPaidRemoteExporter, UnpaidRemoteExporter, }; mod weight; diff --git a/polkadot/xcm/xcm-builder/src/tests/bridging/local_para_para.rs b/polkadot/xcm/xcm-builder/src/tests/bridging/local_para_para.rs index ea584bf9d485..5e930fe575c2 100644 --- a/polkadot/xcm/xcm-builder/src/tests/bridging/local_para_para.rs +++ b/polkadot/xcm/xcm-builder/src/tests/bridging/local_para_para.rs @@ -28,7 +28,7 @@ parameter_types! { type TheBridge = TestBridge>; type Router = TestTopic< - UnpaidLocalExporter< + LocalExporter< HaulBlobExporter, UniversalLocation, >, diff --git a/polkadot/xcm/xcm-builder/src/tests/bridging/local_relay_relay.rs b/polkadot/xcm/xcm-builder/src/tests/bridging/local_relay_relay.rs index 38ffe2532d58..a41f09721812 100644 --- a/polkadot/xcm/xcm-builder/src/tests/bridging/local_relay_relay.rs +++ b/polkadot/xcm/xcm-builder/src/tests/bridging/local_relay_relay.rs @@ -28,7 +28,7 @@ parameter_types! 
{ type TheBridge = TestBridge>; type Router = TestTopic< - UnpaidLocalExporter< + LocalExporter< HaulBlobExporter, UniversalLocation, >, diff --git a/polkadot/xcm/xcm-builder/src/tests/bridging/mod.rs b/polkadot/xcm/xcm-builder/src/tests/bridging/mod.rs index 767575e7f2dd..90ad9921d65a 100644 --- a/polkadot/xcm/xcm-builder/src/tests/bridging/mod.rs +++ b/polkadot/xcm/xcm-builder/src/tests/bridging/mod.rs @@ -209,7 +209,7 @@ impl, Remote: Get, RemoteExporter: ExportXcm> S let origin = Local::get().relative_to(&Remote::get()); AllowUnpaidFrom::set(vec![origin.clone()]); set_exporter_override(price::, deliver::); - // The we execute it: + // Then we execute it: let mut id = fake_id(); let outcome = XcmExecutor::::prepare_and_execute( origin, diff --git a/polkadot/xcm/xcm-builder/src/universal_exports.rs b/polkadot/xcm/xcm-builder/src/universal_exports.rs index 6b3c3adf737d..e215aea3ab68 100644 --- a/polkadot/xcm/xcm-builder/src/universal_exports.rs +++ b/polkadot/xcm/xcm-builder/src/universal_exports.rs @@ -16,6 +16,8 @@ //! Traits and utilities to help with origin mutation and bridging. +#![allow(deprecated)] + use crate::InspectMessageQueues; use alloc::{vec, vec::Vec}; use codec::{Decode, Encode}; @@ -58,6 +60,8 @@ pub fn ensure_is_remote( /// that the message sending cannot be abused in any way. /// /// This is only useful when the local chain has bridging capabilities. +#[deprecated(note = "Will be removed after July 2025; It uses hard-coded channel `0`, \ + use `xcm_builder::LocalExporter` directly instead.")] pub struct UnpaidLocalExporter( PhantomData<(Exporter, UniversalLocation)>, ); @@ -100,6 +104,54 @@ impl> SendXcm fn ensure_successful_delivery(_: Option) {} } +/// Implementation of `SendXcm` which uses the given `ExportXcm` implementation in order to forward +/// the message over a bridge. +/// +/// This is only useful when the local chain has bridging capabilities. +pub struct LocalExporter(PhantomData<(Exporter, UniversalLocation)>); +impl> SendXcm + for LocalExporter +{ + type Ticket = Exporter::Ticket; + + fn validate( + dest: &mut Option, + msg: &mut Option>, + ) -> SendResult { + // This `clone` ensures that `dest` is not consumed in any case. + let d = dest.clone().take().ok_or(MissingArgument)?; + let universal_source = UniversalLocation::get(); + let devolved = ensure_is_remote(universal_source.clone(), d).map_err(|_| NotApplicable)?; + let (remote_network, remote_location) = devolved; + let xcm = msg.take().ok_or(MissingArgument)?; + + let hash = + (Some(Location::here()), &remote_location).using_encoded(sp_io::hashing::blake2_128); + let channel = u32::decode(&mut hash.as_ref()).unwrap_or(0); + + validate_export::( + remote_network, + channel, + universal_source, + remote_location, + xcm.clone(), + ) + .inspect_err(|err| { + if let NotApplicable = err { + // We need to make sure that msg is not consumed in case of `NotApplicable`. + *msg = Some(xcm); + } + }) + } + + fn deliver(ticket: Exporter::Ticket) -> Result { + Exporter::deliver(ticket) + } + + #[cfg(feature = "runtime-benchmarks")] + fn ensure_successful_delivery(_: Option) {} +} + pub trait ExporterFor { /// Return the locally-routable bridge (if any) capable of forwarding `message` to the /// `remote_location` on the remote `network`, together with the payment which is required. 
@@ -703,9 +755,9 @@ mod tests { let local_dest: Location = (Parent, Parachain(5678)).into(); assert!(ensure_is_remote(UniversalLocation::get(), local_dest.clone()).is_err()); - // UnpaidLocalExporter + // LocalExporter ensure_validate_does_not_consume_dest_or_msg::< - UnpaidLocalExporter, + LocalExporter, >(local_dest.clone(), |result| assert_eq!(Err(NotApplicable), result)); // 2. check with not applicable from the inner router (using `NotApplicableBridgeSender`) @@ -713,14 +765,14 @@ mod tests { (Parent, Parent, DifferentRemote::get(), RemoteDestination::get()).into(); assert!(ensure_is_remote(UniversalLocation::get(), remote_dest.clone()).is_ok()); - // UnpaidLocalExporter + // LocalExporter ensure_validate_does_not_consume_dest_or_msg::< - UnpaidLocalExporter, + LocalExporter, >(remote_dest.clone(), |result| assert_eq!(Err(NotApplicable), result)); // 3. Ok - deliver // UnpaidRemoteExporter - assert_ok!(send_xcm::>( + assert_ok!(send_xcm::>( remote_dest, Xcm::default() )); diff --git a/prdoc/pr_4529.prdoc b/prdoc/pr_4529.prdoc new file mode 100644 index 000000000000..32beea17ad6b --- /dev/null +++ b/prdoc/pr_4529.prdoc @@ -0,0 +1,22 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Removed `pallet::getter` usage from pallet-grandpa + +doc: + - audience: Runtime Dev + description: | + This PR removed the `pallet::getter`s from `pallet-grandpa`. + The syntax `StorageItem::::get()` should be used instead. + +crates: + - name: pallet-grandpa + bump: minor + - name: kitchensink-runtime + bump: none + - name: westend-runtime + bump: none + - name: polkadot-test-runtime + bump: none + - name: rococo-runtime + bump: none diff --git a/prdoc/pr_6647.prdoc b/prdoc/pr_6647.prdoc new file mode 100644 index 000000000000..47af9924ef1c --- /dev/null +++ b/prdoc/pr_6647.prdoc @@ -0,0 +1,8 @@ +title: '`fatxpool`: proper handling of priorities when mempool is full' +doc: +- audience: Node Dev + description: |- + Higher-priority transactions can now replace lower-priority transactions even when the internal _tx_mem_pool_ is full. +crates: +- name: sc-transaction-pool + bump: minor diff --git a/prdoc/pr_6689.prdoc b/prdoc/pr_6689.prdoc new file mode 100644 index 000000000000..2cbb49cd7dd2 --- /dev/null +++ b/prdoc/pr_6689.prdoc @@ -0,0 +1,19 @@ +title: '[pallet-revive] Update gas encoding' +doc: +- audience: Runtime Dev + description: |- + Update the current approach to attach the `ref_time`, `pov` and `deposit` parameters to an Ethereum transaction. +Previously, these three parameters were passed along with the signed payload, and the fees resulting from gas × gas_price were checked to ensure they matched the actual fees paid by the user for the extrinsic. + + Unfortunately, this approach can be attacked. A malicious actor could force such a transaction to fail by injecting low values for some of these extra parameters, as they are not part of the signed payload.
+ + The new approach encodes these 3 extra parameters in the lower digits of the transaction gas, using the log2 of the actual values to encode each component on 2 digits. +crates: +- name: pallet-revive-eth-rpc + bump: minor +- name: pallet-revive + bump: minor +- name: asset-hub-westend-runtime + bump: minor +- name: pallet-revive-mock-network + bump: minor diff --git a/prdoc/pr_6807.prdoc b/prdoc/pr_6807.prdoc new file mode 100644 index 000000000000..b9564dfb2fe2 --- /dev/null +++ b/prdoc/pr_6807.prdoc @@ -0,0 +1,19 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Retry approval on availability failure if the check is still needed + +doc: + - audience: Node Dev + description: | + Recovering the PoV can fail in situations where the node has just restarted and the DHT topology + isn't fully discovered yet, so the current node can't connect to most of its peers. + This is bad because gossiping the assignment only requires being connected to a few + peers, yet since we can't approve the candidate, other nodes will see this as a no-show. + Fix it by retrying to approve a candidate for a fixed number of attempts if the block is + still needed. + + +crates: + - name: polkadot-node-core-approval-voting + bump: minor diff --git a/prdoc/pr_6825.prdoc b/prdoc/pr_6825.prdoc new file mode 100644 index 000000000000..d57b2b573a10 --- /dev/null +++ b/prdoc/pr_6825.prdoc @@ -0,0 +1,50 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Use relay chain slot for velocity measurement on parachains + +doc: + - audience: Runtime Dev + description: | + The AuraExt pallet's `ConsensusHook` performs checks based on a parachain's velocity. It previously + checked how many blocks were produced in a given parachain slot. This only works well when the parachain + and relay chain slot lengths are the same. After this PR, we check against the relay chain slot. + + **🚨 Action Required:** A migration named `cumulus_pallet_aura_ext::migration::MigrateV0ToV1` is included + that cleans up a renamed storage item. Parachains must add it to their runtimes, as sketched below. More information is available in + the [reference docs](https://paritytech.github.io/polkadot-sdk/master/polkadot_sdk_docs/reference_docs/frame_runtime_upgrades_and_migrations/index.html#single-block-migrations).
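+
+      For illustration, a hedged sketch of wiring this single-block migration into a
+      parachain runtime (the `Runtime` name and the surrounding tuple are assumptions,
+      not taken from this PR):
+
+          // Hypothetical runtime snippet; the migration is assumed to be generic
+          // over the runtime, as single-block migrations usually are.
+          pub type Migrations = (
+              cumulus_pallet_aura_ext::migration::MigrateV0ToV1<Runtime>,
+              // ...other pending migrations...
+          );
+          // `Migrations` is then passed to `frame_executive::Executive` via its
+          // custom on-runtime-upgrade generic parameter, as usual.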
+ +crates: + - name: cumulus-pallet-parachain-system + bump: minor + - name: cumulus-pallet-aura-ext + bump: major + - name: cumulus-primitives-aura + bump: major + - name: cumulus-client-parachain-inherent + bump: minor + - name: cumulus-client-consensus-aura + bump: minor + - name: xcm-emulator + bump: minor + - name: asset-hub-rococo-runtime + bump: minor + - name: asset-hub-westend-runtime + bump: minor + - name: bridge-hub-rococo-runtime + bump: minor + - name: bridge-hub-westend-runtime + bump: minor + - name: collectives-westend-runtime + bump: minor + - name: coretime-rococo-runtime + bump: minor + - name: coretime-westend-runtime + bump: minor + - name: people-rococo-runtime + bump: minor + - name: people-westend-runtime + bump: minor + - name: contracts-rococo-runtime + bump: minor + diff --git a/prdoc/pr_6836.prdoc b/prdoc/pr_6836.prdoc new file mode 100644 index 000000000000..1de081bbaa40 --- /dev/null +++ b/prdoc/pr_6836.prdoc @@ -0,0 +1,17 @@ +title: '[pallet-revive-eth-rpc] persist eth transaction hash' +doc: +- audience: Runtime Dev + description: |- + Add an option to persist EVM transaction hashes to a SQL db. + This makes it possible to run a full archive ETH RPC node (assuming the substrate node is also a full archive node). + + Some queries such as eth_getTransactionByHash, eth_getBlockTransactionCountByHash, and others need to work with a transaction hash index, which is not available in Substrate and needs to be stored by the eth-rpc proxy. + + The refactoring breaks the Client down into a `BlockInfoProvider` and a `ReceiptProvider`: + - BlockInfoProvider does not need any persisted data, as we can fetch all block info from the source substrate chain + - ReceiptProvider comes in two flavors: + - An in-memory cache implementation - this is the one we had so far. + - A DB implementation - this one persists rows with the block_hash, the transaction_index and the transaction_hash, so that we can later fetch the block and extrinsic for that receipt and reconstruct the ReceiptInfo object. +crates: +- name: pallet-revive-eth-rpc + bump: minor diff --git a/prdoc/pr_6971.prdoc b/prdoc/pr_6971.prdoc new file mode 100644 index 000000000000..4790d773fee4 --- /dev/null +++ b/prdoc/pr_6971.prdoc @@ -0,0 +1,16 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Make importing of duplicate assignment idempotent + +doc: + - audience: Node Dev + description: | + Normally, approval-voting wouldn't receive duplicate assignments because approval-distribution makes + sure of it. However, after a restart we might receive the same assignment again, and + since approval-voting already persisted it, we would end up inserting it twice into ApprovalEntry.tranches.assignments, + because that's an array. Fix this by inserting only assignments that are not duplicates.
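+
+      A minimal sketch of the idea (hedged: the names below are illustrative, not the
+      exact approval-voting types):
+
+          // Hypothetical: push the assignment only if an equal entry is absent,
+          // making repeated imports of the same assignment idempotent.
+          if !tranche_entry.assignments.iter().any(|existing| existing == &new_assignment) {
+              tranche_entry.assignments.push(new_assignment);
+          }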
+ +crates: + - name: polkadot-node-core-approval-voting + bump: minor diff --git a/prdoc/pr_6973.prdoc b/prdoc/pr_6973.prdoc new file mode 100644 index 000000000000..416789b9171a --- /dev/null +++ b/prdoc/pr_6973.prdoc @@ -0,0 +1,16 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: approval-voting fix sending of assignments after restart + +doc: + - audience: Node Dev + description: | + There is a problem on restart where nodes will not trigger their needed assignment if + they were offline while the time of the assignment passed. So, after restart, always + schedule a wakeup so that nodes have the opportunity to trigger their assignments + if they are still needed. + +crates: + - name: polkadot-node-core-approval-voting + bump: minor diff --git a/prdoc/pr_7102.prdoc b/prdoc/pr_7102.prdoc new file mode 100644 index 000000000000..b1923aafc3db --- /dev/null +++ b/prdoc/pr_7102.prdoc @@ -0,0 +1,8 @@ +title: '`fatxpool`: rotator cache size now depends on pool''s limits' +doc: +- audience: Node Dev + description: |- + This PR modifies the hard-coded size of the extrinsics cache within `PoolRotator` to be in line with the pool's limits. It only applies to the fork-aware transaction pool. For the legacy (single-state) transaction pool the logic remains untouched. +crates: +- name: sc-transaction-pool + bump: minor diff --git a/prdoc/pr_7109.prdoc b/prdoc/pr_7109.prdoc new file mode 100644 index 000000000000..e54ef3295135 --- /dev/null +++ b/prdoc/pr_7109.prdoc @@ -0,0 +1,11 @@ +title: Add "run to block" tools +doc: +- audience: Runtime Dev + description: |- + Introduce `frame_system::Pallet::run_to_block`, `frame_system::Pallet::run_to_block_with`, and `frame_system::RunToBlockHooks` to establish a generic `run_to_block` mechanism for mock tests, minimizing redundant implementations across various pallets. + + Closes #299. + +crates: +- name: frame-system + bump: minor diff --git a/prdoc/pr_7116.prdoc b/prdoc/pr_7116.prdoc new file mode 100644 index 000000000000..95a5254778a4 --- /dev/null +++ b/prdoc/pr_7116.prdoc @@ -0,0 +1,8 @@ +title: Increase the number of pvf execution workers from 2 to 4 +doc: +- audience: Node Dev + description: |- + Increase the number of pvf execution workers from 2 to 4. +crates: +- name: polkadot-service + bump: patch diff --git a/prdoc/pr_7126.prdoc b/prdoc/pr_7126.prdoc new file mode 100644 index 000000000000..1a86af1b2d1d --- /dev/null +++ b/prdoc/pr_7126.prdoc @@ -0,0 +1,7 @@ +title: 'xcm: Fixes for `UnpaidLocalExporter`' +doc: +- audience: Runtime Dev + description: This PR deprecates `UnpaidLocalExporter` in favor of the new `LocalExporter`. First, the name is misleading, as it can be used in both paid and unpaid scenarios. Second, it contains a hard-coded channel 0, whereas `LocalExporter` uses the same algorithm as `xcm-exporter`. +crates: +- name: staging-xcm-builder + bump: minor diff --git a/prdoc/pr_7127.prdoc b/prdoc/pr_7127.prdoc new file mode 100644 index 000000000000..761ddd04dbe1 --- /dev/null +++ b/prdoc/pr_7127.prdoc @@ -0,0 +1,9 @@ +title: 'Forbid v1 descriptors with UMP signals' +doc: +- audience: [Runtime Dev, Node Dev] + description: Adds a check that parachain candidates do not send out UMP signals with v1 descriptors.
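+
+      Conceptually, the added check is along these lines (hedged: the names are
+      illustrative, not the exact candidate-validation code):
+
+          // Hypothetical: v1 descriptors must not carry UMP signals.
+          if descriptor_version == DescriptorVersion::V1 && !ump_signals.is_empty() {
+              return Err(InvalidCandidate); // illustrative error value
+          }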
+crates: +- name: polkadot-node-core-candidate-validation + bump: minor +- name: polkadot-primitives + bump: major diff --git a/prdoc/pr_7133.prdoc b/prdoc/pr_7133.prdoc new file mode 100644 index 000000000000..ca0d2bb0bd48 --- /dev/null +++ b/prdoc/pr_7133.prdoc @@ -0,0 +1,15 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Suffix litep2p to the identify agent version for visibility + +doc: + - audience: [Node Dev, Node Operator] + description: | + This PR adds the `(litep2p)` suffix to the agent version (user agent) of the identify protocol. + The change is needed to gain visibility into network backends and determine exactly the number of validators that are running litep2p. + Using tools like subp2p-explorer, we can determine if the validators are running litep2p nodes. + +crates: +- name: sc-network + bump: patch diff --git a/prdoc/pr_7134.prdoc b/prdoc/pr_7134.prdoc new file mode 100644 index 000000000000..095d4757f438 --- /dev/null +++ b/prdoc/pr_7134.prdoc @@ -0,0 +1,11 @@ +title: 'xcm: properly convert assets in xcmpayment apis' +doc: +- audience: Runtime User + description: |- + Port #6459 changes to relays as well, which were probably forgotten in that PR. + Thanks! +crates: +- name: rococo-runtime + bump: patch +- name: westend-runtime + bump: patch diff --git a/prdoc/pr_7158.prdoc b/prdoc/pr_7158.prdoc new file mode 100644 index 000000000000..e113a7fdcd1c --- /dev/null +++ b/prdoc/pr_7158.prdoc @@ -0,0 +1,12 @@ +title: Reject litep2p inbound requests from banned peers + +doc: + - audience: Node Dev + description: | + This PR rejects inbound requests from banned peers (reputation is below the banned threshold). + This mirrors the request-response implementation from the libp2p side. + While at it, a new inbound failure metric was registered to provide visibility into this. + +crates: +- name: sc-network + bump: patch diff --git a/prdoc/pr_7170.prdoc b/prdoc/pr_7170.prdoc new file mode 100644 index 000000000000..fae908f7407d --- /dev/null +++ b/prdoc/pr_7170.prdoc @@ -0,0 +1,8 @@ +title: Fix reversed error message in DispatchInfo +doc: +- audience: Runtime Dev + description: "Fix error message in `DispatchInfo` where post-dispatch and pre-dispatch\ + \ weight was reversed.\r\n" +crates: +- name: frame-support + bump: patch diff --git a/substrate/bin/node/runtime/src/lib.rs b/substrate/bin/node/runtime/src/lib.rs index 93b134e8165f..e11a009c1c3f 100644 --- a/substrate/bin/node/runtime/src/lib.rs +++ b/substrate/bin/node/runtime/src/lib.rs @@ -1468,6 +1468,7 @@ impl pallet_revive::Config for Runtime { type Xcm = (); type ChainId = ConstU64<420_420_420>; type NativeToEthRatio = ConstU32<1_000_000>; // 10^(18 - 12) Eth is 10^18, Native is 10^12. + type EthGasEncoder = (); } impl pallet_sudo::Config for Runtime { @@ -2978,7 +2979,7 @@ impl_runtime_apis!
{ } fn current_set_id() -> sp_consensus_grandpa::SetId { - Grandpa::current_set_id() + pallet_grandpa::CurrentSetId::::get() } fn submit_report_equivocation_unsigned_extrinsic( diff --git a/substrate/client/network/src/litep2p/discovery.rs b/substrate/client/network/src/litep2p/discovery.rs index 2bea2e5a80dc..b55df374f60e 100644 --- a/substrate/client/network/src/litep2p/discovery.rs +++ b/substrate/client/network/src/litep2p/discovery.rs @@ -254,7 +254,7 @@ impl Discovery { _peerstore_handle: Arc, ) -> (Self, PingConfig, IdentifyConfig, KademliaConfig, Option) { let (ping_config, ping_event_stream) = PingConfig::default(); - let user_agent = format!("{} ({})", config.client_version, config.node_name); + let user_agent = format!("{} ({}) (litep2p)", config.client_version, config.node_name); let (identify_config, identify_event_stream) = IdentifyConfig::new("/substrate/1.0".to_string(), Some(user_agent)); diff --git a/substrate/client/network/src/litep2p/shim/request_response/mod.rs b/substrate/client/network/src/litep2p/shim/request_response/mod.rs index 146f2e4add97..690d5a31e6ad 100644 --- a/substrate/client/network/src/litep2p/shim/request_response/mod.rs +++ b/substrate/client/network/src/litep2p/shim/request_response/mod.rs @@ -273,6 +273,13 @@ impl RequestResponseProtocol { request_id: RequestId, request: Vec, ) { + log::trace!( + target: LOG_TARGET, + "{}: request received from {peer:?} ({fallback:?} {request_id:?}), request size {:?}", + self.protocol, + request.len(), + ); + let Some(inbound_queue) = &self.inbound_queue else { log::trace!( target: LOG_TARGET, @@ -284,12 +291,18 @@ impl RequestResponseProtocol { return; }; - log::trace!( - target: LOG_TARGET, - "{}: request received from {peer:?} ({fallback:?} {request_id:?}), request size {:?}", - self.protocol, - request.len(), - ); + if self.peerstore_handle.is_banned(&peer.into()) { + log::trace!( + target: LOG_TARGET, + "{}: rejecting inbound request from banned {peer:?} ({request_id:?})", + self.protocol, + ); + + self.handle.reject_request(request_id); + self.metrics.register_inbound_request_failure("banned-peer"); + return; + } + let (tx, rx) = oneshot::channel(); match inbound_queue.try_send(IncomingRequest { diff --git a/substrate/client/transaction-pool/benches/basics.rs b/substrate/client/transaction-pool/benches/basics.rs index 5e40b0fb72d6..5ba9dd40c156 100644 --- a/substrate/client/transaction-pool/benches/basics.rs +++ b/substrate/client/transaction-pool/benches/basics.rs @@ -197,14 +197,22 @@ fn benchmark_main(c: &mut Criterion) { c.bench_function("sequential 50 tx", |b| { b.iter(|| { let api = Arc::from(TestApi::new_dependant()); - bench_configured(Pool::new(Default::default(), true.into(), api.clone()), 50, api); + bench_configured( + Pool::new_with_staticly_sized_rotator(Default::default(), true.into(), api.clone()), + 50, + api, + ); }); }); c.bench_function("random 100 tx", |b| { b.iter(|| { let api = Arc::from(TestApi::default()); - bench_configured(Pool::new(Default::default(), true.into(), api.clone()), 100, api); + bench_configured( + Pool::new_with_staticly_sized_rotator(Default::default(), true.into(), api.clone()), + 100, + api, + ); }); }); } diff --git a/substrate/client/transaction-pool/src/common/tests.rs b/substrate/client/transaction-pool/src/common/tests.rs index b00cf5fbfede..7f2cbe24d8ef 100644 --- a/substrate/client/transaction-pool/src/common/tests.rs +++ b/substrate/client/transaction-pool/src/common/tests.rs @@ -222,5 +222,5 @@ pub(crate) fn uxt(transfer: Transfer) -> Extrinsic { 
pub(crate) fn pool() -> (Pool, Arc) { let api = Arc::new(TestApi::default()); - (Pool::new(Default::default(), true.into(), api.clone()), api) + (Pool::new_with_staticly_sized_rotator(Default::default(), true.into(), api.clone()), api) } diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/dropped_watcher.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/dropped_watcher.rs index 7679e3b169d2..bf61558b00b0 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/dropped_watcher.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/dropped_watcher.rs @@ -53,11 +53,13 @@ pub struct DroppedTransaction { } impl DroppedTransaction { - fn new_usurped(tx_hash: Hash, by: Hash) -> Self { + /// Creates a new instance with reason set to `DroppedReason::Usurped(by)`. + pub fn new_usurped(tx_hash: Hash, by: Hash) -> Self { Self { reason: DroppedReason::Usurped(by), tx_hash } } - fn new_enforced_by_limts(tx_hash: Hash) -> Self { + /// Creates a new instance with reason set to `DroppedReason::LimitsEnforced`. + pub fn new_enforced_by_limts(tx_hash: Hash) -> Self { Self { reason: DroppedReason::LimitsEnforced, tx_hash } } } @@ -256,11 +258,13 @@ where self.future_transaction_views.entry(tx_hash).or_default().insert(block_hash); }, TransactionStatus::Ready | TransactionStatus::InBlock(..) => { - // note: if future transaction was once seens as the ready we may want to treat it - // as ready transactions. Unreferenced future transactions are more likely to be - // removed when the last referencing view is removed then ready transactions. - // Transcaction seen as ready is likely quite close to be included in some - // future fork. + // note: if a future transaction was once seen as ready, we may want to treat it + // as a ready transaction. The rationale behind this is as follows: we want to remove + // unreferenced future transactions when the last referencing view is removed (to + // avoid clogging the mempool). For ready transactions, we prefer to keep them in the + // mempool even if no view is currently referencing them. A future transaction once + // seen as ready is likely quite close to being included in some future fork (it is + // close to being ready, so we make an exception and treat such a transaction as ready). if let Some(mut views) = self.future_transaction_views.remove(&tx_hash) { views.insert(block_hash); self.ready_transaction_views.insert(tx_hash, views); @@ -329,14 +333,14 @@ where let stream_map = futures::stream::unfold(ctx, |mut ctx| async move { loop { if let Some(dropped) = ctx.get_pending_dropped_transaction() { - debug!("dropped_watcher: sending out (pending): {dropped:?}"); + trace!("dropped_watcher: sending out (pending): {dropped:?}"); return Some((dropped, ctx)); } tokio::select!
{ biased; Some(event) = next_event(&mut ctx.stream_map) => { if let Some(dropped) = ctx.handle_event(event.0, event.1) { - debug!("dropped_watcher: sending out: {dropped:?}"); + trace!("dropped_watcher: sending out: {dropped:?}"); return Some((dropped, ctx)); } }, diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs index 4ec87f1fefa4..766045718252 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs @@ -31,7 +31,10 @@ use crate::{ api::FullChainApi, common::log_xt::log_xt_trace, enactment_state::{EnactmentAction, EnactmentState}, - fork_aware_txpool::{dropped_watcher::DroppedReason, revalidation_worker}, + fork_aware_txpool::{ + dropped_watcher::{DroppedReason, DroppedTransaction}, + revalidation_worker, + }, graph::{ self, base_pool::{TimedTransactionSource, Transaction}, @@ -49,14 +52,16 @@ use futures::{ use parking_lot::Mutex; use prometheus_endpoint::Registry as PrometheusRegistry; use sc_transaction_pool_api::{ - ChainEvent, ImportNotificationStream, MaintainedTransactionPool, PoolStatus, TransactionFor, - TransactionPool, TransactionSource, TransactionStatusStreamFor, TxHash, + error::Error as TxPoolApiError, ChainEvent, ImportNotificationStream, + MaintainedTransactionPool, PoolStatus, TransactionFor, TransactionPool, TransactionPriority, + TransactionSource, TransactionStatusStreamFor, TxHash, }; use sp_blockchain::{HashAndNumber, TreeRoute}; use sp_core::traits::SpawnEssentialNamed; use sp_runtime::{ generic::BlockId, traits::{Block as BlockT, NumberFor}, + transaction_validity::{TransactionValidityError, ValidTransaction}, }; use std::{ collections::{HashMap, HashSet}, @@ -287,7 +292,7 @@ where DroppedReason::LimitsEnforced => {}, }; - mempool.remove_dropped_transaction(&dropped_tx_hash).await; + mempool.remove_transaction(&dropped_tx_hash); view_store.listener.transaction_dropped(dropped); import_notification_sink.clean_notified_items(&[dropped_tx_hash]); } @@ -318,7 +323,7 @@ where pool_api.clone(), listener.clone(), metrics.clone(), - TXMEMPOOL_TRANSACTION_LIMIT_MULTIPLIER * (options.ready.count + options.future.count), + TXMEMPOOL_TRANSACTION_LIMIT_MULTIPLIER * options.total_count(), options.ready.total_bytes + options.future.total_bytes, )); @@ -598,7 +603,7 @@ where /// out: /// [ Ok(xth0), Ok(xth1), Err ] /// ``` -fn reduce_multiview_result(input: HashMap>>) -> Vec> { +fn reduce_multiview_result(input: HashMap>>) -> Vec> { let mut values = input.values(); let Some(first) = values.next() else { return Default::default(); @@ -650,9 +655,28 @@ where let mempool_results = self.mempool.extend_unwatched(source, &xts); if view_store.is_empty() { - return Ok(mempool_results.into_iter().map(|r| r.map(|r| r.hash)).collect::>()) + return Ok(mempool_results + .into_iter() + .map(|r| r.map(|r| r.hash).map_err(Into::into)) + .collect::>()) } + // Submit all the transactions to the mempool + let retries = mempool_results + .into_iter() + .zip(xts.clone()) + .map(|(result, xt)| async move { + match result { + Err(TxPoolApiError::ImmediatelyDropped) => + self.attempt_transaction_replacement(source, false, xt).await, + _ => result, + } + }) + .collect::>(); + + let mempool_results = futures::future::join_all(retries).await; + + // Collect transactions that were successfully submitted to the mempool... 
let to_be_submitted = mempool_results .iter() .zip(xts) @@ -664,22 +688,47 @@ where self.metrics .report(|metrics| metrics.submitted_transactions.inc_by(to_be_submitted.len() as _)); + // ... and submit them to the view_store. Please note that transactions rejected by mempool + // are not sent here. let mempool = self.mempool.clone(); let results_map = view_store.submit(to_be_submitted.into_iter()).await; let mut submission_results = reduce_multiview_result(results_map).into_iter(); + // Note for composing final result: + // + // For each failed insertion into the mempool, the mempool result should be placed into + // the returned vector. + // + // For each successful insertion into the mempool, the corresponding + // view_store submission result needs to be examined: + // - If there is an error during view_store submission, the transaction is removed from + // the mempool, and the final result recorded in the vector for this transaction is the + // view_store submission error. + // + // - If the view_store submission is successful, the transaction priority is updated in the + // mempool. + // + // Finally, it collects the hashes of updated transactions or submission errors (either + // from the mempool or view_store) into a returned vector. Ok(mempool_results .into_iter() .map(|result| { - result.and_then(|insertion| { - submission_results - .next() - .expect("The number of Ok results in mempool is exactly the same as the size of to-views-submission result. qed.") - .inspect_err(|_| - mempool.remove(insertion.hash) - ) + result + .map_err(Into::into) + .and_then(|insertion| { + submission_results + .next() + .expect("The number of Ok results in mempool is exactly the same as the size of view_store submission result. qed.") + .inspect_err(|_|{ + mempool.remove_transaction(&insertion.hash); + }) }) + }) + .map(|r| r.map(|r| { + mempool.update_transaction_priority(&r); + r.hash() + })) .collect::>()) } @@ -712,10 +761,13 @@ where ) -> Result>>, Self::Error> { log::trace!(target: LOG_TARGET, "[{:?}] fatp::submit_and_watch views:{}", self.tx_hash(&xt), self.active_views_count()); let xt = Arc::from(xt); - let InsertionInfo { hash: xt_hash, source: timed_source } = + + let InsertionInfo { hash: xt_hash, source: timed_source, .. 
} = match self.mempool.push_watched(source, xt.clone()) { Ok(result) => result, - Err(e) => return Err(e), + Err(TxPoolApiError::ImmediatelyDropped) => + self.attempt_transaction_replacement(source, true, xt.clone()).await?, + Err(e) => return Err(e.into()), }; self.metrics.report(|metrics| metrics.submitted_transactions.inc()); @@ -723,7 +775,13 @@ where self.view_store .submit_and_watch(at, timed_source, xt) .await - .inspect_err(|_| self.mempool.remove(xt_hash)) + .inspect_err(|_| { + self.mempool.remove_transaction(&xt_hash); + }) + .map(|mut outcome| { + self.mempool.update_transaction_priority(&outcome); + outcome.expect_watcher() + }) } /// Intended to remove transactions identified by the given hashes, and any dependent @@ -828,22 +886,16 @@ where } } -impl sc_transaction_pool_api::LocalTransactionPool - for ForkAwareTxPool, Block> +impl sc_transaction_pool_api::LocalTransactionPool + for ForkAwareTxPool where Block: BlockT, + ChainApi: 'static + graph::ChainApi, ::Hash: Unpin, - Client: sp_api::ProvideRuntimeApi - + sc_client_api::BlockBackend - + sc_client_api::blockchain::HeaderBackend - + sp_runtime::traits::BlockIdTo - + sp_blockchain::HeaderMetadata, - Client: Send + Sync + 'static, - Client::Api: sp_transaction_pool::runtime_api::TaggedTransactionQueue, { type Block = Block; - type Hash = ExtrinsicHash>; - type Error = as graph::ChainApi>::Error; + type Hash = ExtrinsicHash; + type Error = ChainApi::Error; fn submit_local( &self, @@ -852,12 +904,29 @@ where ) -> Result { log::debug!(target: LOG_TARGET, "fatp::submit_local views:{}", self.active_views_count()); let xt = Arc::from(xt); - let InsertionInfo { hash: xt_hash, .. } = self - .mempool - .extend_unwatched(TransactionSource::Local, &[xt.clone()]) - .remove(0)?; - self.view_store.submit_local(xt).or_else(|_| Ok(xt_hash)) + let result = + self.mempool.extend_unwatched(TransactionSource::Local, &[xt.clone()]).remove(0); + + let insertion = match result { + Err(TxPoolApiError::ImmediatelyDropped) => self.attempt_transaction_replacement_sync( + TransactionSource::Local, + false, + xt.clone(), + ), + _ => result, + }?; + + self.view_store + .submit_local(xt) + .inspect_err(|_| { + self.mempool.remove_transaction(&insertion.hash); + }) + .map(|outcome| { + self.mempool.update_transaction_priority(&outcome); + outcome.hash() + }) + .or_else(|_| Ok(insertion.hash)) } } @@ -1109,7 +1178,11 @@ where .await .into_iter() .zip(hashes) - .map(|(result, tx_hash)| result.or_else(|_| Err(tx_hash))) + .map(|(result, tx_hash)| { + result + .map(|outcome| self.mempool.update_transaction_priority(&outcome.into())) + .or_else(|_| Err(tx_hash)) + }) .collect::>(); let submitted_count = watched_results.len(); @@ -1131,7 +1204,7 @@ where for result in watched_results { if let Err(tx_hash) = result { self.view_store.listener.invalidate_transactions(&[tx_hash]); - self.mempool.remove(tx_hash); + self.mempool.remove_transaction(&tx_hash); } } } @@ -1263,6 +1336,101 @@ where fn tx_hash(&self, xt: &TransactionFor) -> TxHash { self.api.hash_and_length(xt).0 } + + /// Attempts to find and replace a lower-priority transaction in the transaction pool with a new + /// one. + /// + /// This asynchronous function verifies the new transaction against the most recent view. If a + /// transaction with a lower priority exists in the transaction pool, it is replaced with the + /// new transaction. + /// + /// If no lower-priority transaction is found, the function returns an error indicating the + /// transaction was dropped immediately. 
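+ ///
+ /// In short: the transaction is validated against the most recent view to obtain its
+ /// priority, and `try_insert_with_replacement` then evicts enough lower-priority
+ /// transactions to free the required space; each evicted transaction is reported to
+ /// listeners as dropped due to enforced limits.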
+ async fn attempt_transaction_replacement( + &self, + source: TransactionSource, + watched: bool, + xt: ExtrinsicFor, + ) -> Result>, TxPoolApiError> { + let at = self + .view_store + .most_recent_view + .read() + .ok_or(TxPoolApiError::ImmediatelyDropped)?; + + let (best_view, _) = self + .view_store + .get_view_at(at, false) + .ok_or(TxPoolApiError::ImmediatelyDropped)?; + + let (xt_hash, validated_tx) = best_view + .pool + .verify_one( + best_view.at.hash, + best_view.at.number, + TimedTransactionSource::from_transaction_source(source, false), + xt.clone(), + crate::graph::CheckBannedBeforeVerify::Yes, + ) + .await; + + let Some(priority) = validated_tx.priority() else { + return Err(TxPoolApiError::ImmediatelyDropped) + }; + + self.attempt_transaction_replacement_inner(xt, xt_hash, priority, source, watched) + } + + /// Sync version of [`Self::attempt_transaction_replacement`]. + fn attempt_transaction_replacement_sync( + &self, + source: TransactionSource, + watched: bool, + xt: ExtrinsicFor, + ) -> Result>, TxPoolApiError> { + let at = self + .view_store + .most_recent_view + .read() + .ok_or(TxPoolApiError::ImmediatelyDropped)?; + + let ValidTransaction { priority, .. } = self + .api + .validate_transaction_blocking(at, TransactionSource::Local, Arc::from(xt.clone())) + .map_err(|_| TxPoolApiError::ImmediatelyDropped)? + .map_err(|e| match e { + TransactionValidityError::Invalid(i) => TxPoolApiError::InvalidTransaction(i), + TransactionValidityError::Unknown(u) => TxPoolApiError::UnknownTransaction(u), + })?; + let xt_hash = self.hash_of(&xt); + self.attempt_transaction_replacement_inner(xt, xt_hash, priority, source, watched) + } + + fn attempt_transaction_replacement_inner( + &self, + xt: ExtrinsicFor, + tx_hash: ExtrinsicHash, + priority: TransactionPriority, + source: TransactionSource, + watched: bool, + ) -> Result>, TxPoolApiError> { + let insertion_info = + self.mempool.try_insert_with_replacement(xt, priority, source, watched)?; + + for worst_hash in &insertion_info.removed { + log::trace!(target: LOG_TARGET, "removed: {worst_hash:?} replaced by {tx_hash:?}"); + self.view_store + .listener + .transaction_dropped(DroppedTransaction::new_enforced_by_limts(*worst_hash)); + + self.view_store + .remove_transaction_subtree(*worst_hash, |listener, removed_tx_hash| { + listener.limits_enforced(&removed_tx_hash); + }); + } + + return Ok(insertion_info) + } } #[async_trait] @@ -1410,7 +1578,7 @@ mod reduce_multiview_result_tests { fn empty() { sp_tracing::try_init_simple(); let input = HashMap::default(); - let r = reduce_multiview_result::(input); + let r = reduce_multiview_result::(input); assert!(r.is_empty()); } diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/tx_mem_pool.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/tx_mem_pool.rs index 989ae4425dc4..c8a4d0c72dd3 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/tx_mem_pool.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/tx_mem_pool.rs @@ -26,7 +26,10 @@ //! it), while on other forks tx can be valid. Depending on which view is chosen to be cloned, //! such transaction could not be present in the newly created view. 
-use super::{metrics::MetricsLink as PrometheusMetrics, multi_view_listener::MultiViewListener}; +use super::{ + metrics::MetricsLink as PrometheusMetrics, multi_view_listener::MultiViewListener, + view_store::ViewStoreSubmitOutcome, +}; use crate::{ common::log_xt::log_xt_trace, graph, @@ -35,15 +38,20 @@ use crate::{ }; use futures::FutureExt; use itertools::Itertools; -use sc_transaction_pool_api::TransactionSource; +use parking_lot::RwLock; +use sc_transaction_pool_api::{TransactionPriority, TransactionSource}; use sp_blockchain::HashAndNumber; use sp_runtime::{ traits::Block as BlockT, transaction_validity::{InvalidTransaction, TransactionValidityError}, }; use std::{ + cmp::Ordering, collections::HashMap, - sync::{atomic, atomic::AtomicU64, Arc}, + sync::{ + atomic::{self, AtomicU64}, + Arc, + }, time::Instant, }; @@ -77,6 +85,9 @@ where source: TimedTransactionSource, /// When the transaction was revalidated, used to periodically revalidate the mem pool buffer. validated_at: AtomicU64, + /// Priority of transaction at some block. It is assumed it will not be changed often. None if + /// not known. + priority: RwLock>, //todo: we need to add future / ready status at finalized block. //If future transactions are stuck in tx_mem_pool (due to limits being hit), we need a means // to replace them somehow with newly coming transactions. @@ -101,23 +112,50 @@ where /// Creates a new instance of wrapper for unwatched transaction. fn new_unwatched(source: TransactionSource, tx: ExtrinsicFor, bytes: usize) -> Self { - Self { - watched: false, - tx, - source: TimedTransactionSource::from_transaction_source(source, true), - validated_at: AtomicU64::new(0), - bytes, - } + Self::new(false, source, tx, bytes) } /// Creates a new instance of wrapper for watched transaction. fn new_watched(source: TransactionSource, tx: ExtrinsicFor, bytes: usize) -> Self { + Self::new(true, source, tx, bytes) + } + + /// Creates a new instance of wrapper for a transaction with no priority. + fn new( + watched: bool, + source: TransactionSource, + tx: ExtrinsicFor, + bytes: usize, + ) -> Self { + Self::new_with_optional_priority(watched, source, tx, bytes, None) + } + + /// Creates a new instance of wrapper for a transaction with given priority. + fn new_with_priority( + watched: bool, + source: TransactionSource, + tx: ExtrinsicFor, + bytes: usize, + priority: TransactionPriority, + ) -> Self { + Self::new_with_optional_priority(watched, source, tx, bytes, Some(priority)) + } + + /// Creates a new instance of wrapper for a transaction with optional priority. + fn new_with_optional_priority( + watched: bool, + source: TransactionSource, + tx: ExtrinsicFor, + bytes: usize, + priority: Option, + ) -> Self { Self { - watched: true, + watched, tx, source: TimedTransactionSource::from_transaction_source(source, true), validated_at: AtomicU64::new(0), bytes, + priority: priority.into(), } } @@ -132,6 +170,11 @@ where pub(crate) fn source(&self) -> TimedTransactionSource { self.source.clone() } + + /// Returns the priority of the transaction. 
+ pub(crate) fn priority(&self) -> Option { + *self.priority.read() + } } impl Size for Arc> @@ -191,11 +234,15 @@ where pub(super) struct InsertionInfo { pub(super) hash: Hash, pub(super) source: TimedTransactionSource, + pub(super) removed: Vec, } impl InsertionInfo { fn new(hash: Hash, source: TimedTransactionSource) -> Self { - Self { hash, source } + Self::new_with_removed(hash, source, Default::default()) + } + fn new_with_removed(hash: Hash, source: TimedTransactionSource, removed: Vec) -> Self { + Self { hash, source, removed } + } } @@ -279,27 +326,109 @@ where &self, hash: ExtrinsicHash, tx: TxInMemPool, - ) -> Result>, ChainApi::Error> { - let bytes = self.transactions.bytes(); + ) -> Result>, sc_transaction_pool_api::error::Error> { let mut transactions = self.transactions.write(); + + let bytes = self.transactions.bytes(); + let result = match ( - !self.is_limit_exceeded(transactions.len() + 1, bytes + tx.bytes), + self.is_limit_exceeded(transactions.len() + 1, bytes + tx.bytes), transactions.contains_key(&hash), ) { - (true, false) => { + (false, false) => { let source = tx.source(); transactions.insert(hash, Arc::from(tx)); Ok(InsertionInfo::new(hash, source)) }, (_, true) => - Err(sc_transaction_pool_api::error::Error::AlreadyImported(Box::new(hash)).into()), - (false, _) => Err(sc_transaction_pool_api::error::Error::ImmediatelyDropped.into()), + Err(sc_transaction_pool_api::error::Error::AlreadyImported(Box::new(hash))), + (true, _) => Err(sc_transaction_pool_api::error::Error::ImmediatelyDropped), }; log::trace!(target: LOG_TARGET, "[{:?}] mempool::try_insert: {:?}", hash, result.as_ref().map(|r| r.hash)); result } + /// Attempts to insert a new transaction in the memory pool and drop some worse existing + /// transactions. + /// + /// A "worse" transaction means a transaction with lower priority, or an older transaction + /// with the same priority. + /// + /// This operation will not overflow the limit of the mempool. It means that the cumulative + /// size of the removed transactions will be equal to (or greater than) the size of the newly + /// inserted transaction. + /// + /// Returns a `Result` containing `InsertionInfo` if the new transaction is successfully + /// inserted; otherwise, returns an appropriate error indicating the failure. + pub(super) fn try_insert_with_replacement( + &self, + new_tx: ExtrinsicFor, + priority: TransactionPriority, + source: TransactionSource, + watched: bool, + ) -> Result>, sc_transaction_pool_api::error::Error> { + let (hash, length) = self.api.hash_and_length(&new_tx); + let new_tx = TxInMemPool::new_with_priority(watched, source, new_tx, length, priority); + if new_tx.bytes > self.max_transactions_total_bytes { + return Err(sc_transaction_pool_api::error::Error::ImmediatelyDropped); + } + + let mut transactions = self.transactions.write(); + + if transactions.contains_key(&hash) { + return Err(sc_transaction_pool_api::error::Error::AlreadyImported(Box::new(hash))); + } + + let mut sorted = transactions + .iter() + .filter_map(|(h, v)| v.priority().map(|_| (*h, v.clone()))) + .collect::>(); + + // When pushing a higher-priority transaction, we need to find a number of lower-priority + txs such + // that the sum of their bytes is greater than or equal to the size of the new tx. + Otherwise we could overflow the size + // limits. A naive way to do it: rev-sort by priority and eat the tail.
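+
+ // The sort below orders candidates by descending priority (and, among equal
+ // priorities, newest first), so that the loop that follows can pop the oldest,
+ // lowest-priority candidates from the tail, accumulating their sizes until the new
+ // transaction fits; it bails out with `ImmediatelyDropped` if only
+ // equal-or-higher-priority candidates remain.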
+ + // reverse (oldest, lowest prio last) + sorted.sort_by(|(_, a), (_, b)| match b.priority().cmp(&a.priority()) { + Ordering::Equal => match (a.source.timestamp, b.source.timestamp) { + (Some(a), Some(b)) => b.cmp(&a), + _ => Ordering::Equal, + }, + ordering => ordering, + }); + + let mut total_size_removed = 0usize; + let mut to_be_removed = vec![]; + let free_bytes = self.max_transactions_total_bytes - self.transactions.bytes(); + + loop { + let Some((worst_hash, worst_tx)) = sorted.pop() else { + return Err(sc_transaction_pool_api::error::Error::ImmediatelyDropped); + }; + + if worst_tx.priority() >= new_tx.priority() { + return Err(sc_transaction_pool_api::error::Error::ImmediatelyDropped); + } + + total_size_removed += worst_tx.bytes; + to_be_removed.push(worst_hash); + + if free_bytes + total_size_removed >= new_tx.bytes { + break; + } + } + + let source = new_tx.source(); + transactions.insert(hash, Arc::from(new_tx)); + for worst_hash in &to_be_removed { + transactions.remove(worst_hash); + } + debug_assert!(!self.is_limit_exceeded(transactions.len(), self.transactions.bytes())); + + Ok(InsertionInfo::new_with_removed(hash, source, to_be_removed)) + } + /// Adds a new unwatched transactions to the internal buffer not exceeding the limit. /// /// Returns the vector of results for each transaction, the order corresponds to the input @@ -308,7 +437,8 @@ where &self, source: TransactionSource, xts: &[ExtrinsicFor], - ) -> Vec>, ChainApi::Error>> { + ) -> Vec>, sc_transaction_pool_api::error::Error>> + { let result = xts .iter() .map(|xt| { @@ -325,20 +455,11 @@ where &self, source: TransactionSource, xt: ExtrinsicFor, - ) -> Result>, ChainApi::Error> { + ) -> Result>, sc_transaction_pool_api::error::Error> { let (hash, length) = self.api.hash_and_length(&xt); self.try_insert(hash, TxInMemPool::new_watched(source, xt.clone(), length)) } - /// Removes transaction from the memory pool which are specified by the given list of hashes. - pub(super) async fn remove_dropped_transaction( - &self, - dropped: &ExtrinsicHash, - ) -> Option>> { - log::debug!(target: LOG_TARGET, "[{:?}] mempool::remove_dropped_transaction", dropped); - self.transactions.write().remove(dropped) - } - /// Clones and returns a `HashMap` of references to all unwatched transactions in the memory /// pool. pub(super) fn clone_unwatched( @@ -362,9 +483,13 @@ where .collect::>() } - /// Removes a transaction from the memory pool based on a given hash. - pub(super) fn remove(&self, hash: ExtrinsicHash) { - let _ = self.transactions.write().remove(&hash); + /// Removes a transaction with given hash from the memory pool. + pub(super) fn remove_transaction( + &self, + hash: &ExtrinsicHash, + ) -> Option>> { + log::debug!(target: LOG_TARGET, "[{hash:?}] mempool::remove_transaction"); + self.transactions.write().remove(hash) } /// Revalidates a batch of transactions against the provided finalized block. @@ -462,6 +587,17 @@ where }); self.listener.invalidate_transactions(&invalid_hashes); } + + /// Updates the priority of transaction stored in mempool using provided view_store submission + /// outcome. 
+ pub(super) fn update_transaction_priority(&self, outcome: &ViewStoreSubmitOutcome) { + outcome.priority().map(|priority| { + self.transactions + .write() + .get_mut(&outcome.hash()) + .map(|p| *p.priority.write() = Some(priority)) + }); + } } #[cfg(test)] @@ -583,6 +719,9 @@ mod tx_mem_pool_tests { assert_eq!(mempool.unwatched_and_watched_count(), (10, 5)); } + /// size of large extrinsic + const LARGE_XT_SIZE: usize = 1129; + fn large_uxt(x: usize) -> Extrinsic { ExtrinsicBuilder::new_include_data(vec![x as u8; 1024]).build() } @@ -592,8 +731,7 @@ mod tx_mem_pool_tests { sp_tracing::try_init_simple(); let max = 10; let api = Arc::from(TestApi::default()); - //size of large extrinsic is: 1129 - let mempool = TxMemPool::new_test(api.clone(), usize::MAX, max * 1129); + let mempool = TxMemPool::new_test(api.clone(), usize::MAX, max * LARGE_XT_SIZE); let xts = (0..max).map(|x| Arc::from(large_uxt(x))).collect::>(); @@ -617,4 +755,200 @@ mod tx_mem_pool_tests { sc_transaction_pool_api::error::Error::ImmediatelyDropped )); } + + #[test] + fn replacing_txs_works_for_same_tx_size() { + sp_tracing::try_init_simple(); + let max = 10; + let api = Arc::from(TestApi::default()); + let mempool = TxMemPool::new_test(api.clone(), usize::MAX, max * LARGE_XT_SIZE); + + let xts = (0..max).map(|x| Arc::from(large_uxt(x))).collect::>(); + + let low_prio = 0u64; + let hi_prio = u64::MAX; + + let total_xts_bytes = xts.iter().fold(0, |r, x| r + api.hash_and_length(&x).1); + let (submit_outcomes, hashes): (Vec<_>, Vec<_>) = xts + .iter() + .map(|t| { + let h = api.hash_and_length(t).0; + (ViewStoreSubmitOutcome::new(h, Some(low_prio)), h) + }) + .unzip(); + + let results = mempool.extend_unwatched(TransactionSource::External, &xts); + assert!(results.iter().all(Result::is_ok)); + assert_eq!(mempool.bytes(), total_xts_bytes); + + submit_outcomes + .into_iter() + .for_each(|o| mempool.update_transaction_priority(&o)); + + let xt = Arc::from(large_uxt(98)); + let hash = api.hash_and_length(&xt).0; + let result = mempool + .try_insert_with_replacement(xt, hi_prio, TransactionSource::External, false) + .unwrap(); + + assert_eq!(result.hash, hash); + assert_eq!(result.removed, hashes[0..1]); + } + + #[test] + fn replacing_txs_removes_proper_size_of_txs() { + sp_tracing::try_init_simple(); + let max = 10; + let api = Arc::from(TestApi::default()); + let mempool = TxMemPool::new_test(api.clone(), usize::MAX, max * LARGE_XT_SIZE); + + let xts = (0..max).map(|x| Arc::from(large_uxt(x))).collect::>(); + + let low_prio = 0u64; + let hi_prio = u64::MAX; + + let total_xts_bytes = xts.iter().fold(0, |r, x| r + api.hash_and_length(&x).1); + let (submit_outcomes, hashes): (Vec<_>, Vec<_>) = xts + .iter() + .map(|t| { + let h = api.hash_and_length(t).0; + (ViewStoreSubmitOutcome::new(h, Some(low_prio)), h) + }) + .unzip(); + + let results = mempool.extend_unwatched(TransactionSource::External, &xts); + assert!(results.iter().all(Result::is_ok)); + assert_eq!(mempool.bytes(), total_xts_bytes); + assert_eq!(total_xts_bytes, max * LARGE_XT_SIZE); + + submit_outcomes + .into_iter() + .for_each(|o| mempool.update_transaction_priority(&o)); + + //this one should drop 2 xts (size: 1130): + let xt = Arc::from(ExtrinsicBuilder::new_include_data(vec![98 as u8; 1025]).build()); + let (hash, length) = api.hash_and_length(&xt); + assert_eq!(length, 1130); + let result = mempool + .try_insert_with_replacement(xt, hi_prio, TransactionSource::External, false) + .unwrap(); + + assert_eq!(result.hash, hash); + assert_eq!(result.removed, 
hashes[0..2]); + } + + #[test] + fn replacing_txs_removes_proper_size_and_prios() { + sp_tracing::try_init_simple(); + const COUNT: usize = 10; + let api = Arc::from(TestApi::default()); + let mempool = TxMemPool::new_test(api.clone(), usize::MAX, COUNT * LARGE_XT_SIZE); + + let xts = (0..COUNT).map(|x| Arc::from(large_uxt(x))).collect::>(); + + let hi_prio = u64::MAX; + + let total_xts_bytes = xts.iter().fold(0, |r, x| r + api.hash_and_length(&x).1); + let (submit_outcomes, hashes): (Vec<_>, Vec<_>) = xts + .iter() + .enumerate() + .map(|(prio, t)| { + let h = api.hash_and_length(t).0; + (ViewStoreSubmitOutcome::new(h, Some((COUNT - prio).try_into().unwrap())), h) + }) + .unzip(); + + let results = mempool.extend_unwatched(TransactionSource::External, &xts); + assert!(results.iter().all(Result::is_ok)); + assert_eq!(mempool.bytes(), total_xts_bytes); + + submit_outcomes + .into_iter() + .for_each(|o| mempool.update_transaction_priority(&o)); + + //this one should drop 3 xts (each of size 1129) + let xt = Arc::from(ExtrinsicBuilder::new_include_data(vec![98 as u8; 2154]).build()); + let (hash, length) = api.hash_and_length(&xt); + // overhead is 105, thus length: 105 + 2154 + assert_eq!(length, 2 * LARGE_XT_SIZE + 1); + let result = mempool + .try_insert_with_replacement(xt, hi_prio, TransactionSource::External, false) + .unwrap(); + + assert_eq!(result.hash, hash); + assert!(result.removed.iter().eq(hashes[COUNT - 3..COUNT].iter().rev())); + } + + #[test] + fn replacing_txs_skips_lower_prio_tx() { + sp_tracing::try_init_simple(); + const COUNT: usize = 10; + let api = Arc::from(TestApi::default()); + let mempool = TxMemPool::new_test(api.clone(), usize::MAX, COUNT * LARGE_XT_SIZE); + + let xts = (0..COUNT).map(|x| Arc::from(large_uxt(x))).collect::>(); + + let hi_prio = 100u64; + let low_prio = 10u64; + + let total_xts_bytes = xts.iter().fold(0, |r, x| r + api.hash_and_length(&x).1); + let submit_outcomes = xts + .iter() + .map(|t| { + let h = api.hash_and_length(t).0; + ViewStoreSubmitOutcome::new(h, Some(hi_prio)) + }) + .collect::>(); + + let results = mempool.extend_unwatched(TransactionSource::External, &xts); + assert!(results.iter().all(Result::is_ok)); + assert_eq!(mempool.bytes(), total_xts_bytes); + + submit_outcomes + .into_iter() + .for_each(|o| mempool.update_transaction_priority(&o)); + + let xt = Arc::from(large_uxt(98)); + let result = + mempool.try_insert_with_replacement(xt, low_prio, TransactionSource::External, false); + + // lower prio tx is rejected immediately + assert!(matches!( + result.unwrap_err(), + sc_transaction_pool_api::error::Error::ImmediatelyDropped + )); + } + + #[test] + fn replacing_txs_is_skipped_if_prios_are_not_set() { + sp_tracing::try_init_simple(); + const COUNT: usize = 10; + let api = Arc::from(TestApi::default()); + let mempool = TxMemPool::new_test(api.clone(), usize::MAX, COUNT * LARGE_XT_SIZE); + + let xts = (0..COUNT).map(|x| Arc::from(large_uxt(x))).collect::>(); + + let hi_prio = u64::MAX; + + let total_xts_bytes = xts.iter().fold(0, |r, x| r + api.hash_and_length(&x).1); + + let results = mempool.extend_unwatched(TransactionSource::External, &xts); + assert!(results.iter().all(Result::is_ok)); + assert_eq!(mempool.bytes(), total_xts_bytes); + + //this one could drop 3 xts (each of size 1129) + let xt = Arc::from(ExtrinsicBuilder::new_include_data(vec![98 as u8; 2154]).build()); + let length = api.hash_and_length(&xt).1; + // overhead is 105, thus length: 105 + 2154 + assert_eq!(length, 2 * LARGE_XT_SIZE + 1); + + let result = + 
mempool.try_insert_with_replacement(xt, hi_prio, TransactionSource::External, false); + + // we did not update priorities (update_transaction_priority was not called): + assert!(matches!( + result.unwrap_err(), + sc_transaction_pool_api::error::Error::ImmediatelyDropped + )); + } } diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/view.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/view.rs index 3cbb8fa4871d..a35d68120a3a 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/view.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/view.rs @@ -28,7 +28,7 @@ use crate::{ common::log_xt::log_xt_trace, graph::{ self, base_pool::TimedTransactionSource, watcher::Watcher, ExtrinsicFor, ExtrinsicHash, - IsValidator, ValidatedTransaction, ValidatedTransactionFor, + IsValidator, ValidatedPoolSubmitOutcome, ValidatedTransaction, ValidatedTransactionFor, }, LOG_TARGET, }; @@ -158,7 +158,7 @@ where pub(super) async fn submit_many( &self, xts: impl IntoIterator)>, - ) -> Vec, ChainApi::Error>> { + ) -> Vec, ChainApi::Error>> { if log::log_enabled!(target: LOG_TARGET, log::Level::Trace) { let xts = xts.into_iter().collect::>(); log_xt_trace!(target: LOG_TARGET, xts.iter().map(|(_,xt)| self.pool.validated_pool().api().hash_and_length(xt).0), "[{:?}] view::submit_many at:{}", self.at.hash); @@ -173,7 +173,7 @@ where &self, source: TimedTransactionSource, xt: ExtrinsicFor, - ) -> Result, ExtrinsicHash>, ChainApi::Error> { + ) -> Result, ChainApi::Error> { log::trace!(target: LOG_TARGET, "[{:?}] view::submit_and_watch at:{}", self.pool.validated_pool().api().hash_and_length(&xt).0, self.at.hash); self.pool.submit_and_watch(&self.at, source, xt).await } @@ -182,7 +182,7 @@ where pub(super) fn submit_local( &self, xt: ExtrinsicFor, - ) -> Result, ChainApi::Error> { + ) -> Result, ChainApi::Error> { let (hash, length) = self.pool.validated_pool().api().hash_and_length(&xt); log::trace!(target: LOG_TARGET, "[{:?}] view::submit_local at:{}", hash, self.at.hash); @@ -460,4 +460,18 @@ where const IGNORE_BANNED: bool = false; self.pool.validated_pool().check_is_known(tx_hash, IGNORE_BANNED).is_err() } + + /// Removes the whole transaction subtree from the inner pool. + /// + /// Refer to [`crate::graph::ValidatedPool::remove_subtree`] for more details. 
+	pub fn remove_subtree<F>(
+		&self,
+		tx_hash: ExtrinsicHash<ChainApi>,
+		listener_action: F,
+	) -> Vec<ExtrinsicHash<ChainApi>>
+	where
+		F: Fn(&mut crate::graph::Listener<ChainApi>, ExtrinsicHash<ChainApi>),
+	{
+		self.pool.validated_pool().remove_subtree(tx_hash, listener_action)
+	}
 }
diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/view_store.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/view_store.rs
index a06c051f0a7e..43ed5bbf8869 100644
--- a/substrate/client/transaction-pool/src/fork_aware_txpool/view_store.rs
+++ b/substrate/client/transaction-pool/src/fork_aware_txpool/view_store.rs
@@ -27,7 +27,7 @@ use crate::{
 	graph::{
 		self,
 		base_pool::{TimedTransactionSource, Transaction},
-		ExtrinsicFor, ExtrinsicHash, TransactionFor,
+		BaseSubmitOutcome, ExtrinsicFor, ExtrinsicHash, TransactionFor, ValidatedPoolSubmitOutcome,
 	},
 	ReadyIteratorFor, LOG_TARGET,
 };
@@ -38,20 +38,18 @@ use sc_transaction_pool_api::{error::Error as PoolError, PoolStatus};
 use sp_blockchain::TreeRoute;
 use sp_runtime::{generic::BlockId, traits::Block as BlockT};
 use std::{
-	collections::{hash_map::Entry, HashMap},
+	collections::{hash_map::Entry, HashMap, HashSet},
 	sync::Arc,
 	time::Instant,
 };
 
-/// Helper struct to keep the context for transaction replacements.
+/// Helper struct to maintain the context for a pending transaction submission, executed for
+/// newly inserted views.
 #[derive(Clone)]
-struct PendingTxReplacement<ChainApi>
+struct PendingTxSubmission<ChainApi>
 where
 	ChainApi: graph::ChainApi,
 {
-	/// Indicates if the new transaction was already submitted to all the views in the view_store.
-	/// If true, it can be removed after inserting any new view.
-	processed: bool,
 	/// New transaction replacing the old one.
 	xt: ExtrinsicFor<ChainApi>,
 	/// Source of the transaction.
@@ -60,13 +58,84 @@ where
 	watched: bool,
 }
 
-impl PendingTxReplacement<ChainApi>
+/// Helper type representing a callback that allows triggering per-transaction events on the
+/// `ValidatedPool`'s listener.
+type RemovalListener<ChainApi> =
+	Arc<dyn Fn(&mut crate::graph::Listener<ChainApi>, ExtrinsicHash<ChainApi>) + Send + Sync>;
+
+/// Helper struct to maintain the context for a pending transaction removal, executed for
+/// newly inserted views.
+struct PendingTxRemoval<ChainApi>
+where
+	ChainApi: graph::ChainApi,
+{
+	/// Hash of the transaction that will be removed.
+	xt_hash: ExtrinsicHash<ChainApi>,
+	/// Action that shall be executed on the underlying `ValidatedPool`'s listener.
+	listener_action: RemovalListener<ChainApi>,
+}
+
+/// This enum represents an action that should be executed on the newly built
+/// view before this view is inserted into the view store.
+enum PreInsertAction<ChainApi>
+where
+	ChainApi: graph::ChainApi,
+{
+	/// Represents the action of submitting a new transaction. Intended to be used for handling
+	/// usurped transactions.
+	SubmitTx(PendingTxSubmission<ChainApi>),
+
+	/// Represents the action of removing a subtree of transactions.
+	RemoveSubtree(PendingTxRemoval<ChainApi>),
+}
+
+/// Represents a task awaiting execution, to be performed immediately prior to the view insertion
+/// into the view store.
+struct PendingPreInsertTask<ChainApi>
+where
+	ChainApi: graph::ChainApi,
+{
+	/// The action to be applied when inserting a new view.
+	action: PreInsertAction<ChainApi>,
+	/// Indicates if the action was already applied to all the views in the view_store.
+	/// If true, it can be removed after inserting any new view.
+	processed: bool,
+}
+
+impl PendingPreInsertTask<ChainApi>
 where
 	ChainApi: graph::ChainApi,
 {
-	/// Creates new unprocessed instance of pending transaction replacement.
-	fn new(xt: ExtrinsicFor<ChainApi>, source: TimedTransactionSource, watched: bool) -> Self {
-		Self { processed: false, xt, source, watched }
+	/// Creates a new unprocessed instance of a pending transaction submission.
+	fn new_submission_action(
+		xt: ExtrinsicFor<ChainApi>,
+		source: TimedTransactionSource,
+		watched: bool,
+	) -> Self {
+		Self {
+			processed: false,
+			action: PreInsertAction::SubmitTx(PendingTxSubmission { xt, source, watched }),
+		}
+	}
+
+	/// Creates a new unprocessed instance of a pending transaction removal.
+	fn new_removal_action(
+		xt_hash: ExtrinsicHash<ChainApi>,
+		listener: RemovalListener<ChainApi>,
+	) -> Self {
+		Self {
+			processed: false,
+			action: PreInsertAction::RemoveSubtree(PendingTxRemoval {
+				xt_hash,
+				listener_action: listener,
+			}),
+		}
+	}
+
+	/// Marks the task as done for every view present in the view store, meaning it can be
+	/// removed upon the next view insertion.
+	fn mark_processed(&mut self) {
+		self.processed = true;
+	}
 }
@@ -100,9 +169,20 @@ where
 	/// notifcication threads. It is meant to assure that replaced transaction is also removed from
 	/// newly built views in maintain process.
 	///
-	/// The map's key is hash of replaced extrinsic.
-	pending_txs_replacements:
-		RwLock<HashMap<ExtrinsicHash<ChainApi>, PendingTxReplacement<ChainApi>>>,
+	/// The map's key is the hash of the actionable extrinsic (which avoids duplicated entries).
+	pending_txs_tasks: RwLock<HashMap<ExtrinsicHash<ChainApi>, PendingPreInsertTask<ChainApi>>>,
+}
+
+/// Type alias for the outcome of a submission to the `ViewStore`.
+pub(super) type ViewStoreSubmitOutcome<ChainApi> =
+	BaseSubmitOutcome<ChainApi, TxStatusStream<ChainApi>>;
+
+impl<ChainApi: graph::ChainApi> From<ValidatedPoolSubmitOutcome<ChainApi>>
+	for ViewStoreSubmitOutcome<ChainApi>
+{
+	fn from(value: ValidatedPoolSubmitOutcome<ChainApi>) -> Self {
+		Self::new(value.hash(), value.priority())
+	}
 }
 
 impl ViewStore
@@ -124,7 +204,7 @@ where
 			listener,
 			most_recent_view: RwLock::from(None),
 			dropped_stream_controller,
-			pending_txs_replacements: Default::default(),
+			pending_txs_tasks: Default::default(),
 		}
 	}
 
@@ -132,7 +212,7 @@ where
 	pub(super) async fn submit(
 		&self,
 		xts: impl IntoIterator<Item = (TimedTransactionSource, ExtrinsicFor<ChainApi>)> + Clone,
-	) -> HashMap<Block::Hash, Vec<Result<ExtrinsicHash<ChainApi>, ChainApi::Error>>> {
+	) -> HashMap<Block::Hash, Vec<Result<ViewStoreSubmitOutcome<ChainApi>, ChainApi::Error>>> {
 		let submit_futures = {
 			let active_views = self.active_views.read();
 			active_views
 				.iter()
 				.map(|(_, view)| {
 					let view = view.clone();
 					let xts = xts.clone();
-					async move { (view.at.hash, view.submit_many(xts).await) }
+					async move {
+						(
+							view.at.hash,
+							view.submit_many(xts)
+								.await
+								.into_iter()
+								.map(|r| r.map(Into::into))
+								.collect::<Vec<_>>(),
+						)
+					}
 				})
 				.collect::<Vec<_>>()
 		};
@@ -153,7 +242,7 @@
 	pub(super) fn submit_local(
 		&self,
 		xt: ExtrinsicFor<ChainApi>,
-	) -> Result<ExtrinsicHash<ChainApi>, ChainApi::Error> {
+	) -> Result<ViewStoreSubmitOutcome<ChainApi>, ChainApi::Error> {
 		let active_views = self
 			.active_views
 			.read()
@@ -168,12 +257,14 @@
 			.map(|view| view.submit_local(xt.clone()))
 			.find_or_first(Result::is_ok);
 
-		if let Some(Err(err)) = result {
-			log::trace!(target: LOG_TARGET, "[{:?}] submit_local: err: {}", tx_hash, err);
-			return Err(err)
-		};
-
-		Ok(tx_hash)
+		match result {
+			Some(Err(err)) => {
+				log::trace!(target: LOG_TARGET, "[{:?}] submit_local: err: {}", tx_hash, err);
+				Err(err)
+			},
+			None => Ok(ViewStoreSubmitOutcome::new(tx_hash, None)),
+			Some(Ok(r)) => Ok(r.into()),
+		}
 	}
 
 	/// Import a single extrinsic and starts to watch its progress in the pool.
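For orientation, a minimal, hypothetical call-site sketch (the `view_store`, `mempool`, and `xt` bindings are assumed, not part of this diff) of how the new outcome type threads the learned priority back into the mempool:

    // Sketch only: submit locally, then propagate the priority reported by the
    // validated pool back into the mempool entry via `update_transaction_priority`.
    let outcome = view_store.submit_local(xt)?;
    mempool.update_transaction_priority(&outcome);
    let tx_hash = outcome.hash();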
@@ -188,7 +279,7 @@ where _at: Block::Hash, source: TimedTransactionSource, xt: ExtrinsicFor, - ) -> Result, ChainApi::Error> { + ) -> Result, ChainApi::Error> { let tx_hash = self.api.hash_and_length(&xt).0; let Some(external_watcher) = self.listener.create_external_watcher_for_tx(tx_hash) else { return Err(PoolError::AlreadyImported(Box::new(tx_hash)).into()) @@ -203,13 +294,13 @@ where let source = source.clone(); async move { match view.submit_and_watch(source, xt).await { - Ok(watcher) => { + Ok(mut result) => { self.listener.add_view_watcher_for_tx( tx_hash, view.at.hash, - watcher.into_stream().boxed(), + result.expect_watcher().into_stream().boxed(), ); - Ok(()) + Ok(result) }, Err(e) => Err(e), } @@ -217,17 +308,20 @@ where }) .collect::>() }; - let maybe_error = futures::future::join_all(submit_and_watch_futures) + let result = futures::future::join_all(submit_and_watch_futures) .await .into_iter() .find_or_first(Result::is_ok); - if let Some(Err(err)) = maybe_error { - log::trace!(target: LOG_TARGET, "[{:?}] submit_and_watch: err: {}", tx_hash, err); - return Err(err); - }; - - Ok(external_watcher) + match result { + Some(Err(err)) => { + log::trace!(target: LOG_TARGET, "[{:?}] submit_and_watch: err: {}", tx_hash, err); + return Err(err); + }, + Some(Ok(result)) => + Ok(ViewStoreSubmitOutcome::from(result).with_watcher(external_watcher)), + None => Ok(ViewStoreSubmitOutcome::new(tx_hash, None).with_watcher(external_watcher)), + } } /// Returns the pool status for every active view. @@ -575,8 +669,12 @@ where replaced: ExtrinsicHash, watched: bool, ) { - if let Entry::Vacant(entry) = self.pending_txs_replacements.write().entry(replaced) { - entry.insert(PendingTxReplacement::new(xt.clone(), source.clone(), watched)); + if let Entry::Vacant(entry) = self.pending_txs_tasks.write().entry(replaced) { + entry.insert(PendingPreInsertTask::new_submission_action( + xt.clone(), + source.clone(), + watched, + )); } else { return }; @@ -586,8 +684,8 @@ where self.replace_transaction_in_views(source, xt, xt_hash, replaced, watched).await; - if let Some(replacement) = self.pending_txs_replacements.write().get_mut(&replaced) { - replacement.processed = true; + if let Some(replacement) = self.pending_txs_tasks.write().get_mut(&replaced) { + replacement.mark_processed(); } } @@ -596,18 +694,25 @@ where /// After application, all already processed replacements are removed. async fn apply_pending_tx_replacements(&self, view: Arc>) { let mut futures = vec![]; - for replacement in self.pending_txs_replacements.read().values() { - let xt_hash = self.api.hash_and_length(&replacement.xt).0; - futures.push(self.replace_transaction_in_view( - view.clone(), - replacement.source.clone(), - replacement.xt.clone(), - xt_hash, - replacement.watched, - )); + for replacement in self.pending_txs_tasks.read().values() { + match replacement.action { + PreInsertAction::SubmitTx(ref submission) => { + let xt_hash = self.api.hash_and_length(&submission.xt).0; + futures.push(self.replace_transaction_in_view( + view.clone(), + submission.source.clone(), + submission.xt.clone(), + xt_hash, + submission.watched, + )); + }, + PreInsertAction::RemoveSubtree(ref removal) => { + view.remove_subtree(removal.xt_hash, &*removal.listener_action); + }, + } } let _results = futures::future::join_all(futures).await; - self.pending_txs_replacements.write().retain(|_, r| r.processed); + self.pending_txs_tasks.write().retain(|_, r| r.processed); } /// Submits `xt` to the given view. 
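The watcher-carrying outcome changes the call sites as well; a minimal sketch (assuming `view`, `source`, and `xt` are in scope) of consuming `submit_and_watch` under the new API, mirroring the usage in the hunks above:

    // Sketch only: the watcher now travels inside the outcome; `expect_watcher`
    // takes it out and panics if called on the outcome of a plain `submit`.
    let mut outcome = view.submit_and_watch(source, xt).await?;
    let status_stream = outcome.expect_watcher().into_stream().boxed();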
@@ -623,11 +728,11 @@ where
 	) {
 		if watched {
 			match view.submit_and_watch(source, xt).await {
-				Ok(watcher) => {
+				Ok(mut result) => {
 					self.listener.add_view_watcher_for_tx(
 						xt_hash,
 						view.at.hash,
-						watcher.into_stream().boxed(),
+						result.expect_watcher().into_stream().boxed(),
 					);
 				},
 				Err(e) => {
@@ -690,4 +795,58 @@ where
 		};
 		let _results = futures::future::join_all(submit_futures).await;
 	}
+
+	/// Removes a transaction subtree from every view in the view_store, starting from the given
+	/// transaction hash.
+	///
+	/// This function traverses the dependency graph of transactions and removes the specified
+	/// transaction along with all its descendant transactions from every view.
+	///
+	/// A `listener_action` callback function is invoked for every transaction that is removed,
+	/// providing a reference to the pool's listener and the hash of the removed transaction. This
+	/// allows the required events to be triggered. Note that the listener may be called multiple
+	/// times for the same hash.
+	///
+	/// The function will also schedule view pre-insertion actions to ensure that the transactions
+	/// are removed from newly created views.
+	///
+	/// Returns a vector containing the hashes of all removed transactions, including the root
+	/// transaction specified by `xt_hash`. The vector contains only unique hashes.
+	pub(super) fn remove_transaction_subtree<F>(
+		&self,
+		xt_hash: ExtrinsicHash<ChainApi>,
+		listener_action: F,
+	) -> Vec<ExtrinsicHash<ChainApi>>
+	where
+		F: Fn(&mut crate::graph::Listener<ChainApi>, ExtrinsicHash<ChainApi>)
+			+ Clone
+			+ Send
+			+ Sync
+			+ 'static,
+	{
+		if let Entry::Vacant(entry) = self.pending_txs_tasks.write().entry(xt_hash) {
+			entry.insert(PendingPreInsertTask::new_removal_action(
+				xt_hash,
+				Arc::from(listener_action.clone()),
+			));
+		};
+
+		let mut seen = HashSet::new();
+
+		let removed = self
+			.active_views
+			.read()
+			.iter()
+			.chain(self.inactive_views.read().iter())
+			.filter(|(_, view)| view.is_imported(&xt_hash))
+			.flat_map(|(_, view)| view.remove_subtree(xt_hash, &listener_action))
+			.filter(|xt_hash| seen.insert(*xt_hash))
+			.collect();
+
+		if let Some(removal_action) = self.pending_txs_tasks.write().get_mut(&xt_hash) {
+			removal_action.mark_processed();
+		}
+
+		removed
+	}
 }
diff --git a/substrate/client/transaction-pool/src/graph/base_pool.rs b/substrate/client/transaction-pool/src/graph/base_pool.rs
index 04eaa998f42e..3b4afc88b789 100644
--- a/substrate/client/transaction-pool/src/graph/base_pool.rs
+++ b/substrate/client/transaction-pool/src/graph/base_pool.rs
@@ -453,27 +453,29 @@ impl BasePool
-		let worst = self.ready.fold::<Option<TransactionRef<Hash, Ex>>, _>(|worst, current| {
-			let transaction = &current.transaction;
-			worst
-				.map(|worst| {
-					// Here we don't use `TransactionRef`'s ordering implementation because
-					// while it prefers priority like need here, it also prefers older
-					// transactions for inclusion purposes and limit enforcement needs to prefer
-					// newer transactions instead and drop the older ones.
- match worst.transaction.priority.cmp(&transaction.transaction.priority) { - Ordering::Less => worst, - Ordering::Equal => - if worst.insertion_id > transaction.insertion_id { - transaction.clone() - } else { - worst - }, - Ordering::Greater => transaction.clone(), - } - }) - .or_else(|| Some(transaction.clone())) - }); + let worst = + self.ready.fold::>, _>(None, |worst, current| { + let transaction = ¤t.transaction; + worst + .map(|worst| { + // Here we don't use `TransactionRef`'s ordering implementation because + // while it prefers priority like need here, it also prefers older + // transactions for inclusion purposes and limit enforcement needs to + // prefer newer transactions instead and drop the older ones. + match worst.transaction.priority.cmp(&transaction.transaction.priority) + { + Ordering::Less => worst, + Ordering::Equal => + if worst.insertion_id > transaction.insertion_id { + transaction.clone() + } else { + worst + }, + Ordering::Greater => transaction.clone(), + } + }) + .or_else(|| Some(transaction.clone())) + }); if let Some(worst) = worst { removed.append(&mut self.remove_subtree(&[worst.transaction.hash.clone()])) diff --git a/substrate/client/transaction-pool/src/graph/listener.rs b/substrate/client/transaction-pool/src/graph/listener.rs index 41daf5491f70..7b09ee4c6409 100644 --- a/substrate/client/transaction-pool/src/graph/listener.rs +++ b/substrate/client/transaction-pool/src/graph/listener.rs @@ -126,8 +126,8 @@ impl Listener usize { + self.ready.count + self.future.count + } +} + /// Should we check that the transaction is banned /// in the pool, before we verify it? #[derive(Copy, Clone)] -enum CheckBannedBeforeVerify { +pub(crate) enum CheckBannedBeforeVerify { Yes, No, } @@ -172,6 +179,21 @@ pub struct Pool { } impl Pool { + /// Create a new transaction pool with statically sized rotator. + pub fn new_with_staticly_sized_rotator( + options: Options, + is_validator: IsValidator, + api: Arc, + ) -> Self { + Self { + validated_pool: Arc::new(ValidatedPool::new_with_staticly_sized_rotator( + options, + is_validator, + api, + )), + } + } + /// Create a new transaction pool. 
pub fn new(options: Options, is_validator: IsValidator, api: Arc) -> Self { Self { validated_pool: Arc::new(ValidatedPool::new(options, is_validator, api)) } @@ -182,7 +204,7 @@ impl Pool { &self, at: &HashAndNumber, xts: impl IntoIterator)>, - ) -> Vec, B::Error>> { + ) -> Vec, B::Error>> { let validated_transactions = self.verify(at, xts, CheckBannedBeforeVerify::Yes).await; self.validated_pool.submit(validated_transactions.into_values()) } @@ -194,7 +216,7 @@ impl Pool { &self, at: &HashAndNumber, xts: impl IntoIterator)>, - ) -> Vec, B::Error>> { + ) -> Vec, B::Error>> { let validated_transactions = self.verify(at, xts, CheckBannedBeforeVerify::No).await; self.validated_pool.submit(validated_transactions.into_values()) } @@ -205,7 +227,7 @@ impl Pool { at: &HashAndNumber, source: base::TimedTransactionSource, xt: ExtrinsicFor, - ) -> Result, B::Error> { + ) -> Result, B::Error> { let res = self.submit_at(at, std::iter::once((source, xt))).await.pop(); res.expect("One extrinsic passed; one result returned; qed") } @@ -216,7 +238,7 @@ impl Pool { at: &HashAndNumber, source: base::TimedTransactionSource, xt: ExtrinsicFor, - ) -> Result, ExtrinsicHash>, B::Error> { + ) -> Result, B::Error> { let (_, tx) = self .verify_one(at.hash, at.number, source, xt, CheckBannedBeforeVerify::Yes) .await; @@ -284,6 +306,7 @@ impl Pool { let mut validated_counter: usize = 0; let mut future_tags = Vec::new(); + let now = Instant::now(); for (extrinsic, in_pool_tags) in all { match in_pool_tags { // reuse the tags for extrinsics that were found in the pool @@ -319,7 +342,7 @@ impl Pool { } } - log::trace!(target: LOG_TARGET,"prune: validated_counter:{validated_counter}"); + log::debug!(target: LOG_TARGET,"prune: validated_counter:{validated_counter}, took:{:?}", now.elapsed()); self.prune_tags(at, future_tags, in_pool_hashes).await } @@ -351,6 +374,7 @@ impl Pool { tags: impl IntoIterator, known_imported_hashes: impl IntoIterator> + Clone, ) { + let now = Instant::now(); log::trace!(target: LOG_TARGET, "Pruning at {:?}", at); // Prune all transactions that provide given tags let prune_status = self.validated_pool.prune_tags(tags); @@ -369,9 +393,8 @@ impl Pool { let reverified_transactions = self.verify(at, pruned_transactions, CheckBannedBeforeVerify::Yes).await; - let pruned_hashes = reverified_transactions.keys().map(Clone::clone).collect(); - - log::trace!(target: LOG_TARGET, "Pruning at {:?}. Resubmitting transactions: {}", &at, reverified_transactions.len()); + let pruned_hashes = reverified_transactions.keys().map(Clone::clone).collect::>(); + log::debug!(target: LOG_TARGET, "Pruning at {:?}. Resubmitting transactions: {}, reverification took: {:?}", &at, reverified_transactions.len(), now.elapsed()); log_xt_trace!(data: tuple, target: LOG_TARGET, &reverified_transactions, "[{:?}] Resubmitting transaction: {:?}"); // And finally - submit reverified transactions back to the pool @@ -409,7 +432,7 @@ impl Pool { } /// Returns future that validates single transaction at given block. 
- async fn verify_one( + pub(crate) async fn verify_one( &self, block_hash: ::Hash, block_number: NumberFor, @@ -516,6 +539,7 @@ mod tests { .into(), ), ) + .map(|outcome| outcome.hash()) .unwrap(); // then @@ -544,7 +568,10 @@ mod tests { // when let txs = txs.into_iter().map(|x| (SOURCE, Arc::from(x))).collect::>(); - let hashes = block_on(pool.submit_at(&api.expect_hash_and_number(0), txs)); + let hashes = block_on(pool.submit_at(&api.expect_hash_and_number(0), txs)) + .into_iter() + .map(|r| r.map(|o| o.hash())) + .collect::>(); log::debug!("--> {hashes:#?}"); // then @@ -568,7 +595,8 @@ mod tests { // when pool.validated_pool.ban(&Instant::now(), vec![pool.hash_of(&uxt)]); - let res = block_on(pool.submit_one(&api.expect_hash_and_number(0), SOURCE, uxt.into())); + let res = block_on(pool.submit_one(&api.expect_hash_and_number(0), SOURCE, uxt.into())) + .map(|o| o.hash()); assert_eq!(pool.validated_pool().status().ready, 0); assert_eq!(pool.validated_pool().status().future, 0); @@ -580,7 +608,7 @@ mod tests { fn should_reject_unactionable_transactions() { // given let api = Arc::new(TestApi::default()); - let pool = Pool::new( + let pool = Pool::new_with_staticly_sized_rotator( Default::default(), // the node does not author blocks false.into(), @@ -591,7 +619,8 @@ mod tests { let uxt = ExtrinsicBuilder::new_include_data(vec![42]).build(); // when - let res = block_on(pool.submit_one(&api.expect_hash_and_number(0), SOURCE, uxt.into())); + let res = block_on(pool.submit_one(&api.expect_hash_and_number(0), SOURCE, uxt.into())) + .map(|o| o.hash()); // then assert_matches!(res.unwrap_err(), error::Error::Unactionable); @@ -619,7 +648,8 @@ mod tests { .into(), ), ) - .unwrap(); + .unwrap() + .hash(); let hash1 = block_on( pool.submit_one( &han_of_block0, @@ -633,7 +663,8 @@ mod tests { .into(), ), ) - .unwrap(); + .unwrap() + .hash(); // future doesn't count let _hash = block_on( pool.submit_one( @@ -648,7 +679,8 @@ mod tests { .into(), ), ) - .unwrap(); + .unwrap() + .hash(); assert_eq!(pool.validated_pool().status().ready, 2); assert_eq!(pool.validated_pool().status().future, 1); @@ -681,7 +713,8 @@ mod tests { .into(), ), ) - .unwrap(); + .unwrap() + .hash(); let hash2 = block_on( pool.submit_one( &han_of_block0, @@ -695,7 +728,8 @@ mod tests { .into(), ), ) - .unwrap(); + .unwrap() + .hash(); let hash3 = block_on( pool.submit_one( &han_of_block0, @@ -709,7 +743,8 @@ mod tests { .into(), ), ) - .unwrap(); + .unwrap() + .hash(); // when pool.validated_pool.clear_stale(&api.expect_hash_and_number(5)); @@ -741,7 +776,8 @@ mod tests { .into(), ), ) - .unwrap(); + .unwrap() + .hash(); // when block_on(pool.prune_tags(&api.expect_hash_and_number(1), vec![vec![0]], vec![hash1])); @@ -767,10 +803,11 @@ mod tests { let options = Options { ready: limit.clone(), future: limit.clone(), ..Default::default() }; let api = Arc::new(TestApi::default()); - let pool = Pool::new(options, true.into(), api.clone()); + let pool = Pool::new_with_staticly_sized_rotator(options, true.into(), api.clone()); - let hash1 = - block_on(pool.submit_one(&api.expect_hash_and_number(0), SOURCE, xt.into())).unwrap(); + let hash1 = block_on(pool.submit_one(&api.expect_hash_and_number(0), SOURCE, xt.into())) + .unwrap() + .hash(); assert_eq!(pool.validated_pool().status().future, 1); // when @@ -787,7 +824,8 @@ mod tests { .into(), ), ) - .unwrap(); + .unwrap() + .hash(); // then assert_eq!(pool.validated_pool().status().future, 1); @@ -803,7 +841,7 @@ mod tests { let options = Options { ready: limit.clone(), future: 
limit.clone(), ..Default::default() }; let api = Arc::new(TestApi::default()); - let pool = Pool::new(options, true.into(), api.clone()); + let pool = Pool::new_with_staticly_sized_rotator(options, true.into(), api.clone()); // when block_on( @@ -819,6 +857,7 @@ mod tests { .into(), ), ) + .map(|o| o.hash()) .unwrap_err(); // then @@ -845,6 +884,7 @@ mod tests { .into(), ), ) + .map(|o| o.hash()) .unwrap_err(); // then @@ -873,7 +913,8 @@ mod tests { .into(), ), ) - .unwrap(); + .unwrap() + .expect_watcher(); assert_eq!(pool.validated_pool().status().ready, 1); assert_eq!(pool.validated_pool().status().future, 0); @@ -910,7 +951,8 @@ mod tests { .into(), ), ) - .unwrap(); + .unwrap() + .expect_watcher(); assert_eq!(pool.validated_pool().status().ready, 1); assert_eq!(pool.validated_pool().status().future, 0); @@ -949,7 +991,8 @@ mod tests { .into(), ), ) - .unwrap(); + .unwrap() + .expect_watcher(); assert_eq!(pool.validated_pool().status().ready, 0); assert_eq!(pool.validated_pool().status().future, 1); @@ -988,7 +1031,8 @@ mod tests { }); let watcher = block_on(pool.submit_and_watch(&api.expect_hash_and_number(0), SOURCE, uxt.into())) - .unwrap(); + .unwrap() + .expect_watcher(); assert_eq!(pool.validated_pool().status().ready, 1); // when @@ -1013,7 +1057,8 @@ mod tests { }); let watcher = block_on(pool.submit_and_watch(&api.expect_hash_and_number(0), SOURCE, uxt.into())) - .unwrap(); + .unwrap() + .expect_watcher(); assert_eq!(pool.validated_pool().status().ready, 1); // when @@ -1036,7 +1081,7 @@ mod tests { Options { ready: limit.clone(), future: limit.clone(), ..Default::default() }; let api = Arc::new(TestApi::default()); - let pool = Pool::new(options, true.into(), api.clone()); + let pool = Pool::new_with_staticly_sized_rotator(options, true.into(), api.clone()); let xt = uxt(Transfer { from: Alice.into(), @@ -1046,7 +1091,8 @@ mod tests { }); let watcher = block_on(pool.submit_and_watch(&api.expect_hash_and_number(0), SOURCE, xt.into())) - .unwrap(); + .unwrap() + .expect_watcher(); assert_eq!(pool.validated_pool().status().ready, 1); // when @@ -1074,7 +1120,7 @@ mod tests { Options { ready: limit.clone(), future: limit.clone(), ..Default::default() }; let api = Arc::new(TestApi::default()); - let pool = Pool::new(options, true.into(), api.clone()); + let pool = Pool::new_with_staticly_sized_rotator(options, true.into(), api.clone()); // after validation `IncludeData` will have priority set to 9001 // (validate_transaction mock) @@ -1106,14 +1152,16 @@ mod tests { Options { ready: limit.clone(), future: limit.clone(), ..Default::default() }; let api = Arc::new(TestApi::default()); - let pool = Pool::new(options, true.into(), api.clone()); + let pool = Pool::new_with_staticly_sized_rotator(options, true.into(), api.clone()); let han_of_block0 = api.expect_hash_and_number(0); // after validation `IncludeData` will have priority set to 9001 // (validate_transaction mock) let xt = ExtrinsicBuilder::new_include_data(Vec::new()).build(); - block_on(pool.submit_and_watch(&han_of_block0, SOURCE, xt.into())).unwrap(); + block_on(pool.submit_and_watch(&han_of_block0, SOURCE, xt.into())) + .unwrap() + .expect_watcher(); assert_eq!(pool.validated_pool().status().ready, 1); // after validation `Transfer` will have priority set to 4 (validate_transaction @@ -1124,8 +1172,9 @@ mod tests { amount: 5, nonce: 0, }); - let watcher = - block_on(pool.submit_and_watch(&han_of_block0, SOURCE, xt.into())).unwrap(); + let watcher = block_on(pool.submit_and_watch(&han_of_block0, SOURCE, xt.into())) + 
.unwrap() + .expect_watcher(); assert_eq!(pool.validated_pool().status().ready, 2); // when @@ -1151,7 +1200,11 @@ mod tests { let mut api = TestApi::default(); api.delay = Arc::new(Mutex::new(rx.into())); let api = Arc::new(api); - let pool = Arc::new(Pool::new(Default::default(), true.into(), api.clone())); + let pool = Arc::new(Pool::new_with_staticly_sized_rotator( + Default::default(), + true.into(), + api.clone(), + )); let han_of_block0 = api.expect_hash_and_number(0); diff --git a/substrate/client/transaction-pool/src/graph/ready.rs b/substrate/client/transaction-pool/src/graph/ready.rs index 9061d0e25581..b8aef99e638d 100644 --- a/substrate/client/transaction-pool/src/graph/ready.rs +++ b/substrate/client/transaction-pool/src/graph/ready.rs @@ -232,12 +232,10 @@ impl ReadyTransactions { Ok(replaced) } - /// Fold a list of ready transactions to compute a single value. - pub fn fold, &ReadyTx) -> Option>( - &mut self, - f: F, - ) -> Option { - self.ready.read().values().fold(None, f) + /// Fold a list of ready transactions to compute a single value using initial value of + /// accumulator. + pub fn fold) -> R>(&self, init: R, f: F) -> R { + self.ready.read().values().fold(init, f) } /// Returns true if given transaction is part of the queue. diff --git a/substrate/client/transaction-pool/src/graph/rotator.rs b/substrate/client/transaction-pool/src/graph/rotator.rs index 9a2e269b5eed..80d8f24144c8 100644 --- a/substrate/client/transaction-pool/src/graph/rotator.rs +++ b/substrate/client/transaction-pool/src/graph/rotator.rs @@ -31,7 +31,10 @@ use std::{ use super::base_pool::Transaction; /// Expected size of the banned extrinsics cache. -const EXPECTED_SIZE: usize = 2048; +const DEFAULT_EXPECTED_SIZE: usize = 2048; + +/// The default duration, in seconds, for which an extrinsic is banned. +const DEFAULT_BAN_TIME_SECS: u64 = 30 * 60; /// Pool rotator is responsible to only keep fresh extrinsics in the pool. /// @@ -42,18 +45,39 @@ pub struct PoolRotator { ban_time: Duration, /// Currently banned extrinsics. banned_until: RwLock>, + /// Expected size of the banned extrinsics cache. + expected_size: usize, +} + +impl Clone for PoolRotator { + fn clone(&self) -> Self { + Self { + ban_time: self.ban_time, + banned_until: RwLock::new(self.banned_until.read().clone()), + expected_size: self.expected_size, + } + } } impl Default for PoolRotator { fn default() -> Self { - Self { ban_time: Duration::from_secs(60 * 30), banned_until: Default::default() } + Self { + ban_time: Duration::from_secs(DEFAULT_BAN_TIME_SECS), + banned_until: Default::default(), + expected_size: DEFAULT_EXPECTED_SIZE, + } } } impl PoolRotator { /// New rotator instance with specified ban time. pub fn new(ban_time: Duration) -> Self { - Self { ban_time, banned_until: Default::default() } + Self { ban_time, ..Self::default() } + } + + /// New rotator instance with specified ban time and expected cache size. + pub fn new_with_expected_size(ban_time: Duration, expected_size: usize) -> Self { + Self { expected_size, ..Self::new(ban_time) } } /// Returns `true` if extrinsic hash is currently banned. 
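A small usage sketch of the new constructor (the `H256` hash type and the concrete values are illustrative assumptions, not taken from this diff); sizing the rotator to the pool's total transaction count keeps the ban cache proportional to the pool limits:

    use std::time::Duration;
    use sp_core::H256;

    // Sketch only: ban entries for 30 minutes and garbage-collect the banned
    // set once it exceeds twice the expected size (see the constants above).
    let rotator: PoolRotator<H256> =
        PoolRotator::new_with_expected_size(Duration::from_secs(30 * 60), 10_000);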
@@ -69,8 +93,8 @@ impl PoolRotator { banned.insert(hash, *now + self.ban_time); } - if banned.len() > 2 * EXPECTED_SIZE { - while banned.len() > EXPECTED_SIZE { + if banned.len() > 2 * self.expected_size { + while banned.len() > self.expected_size { if let Some(key) = banned.keys().next().cloned() { banned.remove(&key); } @@ -201,16 +225,16 @@ mod tests { let past_block = 0; // when - for i in 0..2 * EXPECTED_SIZE { + for i in 0..2 * DEFAULT_EXPECTED_SIZE { let tx = tx_with(i as u64, past_block); assert!(rotator.ban_if_stale(&now, past_block, &tx)); } - assert_eq!(rotator.banned_until.read().len(), 2 * EXPECTED_SIZE); + assert_eq!(rotator.banned_until.read().len(), 2 * DEFAULT_EXPECTED_SIZE); // then - let tx = tx_with(2 * EXPECTED_SIZE as u64, past_block); + let tx = tx_with(2 * DEFAULT_EXPECTED_SIZE as u64, past_block); // trigger a garbage collection assert!(rotator.ban_if_stale(&now, past_block, &tx)); - assert_eq!(rotator.banned_until.read().len(), EXPECTED_SIZE); + assert_eq!(rotator.banned_until.read().len(), DEFAULT_EXPECTED_SIZE); } } diff --git a/substrate/client/transaction-pool/src/graph/tracked_map.rs b/substrate/client/transaction-pool/src/graph/tracked_map.rs index 6c3bbbf34b55..fe15c6eca308 100644 --- a/substrate/client/transaction-pool/src/graph/tracked_map.rs +++ b/substrate/client/transaction-pool/src/graph/tracked_map.rs @@ -173,6 +173,11 @@ where pub fn len(&mut self) -> usize { self.inner_guard.len() } + + /// Returns an iterator over all key-value pairs. + pub fn iter(&self) -> Iter<'_, K, V> { + self.inner_guard.iter() + } } #[cfg(test)] diff --git a/substrate/client/transaction-pool/src/graph/validated_pool.rs b/substrate/client/transaction-pool/src/graph/validated_pool.rs index 14df63d9673e..bc2b07896dba 100644 --- a/substrate/client/transaction-pool/src/graph/validated_pool.rs +++ b/substrate/client/transaction-pool/src/graph/validated_pool.rs @@ -18,25 +18,22 @@ use std::{ collections::{HashMap, HashSet}, - hash, sync::Arc, }; use crate::{common::log_xt::log_xt_trace, LOG_TARGET}; use futures::channel::mpsc::{channel, Sender}; use parking_lot::{Mutex, RwLock}; -use sc_transaction_pool_api::{error, PoolStatus, ReadyTransactions}; -use serde::Serialize; +use sc_transaction_pool_api::{error, PoolStatus, ReadyTransactions, TransactionPriority}; use sp_blockchain::HashAndNumber; use sp_runtime::{ - traits::{self, SaturatedConversion}, + traits::SaturatedConversion, transaction_validity::{TransactionTag as Tag, ValidTransaction}, }; use std::time::Instant; use super::{ base_pool::{self as base, PruneStatus}, - listener::Listener, pool::{ BlockHash, ChainApi, EventStream, ExtrinsicFor, ExtrinsicHash, Options, TransactionFor, }, @@ -79,12 +76,23 @@ impl ValidatedTransaction { valid_till: at.saturated_into::().saturating_add(validity.longevity), }) } + + /// Returns priority for valid transaction, None if transaction is not valid. + pub fn priority(&self) -> Option { + match self { + ValidatedTransaction::Valid(base::Transaction { priority, .. }) => Some(*priority), + _ => None, + } + } } -/// A type of validated transaction stored in the pool. +/// A type of validated transaction stored in the validated pool. pub type ValidatedTransactionFor = ValidatedTransaction, ExtrinsicFor, ::Error>; +/// A type alias representing ValidatedPool listener for given ChainApi type. +pub type Listener = super::listener::Listener, B>; + /// A closure that returns true if the local node is a validator that can author blocks. 
#[derive(Clone)]
pub struct IsValidator(Arc<Box<dyn Fn() -> bool + Send + Sync>>);
 
@@ -101,12 +109,56 @@ impl From<Box<dyn Fn() -> bool + Send + Sync>> for IsValidator {
 	}
 }
 
+/// Represents the result of `submit` or `submit_and_watch` operations.
+pub struct BaseSubmitOutcome<B: ChainApi, W> {
+	/// The hash of the submitted transaction.
+	hash: ExtrinsicHash<B>,
+	/// A transaction watcher. This is `Some` for `submit_and_watch` and `None` for `submit`.
+	watcher: Option<W>,
+
+	/// The priority of the transaction. Defaults to `None` if unknown.
+	priority: Option<TransactionPriority>,
+}
+
+/// Type alias for the outcome of a submission to the `ValidatedPool`.
+pub type ValidatedPoolSubmitOutcome<B> =
+	BaseSubmitOutcome<B, Watcher<ExtrinsicHash<B>, ExtrinsicHash<B>>>;
+
+impl<B: ChainApi, W> BaseSubmitOutcome<B, W> {
+	/// Creates a new instance with the given hash and priority.
+	pub fn new(hash: ExtrinsicHash<B>, priority: Option<TransactionPriority>) -> Self {
+		Self { hash, priority, watcher: None }
+	}
+
+	/// Sets the transaction watcher.
+	pub fn with_watcher(mut self, watcher: W) -> Self {
+		self.watcher = Some(watcher);
+		self
+	}
+
+	/// Provides the priority of the submitted transaction.
+	pub fn priority(&self) -> Option<TransactionPriority> {
+		self.priority
+	}
+
+	/// Provides the hash of the submitted transaction.
+	pub fn hash(&self) -> ExtrinsicHash<B> {
+		self.hash
+	}
+
+	/// Takes out the watcher. Should only be called on outcomes of `submit_and_watch`;
+	/// otherwise it will panic (which would indicate a logical error in the program).
+	pub fn expect_watcher(&mut self) -> W {
+		self.watcher.take().expect("watcher was set in submit_and_watch. qed")
+	}
+}
+
 /// Pool that deals with validated transactions.
 pub struct ValidatedPool<B: ChainApi> {
 	api: Arc<B>,
 	is_validator: IsValidator,
 	options: Options,
-	listener: RwLock<Listener<ExtrinsicHash<B>, B>>,
+	listener: RwLock<Listener<B>>,
 	pub(crate) pool: RwLock<base::BasePool<ExtrinsicHash<B>, ExtrinsicFor<B>>>,
 	import_notification_sinks: Mutex<Vec<Sender<ExtrinsicHash<B>>>>,
 	rotator: PoolRotator<ExtrinsicHash<B>>,
@@ -121,16 +173,41 @@ impl Clone for ValidatedPool {
 			listener: Default::default(),
 			pool: RwLock::from(self.pool.read().clone()),
 			import_notification_sinks: Default::default(),
-			rotator: PoolRotator::default(),
+			rotator: self.rotator.clone(),
 		}
 	}
 }
 
 impl<B: ChainApi> ValidatedPool<B> {
+	/// Create a new transaction pool with statically sized rotator.
+	pub fn new_with_staticly_sized_rotator(
+		options: Options,
+		is_validator: IsValidator,
+		api: Arc<B>,
+	) -> Self {
+		let ban_time = options.ban_time;
+		Self::new_with_rotator(options, is_validator, api, PoolRotator::new(ban_time))
+	}
+
 	/// Create a new transaction pool.
pub fn new(options: Options, is_validator: IsValidator, api: Arc) -> Self { - let base_pool = base::BasePool::new(options.reject_future_transactions); let ban_time = options.ban_time; + let total_count = options.total_count(); + Self::new_with_rotator( + options, + is_validator, + api, + PoolRotator::new_with_expected_size(ban_time, total_count), + ) + } + + fn new_with_rotator( + options: Options, + is_validator: IsValidator, + api: Arc, + rotator: PoolRotator>, + ) -> Self { + let base_pool = base::BasePool::new(options.reject_future_transactions); Self { is_validator, options, @@ -138,7 +215,7 @@ impl ValidatedPool { api, pool: RwLock::new(base_pool), import_notification_sinks: Default::default(), - rotator: PoolRotator::new(ban_time), + rotator, } } @@ -175,7 +252,7 @@ impl ValidatedPool { pub fn submit( &self, txs: impl IntoIterator>, - ) -> Vec, B::Error>> { + ) -> Vec, B::Error>> { let results = txs .into_iter() .map(|validated_tx| self.submit_one(validated_tx)) @@ -191,7 +268,7 @@ impl ValidatedPool { results .into_iter() .map(|res| match res { - Ok(ref hash) if removed.contains(hash) => + Ok(outcome) if removed.contains(&outcome.hash) => Err(error::Error::ImmediatelyDropped.into()), other => other, }) @@ -199,9 +276,13 @@ impl ValidatedPool { } /// Submit single pre-validated transaction to the pool. - fn submit_one(&self, tx: ValidatedTransactionFor) -> Result, B::Error> { + fn submit_one( + &self, + tx: ValidatedTransactionFor, + ) -> Result, B::Error> { match tx { ValidatedTransaction::Valid(tx) => { + let priority = tx.priority; log::trace!(target: LOG_TARGET, "[{:?}] ValidatedPool::submit_one", tx.hash); if !tx.propagate && !(self.is_validator.0)() { return Err(error::Error::Unactionable.into()) @@ -229,7 +310,7 @@ impl ValidatedPool { let mut listener = self.listener.write(); fire_events(&mut *listener, &imported); - Ok(*imported.hash()) + Ok(ValidatedPoolSubmitOutcome::new(*imported.hash(), Some(priority))) }, ValidatedTransaction::Invalid(hash, err) => { log::trace!(target: LOG_TARGET, "[{:?}] ValidatedPool::submit_one invalid: {:?}", hash, err); @@ -280,7 +361,7 @@ impl ValidatedPool { // run notifications let mut listener = self.listener.write(); for h in &removed { - listener.limit_enforced(h); + listener.limits_enforced(h); } removed @@ -293,7 +374,7 @@ impl ValidatedPool { pub fn submit_and_watch( &self, tx: ValidatedTransactionFor, - ) -> Result, ExtrinsicHash>, B::Error> { + ) -> Result, B::Error> { match tx { ValidatedTransaction::Valid(tx) => { let hash = self.api.hash_and_length(&tx.data).0; @@ -301,7 +382,7 @@ impl ValidatedPool { self.submit(std::iter::once(ValidatedTransaction::Valid(tx))) .pop() .expect("One extrinsic passed; one result returned; qed") - .map(|_| watcher) + .map(|outcome| outcome.with_watcher(watcher)) }, ValidatedTransaction::Invalid(hash, err) => { self.rotator.ban(&Instant::now(), std::iter::once(hash)); @@ -686,11 +767,42 @@ impl ValidatedPool { listener.future(&f.hash); }); } + + /// Removes a transaction subtree from the pool, starting from the given transaction hash. + /// + /// This function traverses the dependency graph of transactions and removes the specified + /// transaction along with all its descendant transactions from the pool. + /// + /// A `listener_action` callback function is invoked for every transaction that is removed, + /// providing a reference to the pool's listener and the hash of the removed transaction. This + /// allows to trigger the required events. 
+ /// + /// Returns a vector containing the hashes of all removed transactions, including the root + /// transaction specified by `tx_hash`. + pub fn remove_subtree( + &self, + tx_hash: ExtrinsicHash, + listener_action: F, + ) -> Vec> + where + F: Fn(&mut Listener, ExtrinsicHash), + { + self.pool + .write() + .remove_subtree(&[tx_hash]) + .into_iter() + .map(|tx| { + let removed_tx_hash = tx.hash; + let mut listener = self.listener.write(); + listener_action(&mut *listener, removed_tx_hash); + removed_tx_hash + }) + .collect::>() + } } -fn fire_events(listener: &mut Listener, imported: &base::Imported) +fn fire_events(listener: &mut Listener, imported: &base::Imported, Ex>) where - H: hash::Hash + Eq + traits::Member + Serialize, B: ChainApi, { match *imported { diff --git a/substrate/client/transaction-pool/src/single_state_txpool/revalidation.rs b/substrate/client/transaction-pool/src/single_state_txpool/revalidation.rs index f22fa2ddabde..2a691ae35eaf 100644 --- a/substrate/client/transaction-pool/src/single_state_txpool/revalidation.rs +++ b/substrate/client/transaction-pool/src/single_state_txpool/revalidation.rs @@ -384,7 +384,11 @@ mod tests { #[test] fn revalidation_queue_works() { let api = Arc::new(TestApi::default()); - let pool = Arc::new(Pool::new(Default::default(), true.into(), api.clone())); + let pool = Arc::new(Pool::new_with_staticly_sized_rotator( + Default::default(), + true.into(), + api.clone(), + )); let queue = Arc::new(RevalidationQueue::new(api.clone(), pool.clone())); let uxt = uxt(Transfer { @@ -401,7 +405,8 @@ mod tests { TimedTransactionSource::new_external(false), uxt.clone().into(), )) - .expect("Should be valid"); + .expect("Should be valid") + .hash(); block_on(queue.revalidate_later(han_of_block0.hash, vec![uxt_hash])); @@ -414,7 +419,11 @@ mod tests { #[test] fn revalidation_queue_skips_revalidation_for_unknown_block_hash() { let api = Arc::new(TestApi::default()); - let pool = Arc::new(Pool::new(Default::default(), true.into(), api.clone())); + let pool = Arc::new(Pool::new_with_staticly_sized_rotator( + Default::default(), + true.into(), + api.clone(), + )); let queue = Arc::new(RevalidationQueue::new(api.clone(), pool.clone())); let uxt0 = uxt(Transfer { @@ -440,7 +449,7 @@ mod tests { vec![(source.clone(), uxt0.into()), (source, uxt1.into())], )) .into_iter() - .map(|r| r.expect("Should be valid")) + .map(|r| r.expect("Should be valid").hash()) .collect::>(); assert_eq!(api.validation_requests().len(), 2); diff --git a/substrate/client/transaction-pool/src/single_state_txpool/single_state_txpool.rs b/substrate/client/transaction-pool/src/single_state_txpool/single_state_txpool.rs index e7504012ca67..3598f9dbc2af 100644 --- a/substrate/client/transaction-pool/src/single_state_txpool/single_state_txpool.rs +++ b/substrate/client/transaction-pool/src/single_state_txpool/single_state_txpool.rs @@ -141,7 +141,11 @@ where finalized_hash: Block::Hash, options: graph::Options, ) -> (Self, Pin + Send>>) { - let pool = Arc::new(graph::Pool::new(options, true.into(), pool_api.clone())); + let pool = Arc::new(graph::Pool::new_with_staticly_sized_rotator( + options, + true.into(), + pool_api.clone(), + )); let (revalidation_queue, background_task) = revalidation::RevalidationQueue::new_background( pool_api.clone(), pool.clone(), @@ -177,7 +181,11 @@ where best_block_hash: Block::Hash, finalized_hash: Block::Hash, ) -> Self { - let pool = Arc::new(graph::Pool::new(options, is_validator, pool_api.clone())); + let pool = 
Arc::new(graph::Pool::new_with_staticly_sized_rotator( + options, + is_validator, + pool_api.clone(), + )); let (revalidation_queue, background_task) = match revalidation_type { RevalidationType::Light => (revalidation::RevalidationQueue::new(pool_api.clone(), pool.clone()), None), @@ -266,7 +274,12 @@ where let number = self.api.resolve_block_number(at); let at = HashAndNumber { hash: at, number: number? }; - Ok(pool.submit_at(&at, xts).await) + Ok(pool + .submit_at(&at, xts) + .await + .into_iter() + .map(|result| result.map(|outcome| outcome.hash())) + .collect()) } async fn submit_one( @@ -284,6 +297,7 @@ where let at = HashAndNumber { hash: at, number: number? }; pool.submit_one(&at, TimedTransactionSource::from_transaction_source(source, false), xt) .await + .map(|outcome| outcome.hash()) } async fn submit_and_watch( @@ -300,15 +314,13 @@ where let number = self.api.resolve_block_number(at); let at = HashAndNumber { hash: at, number: number? }; - let watcher = pool - .submit_and_watch( - &at, - TimedTransactionSource::from_transaction_source(source, false), - xt, - ) - .await?; - - Ok(watcher.into_stream().boxed()) + pool.submit_and_watch( + &at, + TimedTransactionSource::from_transaction_source(source, false), + xt, + ) + .await + .map(|mut outcome| outcome.expect_watcher().into_stream().boxed()) } fn remove_invalid(&self, hashes: &[TxHash]) -> Vec> { @@ -476,7 +488,11 @@ where validity, ); - self.pool.validated_pool().submit(vec![validated]).remove(0) + self.pool + .validated_pool() + .submit(vec![validated]) + .remove(0) + .map(|outcome| outcome.hash()) } } diff --git a/substrate/client/transaction-pool/tests/fatp.rs b/substrate/client/transaction-pool/tests/fatp.rs index 8bf08122995c..dd82c52a6047 100644 --- a/substrate/client/transaction-pool/tests/fatp.rs +++ b/substrate/client/transaction-pool/tests/fatp.rs @@ -2199,7 +2199,7 @@ fn import_sink_works3() { pool.submit_one(genesis, SOURCE, xt1.clone()), ]; - let x = block_on(futures::future::join_all(submissions)); + block_on(futures::future::join_all(submissions)); let header01a = api.push_block(1, vec![], true); let header01b = api.push_block(1, vec![], true); @@ -2213,8 +2213,6 @@ fn import_sink_works3() { assert_pool_status!(header01a.hash(), &pool, 1, 1); assert_pool_status!(header01b.hash(), &pool, 1, 1); - log::debug!("xxx {x:#?}"); - let import_events = futures::executor::block_on_stream(import_stream).take(1).collect::>(); diff --git a/substrate/client/transaction-pool/tests/fatp_common/mod.rs b/substrate/client/transaction-pool/tests/fatp_common/mod.rs index aaffebc0db0a..530c25caf88e 100644 --- a/substrate/client/transaction-pool/tests/fatp_common/mod.rs +++ b/substrate/client/transaction-pool/tests/fatp_common/mod.rs @@ -192,12 +192,9 @@ macro_rules! assert_ready_iterator { let output: Vec<_> = ready_iterator.collect(); log::debug!(target:LOG_TARGET, "expected: {:#?}", expected); log::debug!(target:LOG_TARGET, "output: {:#?}", output); + let output = output.into_iter().map(|t|t.hash).collect::>(); assert_eq!(expected.len(), output.len()); - assert!( - output.iter().zip(expected.iter()).all(|(o,e)| { - o.hash == *e - }) - ); + assert_eq!(output,expected); }}; } @@ -215,6 +212,18 @@ macro_rules! assert_future_iterator { }}; } +#[macro_export] +macro_rules! 
assert_watcher_stream { + ($stream:ident, [$( $event:expr ),*]) => {{ + let expected = vec![ $($event),*]; + log::debug!(target:LOG_TARGET, "expected: {:#?} {}, block now:", expected, expected.len()); + let output = futures::executor::block_on_stream($stream).take(expected.len()).collect::>(); + log::debug!(target:LOG_TARGET, "output: {:#?}", output); + assert_eq!(expected.len(), output.len()); + assert_eq!(output, expected); + }}; +} + pub const SOURCE: TransactionSource = TransactionSource::External; #[cfg(test)] diff --git a/substrate/client/transaction-pool/tests/fatp_prios.rs b/substrate/client/transaction-pool/tests/fatp_prios.rs index 4ed9b4503861..af5e7e8c5a6a 100644 --- a/substrate/client/transaction-pool/tests/fatp_prios.rs +++ b/substrate/client/transaction-pool/tests/fatp_prios.rs @@ -20,13 +20,15 @@ pub mod fatp_common; -use fatp_common::{new_best_block_event, TestPoolBuilder, LOG_TARGET, SOURCE}; +use fatp_common::{invalid_hash, new_best_block_event, TestPoolBuilder, LOG_TARGET, SOURCE}; use futures::{executor::block_on, FutureExt}; use sc_transaction_pool::ChainApi; -use sc_transaction_pool_api::{MaintainedTransactionPool, TransactionPool, TransactionStatus}; +use sc_transaction_pool_api::{ + error::Error as TxPoolError, LocalTransactionPool, MaintainedTransactionPool, TransactionPool, + TransactionStatus, +}; use substrate_test_runtime_client::Sr25519Keyring::*; use substrate_test_runtime_transaction_pool::uxt; - #[test] fn fatp_prio_ready_higher_evicts_lower() { sp_tracing::try_init_simple(); @@ -247,3 +249,312 @@ fn fatp_prio_watcher_future_lower_prio_gets_dropped_from_all_views() { assert_ready_iterator!(header01.hash(), pool, [xt2, xt1]); assert_ready_iterator!(header02.hash(), pool, [xt2, xt1]); } + +#[test] +fn fatp_prios_watcher_full_mempool_higher_prio_is_accepted() { + sp_tracing::try_init_simple(); + + let builder = TestPoolBuilder::new(); + let (pool, api, _) = builder.with_mempool_count_limit(4).with_ready_count(2).build(); + api.set_nonce(api.genesis_hash(), Bob.into(), 300); + api.set_nonce(api.genesis_hash(), Charlie.into(), 400); + api.set_nonce(api.genesis_hash(), Dave.into(), 500); + api.set_nonce(api.genesis_hash(), Eve.into(), 600); + api.set_nonce(api.genesis_hash(), Ferdie.into(), 700); + + let header01 = api.push_block(1, vec![], true); + let event = new_best_block_event(&pool, None, header01.hash()); + block_on(pool.maintain(event)); + + let xt0 = uxt(Alice, 200); + let xt1 = uxt(Bob, 300); + let xt2 = uxt(Charlie, 400); + + let xt3 = uxt(Dave, 500); + + let xt4 = uxt(Eve, 600); + let xt5 = uxt(Ferdie, 700); + + api.set_priority(&xt0, 1); + api.set_priority(&xt1, 2); + api.set_priority(&xt2, 3); + api.set_priority(&xt3, 4); + + api.set_priority(&xt4, 5); + api.set_priority(&xt5, 6); + + let xt0_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt0.clone())).unwrap(); + let xt1_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt1.clone())).unwrap(); + + assert_pool_status!(header01.hash(), &pool, 2, 0); + assert_eq!(pool.mempool_len().1, 2); + + let header02 = api.push_block_with_parent(header01.hash(), vec![], true); + block_on(pool.maintain(new_best_block_event(&pool, Some(header01.hash()), header02.hash()))); + + let _xt2_watcher = + block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt2.clone())).unwrap(); + let _xt3_watcher = + block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt3.clone())).unwrap(); + + assert_pool_status!(header02.hash(), &pool, 2, 0); + assert_eq!(pool.mempool_len().1, 4); + + let 
+	let header03 = api.push_block_with_parent(header02.hash(), vec![], true);
+	block_on(pool.maintain(new_best_block_event(&pool, Some(header02.hash()), header03.hash())));
+
+	let _xt4_watcher =
+		block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt4.clone())).unwrap();
+	let _xt5_watcher =
+		block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt5.clone())).unwrap();
+
+	assert_pool_status!(header03.hash(), &pool, 2, 0);
+	assert_eq!(pool.mempool_len().1, 4);
+
+	assert_watcher_stream!(xt0_watcher, [TransactionStatus::Ready, TransactionStatus::Dropped]);
+	assert_watcher_stream!(xt1_watcher, [TransactionStatus::Ready, TransactionStatus::Dropped]);
+
+	assert_ready_iterator!(header01.hash(), pool, []);
+	assert_ready_iterator!(header02.hash(), pool, [xt3, xt2]);
+	assert_ready_iterator!(header03.hash(), pool, [xt5, xt4]);
+}
+
+#[test]
+fn fatp_prios_watcher_full_mempool_higher_prio_is_accepted_with_subtree() {
+	sp_tracing::try_init_simple();
+
+	let builder = TestPoolBuilder::new();
+	let (pool, api, _) = builder.with_mempool_count_limit(4).with_ready_count(4).build();
+	api.set_nonce(api.genesis_hash(), Bob.into(), 300);
+	api.set_nonce(api.genesis_hash(), Charlie.into(), 400);
+
+	let header01 = api.push_block(1, vec![], true);
+	let event = new_best_block_event(&pool, None, header01.hash());
+	block_on(pool.maintain(event));
+
+	let xt0 = uxt(Alice, 200);
+	let xt1 = uxt(Alice, 201);
+	let xt2 = uxt(Alice, 202);
+	let xt3 = uxt(Bob, 300);
+	let xt4 = uxt(Charlie, 400);
+
+	api.set_priority(&xt0, 1);
+	api.set_priority(&xt1, 3);
+	api.set_priority(&xt2, 3);
+	api.set_priority(&xt3, 2);
+	api.set_priority(&xt4, 2);
+
+	let xt0_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt0.clone())).unwrap();
+	let xt1_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt1.clone())).unwrap();
+	let xt2_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt2.clone())).unwrap();
+	let xt3_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt3.clone())).unwrap();
+
+	assert_ready_iterator!(header01.hash(), pool, [xt3, xt0, xt1, xt2]);
+	assert_pool_status!(header01.hash(), &pool, 4, 0);
+	assert_eq!(pool.mempool_len().1, 4);
+
+	let xt4_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt4.clone())).unwrap();
+	assert_pool_status!(header01.hash(), &pool, 2, 0);
+	assert_ready_iterator!(header01.hash(), pool, [xt3, xt4]);
+
+	assert_watcher_stream!(xt0_watcher, [TransactionStatus::Ready, TransactionStatus::Dropped]);
+	assert_watcher_stream!(xt1_watcher, [TransactionStatus::Ready, TransactionStatus::Dropped]);
+	assert_watcher_stream!(xt2_watcher, [TransactionStatus::Ready, TransactionStatus::Dropped]);
+	assert_watcher_stream!(xt3_watcher, [TransactionStatus::Ready]);
+	assert_watcher_stream!(xt4_watcher, [TransactionStatus::Ready]);
+}
+
+#[test]
+fn fatp_prios_watcher_full_mempool_higher_prio_is_accepted_with_subtree2() {
+	sp_tracing::try_init_simple();
+
+	let builder = TestPoolBuilder::new();
+	let (pool, api, _) = builder.with_mempool_count_limit(4).with_ready_count(4).build();
+	api.set_nonce(api.genesis_hash(), Bob.into(), 300);
+	api.set_nonce(api.genesis_hash(), Charlie.into(), 400);
+
+	let header01 = api.push_block(1, vec![], true);
+	let event = new_best_block_event(&pool, None, header01.hash());
+	block_on(pool.maintain(event));
+
+	let xt0 = uxt(Alice, 200);
+	let xt1 = uxt(Alice, 201);
+	let xt2 = uxt(Alice, 202);
+	let xt3 = uxt(Bob, 300);
+	let xt4 = uxt(Charlie, 400);
+
+	api.set_priority(&xt0, 1);
+	api.set_priority(&xt1, 3);
+	api.set_priority(&xt2, 3);
+	api.set_priority(&xt3, 2);
+	api.set_priority(&xt4, 2);
+
+	let xt0_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt0.clone())).unwrap();
+	let xt1_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt1.clone())).unwrap();
+	let xt2_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt2.clone())).unwrap();
+	let xt3_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt3.clone())).unwrap();
+
+	assert_ready_iterator!(header01.hash(), pool, [xt3, xt0, xt1, xt2]);
+	assert_pool_status!(header01.hash(), &pool, 4, 0);
+	assert_eq!(pool.mempool_len().1, 4);
+
+	let header02 = api.push_block_with_parent(header01.hash(), vec![], true);
+	block_on(pool.maintain(new_best_block_event(&pool, Some(header01.hash()), header02.hash())));
+
+	let xt4_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt4.clone())).unwrap();
+	assert_ready_iterator!(header01.hash(), pool, [xt3]);
+	assert_pool_status!(header02.hash(), &pool, 2, 0);
+	assert_ready_iterator!(header02.hash(), pool, [xt3, xt4]);
+
+	assert_watcher_stream!(xt0_watcher, [TransactionStatus::Ready, TransactionStatus::Dropped]);
+	assert_watcher_stream!(xt1_watcher, [TransactionStatus::Ready, TransactionStatus::Dropped]);
+	assert_watcher_stream!(xt2_watcher, [TransactionStatus::Ready, TransactionStatus::Dropped]);
+	assert_watcher_stream!(xt3_watcher, [TransactionStatus::Ready]);
+	assert_watcher_stream!(xt4_watcher, [TransactionStatus::Ready]);
+}
+
+#[test]
+fn fatp_prios_watcher_full_mempool_lower_prio_gets_rejected() {
+	sp_tracing::try_init_simple();
+
+	let builder = TestPoolBuilder::new();
+	let (pool, api, _) = builder.with_mempool_count_limit(2).with_ready_count(2).build();
+	api.set_nonce(api.genesis_hash(), Bob.into(), 300);
+	api.set_nonce(api.genesis_hash(), Charlie.into(), 400);
+	api.set_nonce(api.genesis_hash(), Dave.into(), 500);
+
+	let header01 = api.push_block(1, vec![], true);
+	let event = new_best_block_event(&pool, None, header01.hash());
+	block_on(pool.maintain(event));
+
+	let xt0 = uxt(Alice, 200);
+	let xt1 = uxt(Bob, 300);
+	let xt2 = uxt(Charlie, 400);
+	let xt3 = uxt(Dave, 500);
+
+	api.set_priority(&xt0, 2);
+	api.set_priority(&xt1, 2);
+	api.set_priority(&xt2, 2);
+	api.set_priority(&xt3, 1);
+
+	let _xt0_watcher =
+		block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt0.clone())).unwrap();
+	let _xt1_watcher =
+		block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt1.clone())).unwrap();
+
+	assert_pool_status!(header01.hash(), &pool, 2, 0);
+	assert_eq!(pool.mempool_len().1, 2);
+
+	let header02 = api.push_block_with_parent(header01.hash(), vec![], true);
+	block_on(pool.maintain(new_best_block_event(&pool, Some(header01.hash()), header02.hash())));
+
+	assert_pool_status!(header02.hash(), &pool, 2, 0);
+	assert_eq!(pool.mempool_len().1, 2);
+
+	assert_ready_iterator!(header01.hash(), pool, [xt0, xt1]);
+	assert_ready_iterator!(header02.hash(), pool, [xt0, xt1]);
+
+	let result2 = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt2.clone())).map(|_| ());
+	assert!(matches!(result2.as_ref().unwrap_err().0, TxPoolError::ImmediatelyDropped));
+	let result3 = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt3.clone())).map(|_| ());
+	assert!(matches!(result3.as_ref().unwrap_err().0, TxPoolError::ImmediatelyDropped));
+}
+
+#[test]
+fn fatp_prios_watcher_full_mempool_does_not_keep_dropped_transaction() {
+	sp_tracing::try_init_simple();
+
+	let builder = TestPoolBuilder::new();
+	let (pool, api, _) = builder.with_mempool_count_limit(4).with_ready_count(2).build();
+	api.set_nonce(api.genesis_hash(), Bob.into(), 300);
+	api.set_nonce(api.genesis_hash(), Charlie.into(), 400);
+	api.set_nonce(api.genesis_hash(), Dave.into(), 500);
+
+	let header01 = api.push_block(1, vec![], true);
+	let event = new_best_block_event(&pool, None, header01.hash());
+	block_on(pool.maintain(event));
+
+	let xt0 = uxt(Alice, 200);
+	let xt1 = uxt(Bob, 300);
+	let xt2 = uxt(Charlie, 400);
+	let xt3 = uxt(Dave, 500);
+
+	api.set_priority(&xt0, 2);
+	api.set_priority(&xt1, 2);
+	api.set_priority(&xt2, 2);
+	api.set_priority(&xt3, 2);
+
+	let xt0_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt0.clone())).unwrap();
+	let xt1_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt1.clone())).unwrap();
+	let xt2_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt2.clone())).unwrap();
+	let xt3_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt3.clone())).unwrap();
+
+	assert_pool_status!(header01.hash(), &pool, 2, 0);
+	assert_ready_iterator!(header01.hash(), pool, [xt2, xt3]);
+
+	assert_watcher_stream!(xt0_watcher, [TransactionStatus::Ready, TransactionStatus::Dropped]);
+	assert_watcher_stream!(xt1_watcher, [TransactionStatus::Ready, TransactionStatus::Dropped]);
+	assert_watcher_stream!(xt2_watcher, [TransactionStatus::Ready]);
+	assert_watcher_stream!(xt3_watcher, [TransactionStatus::Ready]);
+}
+
+#[test]
+fn fatp_prios_submit_local_full_mempool_higher_prio_is_accepted() {
+	sp_tracing::try_init_simple();
+
+	let builder = TestPoolBuilder::new();
+	let (pool, api, _) = builder.with_mempool_count_limit(4).with_ready_count(2).build();
+	api.set_nonce(api.genesis_hash(), Bob.into(), 300);
+	api.set_nonce(api.genesis_hash(), Charlie.into(), 400);
+	api.set_nonce(api.genesis_hash(), Dave.into(), 500);
+	api.set_nonce(api.genesis_hash(), Eve.into(), 600);
+	api.set_nonce(api.genesis_hash(), Ferdie.into(), 700);
+
+	let header01 = api.push_block(1, vec![], true);
+	let event = new_best_block_event(&pool, None, header01.hash());
+	block_on(pool.maintain(event));
+
+	let xt0 = uxt(Alice, 200);
+	let xt1 = uxt(Bob, 300);
+	let xt2 = uxt(Charlie, 400);
+
+	let xt3 = uxt(Dave, 500);
+
+	let xt4 = uxt(Eve, 600);
+	let xt5 = uxt(Ferdie, 700);
+
+	api.set_priority(&xt0, 1);
+	api.set_priority(&xt1, 2);
+	api.set_priority(&xt2, 3);
+	api.set_priority(&xt3, 4);
+
+	api.set_priority(&xt4, 5);
+	api.set_priority(&xt5, 6);
+	pool.submit_local(invalid_hash(), xt0.clone()).unwrap();
+	pool.submit_local(invalid_hash(), xt1.clone()).unwrap();
+
+	assert_pool_status!(header01.hash(), &pool, 2, 0);
+	assert_eq!(pool.mempool_len().0, 2);
+
+	let header02 = api.push_block_with_parent(header01.hash(), vec![], true);
+	block_on(pool.maintain(new_best_block_event(&pool, Some(header01.hash()), header02.hash())));
+
+	pool.submit_local(invalid_hash(), xt2.clone()).unwrap();
+	pool.submit_local(invalid_hash(), xt3.clone()).unwrap();
+
+	assert_pool_status!(header02.hash(), &pool, 2, 0);
+	assert_eq!(pool.mempool_len().0, 4);
+
+	let header03 = api.push_block_with_parent(header02.hash(), vec![], true);
+	block_on(pool.maintain(new_best_block_event(&pool, Some(header02.hash()), header03.hash())));
+
+	pool.submit_local(invalid_hash(), xt4.clone()).unwrap();
+	pool.submit_local(invalid_hash(), xt5.clone()).unwrap();
+
+	assert_pool_status!(header03.hash(), &pool, 2, 0);
+	assert_eq!(pool.mempool_len().0, 4);
+
+	assert_ready_iterator!(header01.hash(), pool, []);
+	assert_ready_iterator!(header02.hash(), pool, [xt3, xt2]);
+	assert_ready_iterator!(header03.hash(), pool, [xt5, xt4]);
+}
diff --git a/substrate/client/transaction-pool/tests/pool.rs b/substrate/client/transaction-pool/tests/pool.rs
index 20997606c607..c70f45483314 100644
--- a/substrate/client/transaction-pool/tests/pool.rs
+++ b/substrate/client/transaction-pool/tests/pool.rs
@@ -49,7 +49,7 @@ const LOG_TARGET: &str = "txpool";

 fn pool() -> (Pool<TestApi>, Arc<TestApi>) {
 	let api = Arc::new(TestApi::with_alice_nonce(209));
-	(Pool::new(Default::default(), true.into(), api.clone()), api)
+	(Pool::new_with_staticly_sized_rotator(Default::default(), true.into(), api.clone()), api)
 }

 fn maintained_pool() -> (BasicPool<TestApi, Block>, Arc<TestApi>, futures::executor::ThreadPool) {
@@ -158,6 +158,7 @@ fn prune_tags_should_work() {
 	let (pool, api) = pool();
 	let hash209 =
 		block_on(pool.submit_one(&api.expect_hash_and_number(0), TSOURCE, uxt(Alice, 209).into()))
+			.map(|o| o.hash())
 			.unwrap();
 	block_on(pool.submit_one(&api.expect_hash_and_number(0), TSOURCE, uxt(Alice, 210).into()))
 		.unwrap();
@@ -184,10 +185,13 @@
 fn should_ban_invalid_transactions() {
 	let (pool, api) = pool();
 	let uxt = Arc::from(uxt(Alice, 209));
-	let hash =
-		block_on(pool.submit_one(&api.expect_hash_and_number(0), TSOURCE, uxt.clone())).unwrap();
+	let hash = block_on(pool.submit_one(&api.expect_hash_and_number(0), TSOURCE, uxt.clone()))
+		.unwrap()
+		.hash();
 	pool.validated_pool().remove_invalid(&[hash]);
-	block_on(pool.submit_one(&api.expect_hash_and_number(0), TSOURCE, uxt.clone())).unwrap_err();
+	block_on(pool.submit_one(&api.expect_hash_and_number(0), TSOURCE, uxt.clone()))
+		.map(|_| ())
+		.unwrap_err();

 	// when
 	let pending: Vec<_> = pool
@@ -198,7 +202,9 @@
 	assert_eq!(pending, Vec::<Hash>::new());

 	// then
-	block_on(pool.submit_one(&api.expect_hash_and_number(0), TSOURCE, uxt.clone())).unwrap_err();
+	block_on(pool.submit_one(&api.expect_hash_and_number(0), TSOURCE, uxt.clone()))
+		.map(|_| ())
+		.unwrap_err();
 }

 #[test]
@@ -224,7 +230,7 @@
 	api.set_valid_modifier(Box::new(|v: &mut ValidTransaction| {
 		v.provides.push(vec![155]);
 	}));
-	let pool = Pool::new(Default::default(), true.into(), api.clone());
+	let pool = Pool::new_with_staticly_sized_rotator(Default::default(), true.into(), api.clone());
 	let xt0 = Arc::from(uxt(Alice, 209));
 	block_on(pool.submit_one(&api.expect_hash_and_number(0), TSOURCE, xt0.clone()))
 		.expect("1. Imported");
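All of the pool changes above follow one pattern: `submit_at`, `submit_one` and `submit_and_watch` now return a richer submission outcome, and the previous hash- and watcher-returning signatures are recovered by mapping over it (`outcome.hash()`, `outcome.expect_watcher()`). A minimal, dependency-free sketch of that shape — `SubmissionOutcome` and `Watcher` here are illustrative stand-ins, not the actual sc-transaction-pool types:

struct Watcher;

struct SubmissionOutcome {
	hash: u64,
	watcher: Option<Watcher>,
}

impl SubmissionOutcome {
	// What `submit_one`/`submit_at` map the outcome to.
	fn hash(&self) -> u64 {
		self.hash
	}

	// What `submit_and_watch` maps the outcome to; panics if the
	// submission was not watched, mirroring `expect_watcher` above.
	fn expect_watcher(&mut self) -> Watcher {
		self.watcher.take().expect("watched submissions carry a watcher")
	}
}

fn main() {
	let mut outcome = SubmissionOutcome { hash: 42, watcher: Some(Watcher) };
	let _hash = outcome.hash();
	let _watcher = outcome.expect_watcher();
}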
diff --git a/substrate/frame/examples/multi-block-migrations/src/mock.rs b/substrate/frame/examples/multi-block-migrations/src/mock.rs
index b2a946e1c505..64940db080c4 100644
--- a/substrate/frame/examples/multi-block-migrations/src/mock.rs
+++ b/substrate/frame/examples/multi-block-migrations/src/mock.rs
@@ -25,10 +25,7 @@
 //! using the [`Migrations`] type.

 use frame_support::{
-	construct_runtime, derive_impl,
-	migrations::MultiStepMigrator,
-	pallet_prelude::Weight,
-	traits::{OnFinalize, OnInitialize},
+	construct_runtime, derive_impl, migrations::MultiStepMigrator, pallet_prelude::Weight,
 };

 type Block = frame_system::mocking::MockBlock<Test>;
@@ -81,13 +78,11 @@ pub fn new_test_ext() -> sp_io::TestExternalities {

 #[allow(dead_code)]
 pub fn run_to_block(n: u64) {
-	assert!(System::block_number() < n);
-	while System::block_number() < n {
-		let b = System::block_number();
-		AllPalletsWithSystem::on_finalize(b);
-		// Done by Executive:
-		<Test as frame_system::Config>::MultiBlockMigrator::step();
-		System::set_block_number(b + 1);
-		AllPalletsWithSystem::on_initialize(b + 1);
-	}
+	System::run_to_block_with::<AllPalletsWithSystem>(
+		n,
+		frame_system::RunToBlockHooks::default().after_initialize(|_| {
+			// Done by Executive:
+			<Test as frame_system::Config>::MultiBlockMigrator::step();
+		}),
+	);
 }
diff --git a/substrate/frame/fast-unstake/src/mock.rs b/substrate/frame/fast-unstake/src/mock.rs
index 757052e230a1..f044fc610187 100644
--- a/substrate/frame/fast-unstake/src/mock.rs
+++ b/substrate/frame/fast-unstake/src/mock.rs
@@ -266,22 +266,19 @@ impl ExtBuilder {
 }

 pub(crate) fn run_to_block(n: u64, on_idle: bool) {
-	let current_block = System::block_number();
-	assert!(n > current_block);
-	while System::block_number() < n {
-		Balances::on_finalize(System::block_number());
-		Staking::on_finalize(System::block_number());
-		FastUnstake::on_finalize(System::block_number());
-
-		System::set_block_number(System::block_number() + 1);
-
-		Balances::on_initialize(System::block_number());
-		Staking::on_initialize(System::block_number());
-		FastUnstake::on_initialize(System::block_number());
-		if on_idle {
-			FastUnstake::on_idle(System::block_number(), BlockWeights::get().max_block);
-		}
-	}
+	System::run_to_block_with::<AllPalletsWithSystem>(
+		n,
+		frame_system::RunToBlockHooks::default()
+			.before_finalize(|_| {
+				// Satisfy the timestamp pallet.
+				Timestamp::set_timestamp(0);
+			})
+			.after_initialize(|bn| {
+				if on_idle {
+					FastUnstake::on_idle(bn, BlockWeights::get().max_block);
+				}
+			}),
+	);
 }

 pub(crate) fn next_block(on_idle: bool) {
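Both mocks above (and several more below) swap a hand-rolled block loop for `System::run_to_block_with` and its `RunToBlockHooks` builder. A self-contained sketch of how such a hook builder can work, assuming plain `u64` block numbers and eliding the real per-pallet `on_initialize`/`on_finalize` dispatch:

type Hook<'a> = Box<dyn FnMut(u64) + 'a>;

#[derive(Default)]
struct RunToBlockHooks<'a> {
	before_init: Vec<Hook<'a>>,
	after_init: Vec<Hook<'a>>,
	before_fin: Vec<Hook<'a>>,
}

impl<'a> RunToBlockHooks<'a> {
	// Builder-style registration, mirroring the calls in the mocks above.
	fn before_initialize(mut self, f: impl FnMut(u64) + 'a) -> Self {
		self.before_init.push(Box::new(f));
		self
	}
	fn after_initialize(mut self, f: impl FnMut(u64) + 'a) -> Self {
		self.after_init.push(Box::new(f));
		self
	}
	fn before_finalize(mut self, f: impl FnMut(u64) + 'a) -> Self {
		self.before_fin.push(Box::new(f));
		self
	}
}

// Drives the chain from `now` up to block `n`, firing the registered
// hooks around each block's lifecycle points.
fn run_to_block_with(mut now: u64, n: u64, mut hooks: RunToBlockHooks) {
	while now < n {
		now += 1;
		hooks.before_init.iter_mut().for_each(|f| f(now));
		// ... every pallet's `on_initialize(now)` would run here ...
		hooks.after_init.iter_mut().for_each(|f| f(now));
		hooks.before_fin.iter_mut().for_each(|f| f(now));
		// ... every pallet's `on_finalize(now)` would run here ...
	}
}

fn main() {
	let hooks = RunToBlockHooks::default()
		.after_initialize(|bn| println!("after on_initialize of block {bn}"))
		.before_finalize(|bn| println!("about to finalize block {bn}"));
	run_to_block_with(0, 3, hooks);
}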
diff --git a/substrate/frame/grandpa/src/benchmarking.rs b/substrate/frame/grandpa/src/benchmarking.rs
index 0a10e5882776..56048efa22ca 100644
--- a/substrate/frame/grandpa/src/benchmarking.rs
+++ b/substrate/frame/grandpa/src/benchmarking.rs
@@ -17,7 +17,7 @@
 //! Benchmarks for the GRANDPA pallet.

-use super::{Pallet as Grandpa, *};
+use super::*;
 use frame_benchmarking::v2::*;
 use frame_system::RawOrigin;
 use sp_core::H256;
@@ -69,7 +69,7 @@ mod benchmarks {
 		#[extrinsic_call]
 		_(RawOrigin::Root, delay, best_finalized_block_number);

-		assert!(Grandpa::<T>::stalled().is_some());
+		assert!(Stalled::<T>::get().is_some());
 	}

 	impl_benchmark_test_suite!(
diff --git a/substrate/frame/grandpa/src/equivocation.rs b/substrate/frame/grandpa/src/equivocation.rs
index 2366c957e9ab..4ebdbc1eecd3 100644
--- a/substrate/frame/grandpa/src/equivocation.rs
+++ b/substrate/frame/grandpa/src/equivocation.rs
@@ -177,7 +177,7 @@ where
 		evidence: (EquivocationProof<T::Hash, BlockNumberFor<T>>, T::KeyOwnerProof),
 	) -> Result<(), DispatchError> {
 		let (equivocation_proof, key_owner_proof) = evidence;
-		let reporter = reporter.or_else(|| <pallet_authorship::Pallet<T>>::author());
+		let reporter = reporter.or_else(|| pallet_authorship::Pallet::<T>::author());
 		let offender = equivocation_proof.offender().clone();

 		// We check the equivocation within the context of its set id (and
diff --git a/substrate/frame/grandpa/src/lib.rs b/substrate/frame/grandpa/src/lib.rs
index 4f69aeaef523..9017eec2ca8f 100644
--- a/substrate/frame/grandpa/src/lib.rs
+++ b/substrate/frame/grandpa/src/lib.rs
@@ -127,7 +127,7 @@ pub mod pallet {
 	impl<T: Config> Hooks<BlockNumberFor<T>> for Pallet<T> {
 		fn on_finalize(block_number: BlockNumberFor<T>) {
 			// check for scheduled pending authority set changes
-			if let Some(pending_change) = <PendingChange<T>>::get() {
+			if let Some(pending_change) = PendingChange::<T>::get() {
 				// emit signal if we're at the block that scheduled the change
 				if block_number == pending_change.scheduled_at {
 					let next_authorities = pending_change.next_authorities.to_vec();
@@ -150,12 +150,12 @@
 					Self::deposit_event(Event::NewAuthorities {
 						authority_set: pending_change.next_authorities.into_inner(),
 					});
-					<PendingChange<T>>::kill();
+					PendingChange::<T>::kill();
 				}
 			}

 			// check for scheduled pending state changes
-			match <State<T>>::get() {
+			match State::<T>::get() {
 				StoredState::PendingPause { scheduled_at, delay } => {
 					// signal change to pause
 					if block_number == scheduled_at {
@@ -164,7 +164,7 @@

 					// enact change to paused state
 					if block_number == scheduled_at + delay {
-						<State<T>>::put(StoredState::Paused);
+						State::<T>::put(StoredState::Paused);
 						Self::deposit_event(Event::Paused);
 					}
 				},
@@ -176,7 +176,7 @@

 					// enact change to live state
 					if block_number == scheduled_at + delay {
-						<State<T>>::put(StoredState::Live);
+						State::<T>::put(StoredState::Live);
 						Self::deposit_event(Event::Resumed);
 					}
 				},
@@ -297,37 +297,32 @@
 	}

 	#[pallet::type_value]
-	pub(super) fn DefaultForState<T: Config>() -> StoredState<BlockNumberFor<T>> {
+	pub fn DefaultForState<T: Config>() -> StoredState<BlockNumberFor<T>> {
 		StoredState::Live
 	}

 	/// State of the current authority set.
 	#[pallet::storage]
-	#[pallet::getter(fn state)]
-	pub(super) type State<T: Config> =
+	pub type State<T: Config> =
 		StorageValue<_, StoredState<BlockNumberFor<T>>, ValueQuery, DefaultForState<T>>;

 	/// Pending change: (signaled at, scheduled change).
 	#[pallet::storage]
-	#[pallet::getter(fn pending_change)]
-	pub(super) type PendingChange<T: Config> =
+	pub type PendingChange<T: Config> =
 		StorageValue<_, StoredPendingChange<BlockNumberFor<T>, T::MaxAuthorities>>;

 	/// next block number where we can force a change.
 	#[pallet::storage]
-	#[pallet::getter(fn next_forced)]
-	pub(super) type NextForced<T: Config> = StorageValue<_, BlockNumberFor<T>>;
+	pub type NextForced<T: Config> = StorageValue<_, BlockNumberFor<T>>;

 	/// `true` if we are currently stalled.
 	#[pallet::storage]
-	#[pallet::getter(fn stalled)]
-	pub(super) type Stalled<T: Config> = StorageValue<_, (BlockNumberFor<T>, BlockNumberFor<T>)>;
+	pub type Stalled<T: Config> = StorageValue<_, (BlockNumberFor<T>, BlockNumberFor<T>)>;

 	/// The number of changes (both in terms of keys and underlying economic responsibilities)
 	/// in the "set" of Grandpa validators from genesis.
 	#[pallet::storage]
-	#[pallet::getter(fn current_set_id)]
-	pub(super) type CurrentSetId<T: Config> = StorageValue<_, SetId, ValueQuery>;
+	pub type CurrentSetId<T: Config> = StorageValue<_, SetId, ValueQuery>;

 	/// A mapping from grandpa set ID to the index of the *most recent* session for which its
 	/// members were responsible.
@@ -340,12 +335,11 @@
 	///
 	/// TWOX-NOTE: `SetId` is not under user control.
 	#[pallet::storage]
-	#[pallet::getter(fn session_for_set)]
-	pub(super) type SetIdSession<T: Config> = StorageMap<_, Twox64Concat, SetId, SessionIndex>;
+	pub type SetIdSession<T: Config> = StorageMap<_, Twox64Concat, SetId, SessionIndex>;

 	/// The current list of authorities.
 	#[pallet::storage]
-	pub(crate) type Authorities<T: Config> =
+	pub type Authorities<T: Config> =
 		StorageValue<_, BoundedAuthorityList<T::MaxAuthorities>, ValueQuery>;

 	#[derive(frame_support::DefaultNoBound)]
@@ -432,6 +426,44 @@
 }

 impl<T: Config> Pallet<T> {
+	/// State of the current authority set.
+	pub fn state() -> StoredState<BlockNumberFor<T>> {
+		State::<T>::get()
+	}
+
+	/// Pending change: (signaled at, scheduled change).
+	pub fn pending_change() -> Option<StoredPendingChange<BlockNumberFor<T>, T::MaxAuthorities>> {
+		PendingChange::<T>::get()
+	}
+
+	/// next block number where we can force a change.
+	pub fn next_forced() -> Option<BlockNumberFor<T>> {
+		NextForced::<T>::get()
+	}
+
+	/// `true` if we are currently stalled.
+	pub fn stalled() -> Option<(BlockNumberFor<T>, BlockNumberFor<T>)> {
+		Stalled::<T>::get()
+	}
+
+	/// The number of changes (both in terms of keys and underlying economic responsibilities)
+	/// in the "set" of Grandpa validators from genesis.
+	pub fn current_set_id() -> SetId {
+		CurrentSetId::<T>::get()
+	}
+
+	/// A mapping from grandpa set ID to the index of the *most recent* session for which its
+	/// members were responsible.
+	///
+	/// This is only used for validating equivocation proofs. An equivocation proof must
+	/// contain a key-ownership proof for a given session, therefore we need a way to tie
+	/// together sessions and GRANDPA set ids, i.e. we need to validate that a validator
+	/// was the owner of a given key on a given session, and what the active set ID was
+	/// during that session.
+	pub fn session_for_set(set_id: SetId) -> Option<SessionIndex> {
+		SetIdSession::<T>::get(set_id)
+	}
+
 	/// Get the current set of authorities, along with their respective weights.
 	pub fn grandpa_authorities() -> AuthorityList {
 		Authorities::<T>::get().into_inner()
@@ -440,9 +472,9 @@
 	/// Schedule GRANDPA to pause starting in the given number of blocks.
 	/// Cannot be done when already paused.
 	pub fn schedule_pause(in_blocks: BlockNumberFor<T>) -> DispatchResult {
-		if let StoredState::Live = <State<T>>::get() {
-			let scheduled_at = <frame_system::Pallet<T>>::block_number();
-			<State<T>>::put(StoredState::PendingPause { delay: in_blocks, scheduled_at });
+		if let StoredState::Live = State::<T>::get() {
+			let scheduled_at = frame_system::Pallet::<T>::block_number();
+			State::<T>::put(StoredState::PendingPause { delay: in_blocks, scheduled_at });

 			Ok(())
 		} else {
@@ -452,9 +484,9 @@
 	}

 	/// Schedule a resume of GRANDPA after pausing.
 	pub fn schedule_resume(in_blocks: BlockNumberFor<T>) -> DispatchResult {
-		if let StoredState::Paused = <State<T>>::get() {
-			let scheduled_at = <frame_system::Pallet<T>>::block_number();
-			<State<T>>::put(StoredState::PendingResume { delay: in_blocks, scheduled_at });
+		if let StoredState::Paused = State::<T>::get() {
+			let scheduled_at = frame_system::Pallet::<T>::block_number();
+			State::<T>::put(StoredState::PendingResume { delay: in_blocks, scheduled_at });

 			Ok(())
 		} else {
@@ -481,17 +513,17 @@
 		in_blocks: BlockNumberFor<T>,
 		forced: Option<BlockNumberFor<T>>,
 	) -> DispatchResult {
-		if !<PendingChange<T>>::exists() {
-			let scheduled_at = <frame_system::Pallet<T>>::block_number();
+		if !PendingChange::<T>::exists() {
+			let scheduled_at = frame_system::Pallet::<T>::block_number();

 			if forced.is_some() {
-				if Self::next_forced().map_or(false, |next| next > scheduled_at) {
+				if NextForced::<T>::get().map_or(false, |next| next > scheduled_at) {
 					return Err(Error::<T>::TooSoon.into())
 				}

 				// only allow the next forced change when twice the window has passed since
 				// this one.
-				<NextForced<T>>::put(scheduled_at + in_blocks * 2u32.into());
+				NextForced::<T>::put(scheduled_at + in_blocks * 2u32.into());
 			}

 			let next_authorities = WeakBoundedVec::<_, T::MaxAuthorities>::force_from(
@@ -502,7 +534,7 @@
 				),
 			);

-			<PendingChange<T>>::put(StoredPendingChange {
+			PendingChange::<T>::put(StoredPendingChange {
 				delay: in_blocks,
 				scheduled_at,
 				next_authorities,
@@ -518,7 +550,7 @@
 	/// Deposit one of this module's logs.
 	fn deposit_log(log: ConsensusLog<BlockNumberFor<T>>) {
 		let log = DigestItem::Consensus(GRANDPA_ENGINE_ID, log.encode());
-		<frame_system::Pallet<T>>::deposit_log(log);
+		frame_system::Pallet::<T>::deposit_log(log);
 	}

 	// Perform module initialization, abstracted so that it can be called either through genesis
@@ -554,7 +586,7 @@
 			// when we record old authority sets we could try to figure out _who_
 			// failed. until then, we can't meaningfully guard against
 			// `next == last` the way that normal session changes do.
-			<Stalled<T>>::put((further_wait, median));
+			Stalled::<T>::put((further_wait, median));
 		}
 	}

@@ -583,10 +615,10 @@
 		// Always issue a change if `session` says that the validators have changed.
 		// Even if their session keys are the same as before, the underlying economic
 		// identities have changed.
-		let current_set_id = if changed || <Stalled<T>>::exists() {
+		let current_set_id = if changed || Stalled::<T>::exists() {
 			let next_authorities = validators.map(|(_, k)| (k, 1)).collect::<Vec<_>>();

-			let res = if let Some((further_wait, median)) = <Stalled<T>>::take() {
+			let res = if let Some((further_wait, median)) = Stalled::<T>::take() {
 				Self::schedule_change(next_authorities, further_wait, Some(median))
 			} else {
 				Self::schedule_change(next_authorities, Zero::zero(), None)
@@ -608,17 +640,17 @@
 				// either the session module signalled that the validators have changed
 				// or the set was stalled. but since we didn't successfully schedule
 				// an authority set change we do not increment the set id.
-				Self::current_set_id()
+				CurrentSetId::<T>::get()
 			}
 		} else {
 			// nothing's changed, neither economic conditions nor session keys. update the pointer
 			// of the current set.
-			Self::current_set_id()
+			CurrentSetId::<T>::get()
 		};

 		// update the mapping to note that the current set corresponds to the
 		// latest equivalent session (i.e. now).
-		let session_index = <pallet_session::Pallet<T>>::current_index();
+		let session_index = pallet_session::Pallet::<T>::current_index();
 		SetIdSession::<T>::insert(current_set_id, &session_index);
 	}
diff --git a/substrate/frame/grandpa/src/tests.rs b/substrate/frame/grandpa/src/tests.rs
index 383f77f00de7..f4720966b179 100644
--- a/substrate/frame/grandpa/src/tests.rs
+++ b/substrate/frame/grandpa/src/tests.rs
@@ -110,7 +110,7 @@ fn cannot_schedule_change_when_one_pending() {
 	new_test_ext(vec![(1, 1), (2, 1), (3, 1)]).execute_with(|| {
 		initialize_block(1, Default::default());
 		Grandpa::schedule_change(to_authorities(vec![(4, 1), (5, 1), (6, 1)]), 1, None).unwrap();
-		assert!(<PendingChange<Test>>::exists());
+		assert!(PendingChange::<Test>::exists());
 		assert_noop!(
 			Grandpa::schedule_change(to_authorities(vec![(5, 1)]), 1, None),
 			Error::<Test>::ChangePending
@@ -120,7 +120,7 @@
 		let header = System::finalize();

 		initialize_block(2, header.hash());
-		assert!(<PendingChange<Test>>::exists());
+		assert!(PendingChange::<Test>::exists());
 		assert_noop!(
 			Grandpa::schedule_change(to_authorities(vec![(5, 1)]), 1, None),
 			Error::<Test>::ChangePending
@@ -130,7 +130,7 @@
 		let header = System::finalize();

 		initialize_block(3, header.hash());
-		assert!(!<PendingChange<Test>>::exists());
+		assert!(!PendingChange::<Test>::exists());
 		assert_ok!(Grandpa::schedule_change(to_authorities(vec![(5, 1)]), 1, None));

 		Grandpa::on_finalize(3);
@@ -144,7 +144,7 @@ fn dispatch_forced_change() {
 		initialize_block(1, Default::default());
 		Grandpa::schedule_change(to_authorities(vec![(4, 1), (5, 1), (6, 1)]), 5, Some(0)).unwrap();

-		assert!(<PendingChange<Test>>::exists());
+		assert!(PendingChange::<Test>::exists());
 		assert_noop!(
 			Grandpa::schedule_change(to_authorities(vec![(5, 1)]), 1, Some(0)),
 			Error::<Test>::ChangePending
@@ -155,8 +155,8 @@
 		for i in 2..7 {
 			initialize_block(i, header.hash());
-			assert!(<PendingChange<Test>>::get().unwrap().forced.is_some());
-			assert_eq!(Grandpa::next_forced(), Some(11));
+			assert!(PendingChange::<Test>::get().unwrap().forced.is_some());
+			assert_eq!(NextForced::<Test>::get(), Some(11));
 			assert_noop!(
 				Grandpa::schedule_change(to_authorities(vec![(5, 1)]), 1, None),
 				Error::<Test>::ChangePending
@@ -174,7 +174,7 @@
 		// add a normal change.
 		{
 			initialize_block(7, header.hash());
-			assert!(!<PendingChange<Test>>::exists());
+			assert!(!PendingChange::<Test>::exists());
 			assert_eq!(
 				Grandpa::grandpa_authorities(),
 				to_authorities(vec![(4, 1), (5, 1), (6, 1)])
@@ -187,7 +187,7 @@
 		// run the normal change.
 		{
 			initialize_block(8, header.hash());
-			assert!(<PendingChange<Test>>::exists());
+			assert!(PendingChange::<Test>::exists());
 			assert_eq!(
 				Grandpa::grandpa_authorities(),
 				to_authorities(vec![(4, 1), (5, 1), (6, 1)])
@@ -204,9 +204,9 @@
 		// time.
 		for i in 9..11 {
 			initialize_block(i, header.hash());
-			assert!(!<PendingChange<Test>>::exists());
+			assert!(!PendingChange::<Test>::exists());
 			assert_eq!(Grandpa::grandpa_authorities(), to_authorities(vec![(5, 1)]));
-			assert_eq!(Grandpa::next_forced(), Some(11));
+			assert_eq!(NextForced::<Test>::get(), Some(11));
 			assert_noop!(
 				Grandpa::schedule_change(to_authorities(vec![(5, 1), (6, 1)]), 5, Some(0)),
 				Error::<Test>::TooSoon
@@ -217,13 +217,13 @@
 		{
 			initialize_block(11, header.hash());
-			assert!(!<PendingChange<Test>>::exists());
+			assert!(!PendingChange::<Test>::exists());
 			assert_ok!(Grandpa::schedule_change(
 				to_authorities(vec![(5, 1), (6, 1), (7, 1)]),
 				5,
 				Some(0)
 			));
-			assert_eq!(Grandpa::next_forced(), Some(21));
+			assert_eq!(NextForced::<Test>::get(), Some(21));
 			Grandpa::on_finalize(11);
 			header = System::finalize();
 		}
@@ -239,7 +239,10 @@ fn schedule_pause_only_when_live() {
 		Grandpa::schedule_pause(1).unwrap();

 		// we've switched to the pending pause state
-		assert_eq!(Grandpa::state(), StoredState::PendingPause { scheduled_at: 1u64, delay: 1 });
+		assert_eq!(
+			State::<Test>::get(),
+			StoredState::PendingPause { scheduled_at: 1u64, delay: 1 }
+		);

 		Grandpa::on_finalize(1);
 		let _ = System::finalize();
@@ -253,7 +256,7 @@
 		let _ = System::finalize();

 		// after finalizing block 2 the set should have switched to paused state
-		assert_eq!(Grandpa::state(), StoredState::Paused);
+		assert_eq!(State::<Test>::get(), StoredState::Paused);
 	});
 }

@@ -265,14 +268,14 @@ fn schedule_resume_only_when_paused() {

 		// the set is currently live, resuming it is an error
 		assert_noop!(Grandpa::schedule_resume(1), Error::<Test>::ResumeFailed);
-		assert_eq!(Grandpa::state(), StoredState::Live);
+		assert_eq!(State::<Test>::get(), StoredState::Live);

 		// we schedule a pause to be applied instantly
 		Grandpa::schedule_pause(0).unwrap();
 		Grandpa::on_finalize(1);
 		let _ = System::finalize();
-		assert_eq!(Grandpa::state(), StoredState::Paused);
+		assert_eq!(State::<Test>::get(), StoredState::Paused);

 		// we schedule the set to go back live in 2 blocks
 		initialize_block(2, Default::default());
@@ -289,7 +292,7 @@
 		let _ = System::finalize();

 		// it should be live at block 4
-		assert_eq!(Grandpa::state(), StoredState::Live);
+		assert_eq!(State::<Test>::get(), StoredState::Live);
 	});
 }

@@ -342,7 +345,7 @@ fn report_equivocation_current_set_works() {
 		let equivocation_key = &authorities[equivocation_authority_index].0;
 		let equivocation_keyring = extract_keyring(equivocation_key);

-		let set_id = Grandpa::current_set_id();
+		let set_id = CurrentSetId::<Test>::get();

 		// generate an equivocation proof, with two votes in the same round for
 		// different block hashes signed by the same key
@@ -424,7 +427,7 @@ fn report_equivocation_old_set_works() {
 		let equivocation_keyring = extract_keyring(equivocation_key);

-		let set_id = Grandpa::current_set_id();
+		let set_id = CurrentSetId::<Test>::get();

 		// generate an equivocation proof for the old set,
 		let equivocation_proof = generate_equivocation_proof(
@@ -487,7 +490,7 @@ fn report_equivocation_invalid_set_id() {
 		let key_owner_proof =
 			Historical::prove((sp_consensus_grandpa::KEY_TYPE, &equivocation_key)).unwrap();

-		let set_id = Grandpa::current_set_id();
+		let set_id = CurrentSetId::<Test>::get();

 		// generate an equivocation for a future set
 		let equivocation_proof = generate_equivocation_proof(
@@ -527,7 +530,7 @@ fn report_equivocation_invalid_session() {

 		start_era(2);

-		let set_id = Grandpa::current_set_id();
+		let set_id = CurrentSetId::<Test>::get();

 		// generate an equivocation proof at set id = 2
 		let equivocation_proof = generate_equivocation_proof(
@@ -568,7 +571,7 @@ fn report_equivocation_invalid_key_owner_proof() {
 		let equivocation_key = &authorities[equivocation_authority_index].0;
 		let equivocation_keyring = extract_keyring(equivocation_key);

-		let set_id = Grandpa::current_set_id();
+		let set_id = CurrentSetId::<Test>::get();

 		// generate an equivocation proof for the authority at index 0
 		let equivocation_proof = generate_equivocation_proof(
@@ -611,7 +614,7 @@ fn report_equivocation_invalid_equivocation_proof() {
 		let key_owner_proof =
 			Historical::prove((sp_consensus_grandpa::KEY_TYPE, &equivocation_key)).unwrap();

-		let set_id = Grandpa::current_set_id();
+		let set_id = CurrentSetId::<Test>::get();

 		let assert_invalid_equivocation_proof = |equivocation_proof| {
 			assert_err!(
@@ -675,7 +678,7 @@ fn report_equivocation_validate_unsigned_prevents_duplicates() {
 		let equivocation_authority_index = 0;
 		let equivocation_key = &authorities[equivocation_authority_index].0;
 		let equivocation_keyring = extract_keyring(equivocation_key);
-		let set_id = Grandpa::current_set_id();
+		let set_id = CurrentSetId::<Test>::get();

 		let equivocation_proof = generate_equivocation_proof(
 			set_id,
@@ -748,12 +751,12 @@
 #[test]
 fn on_new_session_doesnt_start_new_set_if_schedule_change_failed() {
 	new_test_ext(vec![(1, 1), (2, 1), (3, 1)]).execute_with(|| {
-		assert_eq!(Grandpa::current_set_id(), 0);
+		assert_eq!(CurrentSetId::<Test>::get(), 0);

 		// starting a new era should lead to a change in the session
 		// validators and trigger a new set
 		start_era(1);
-		assert_eq!(Grandpa::current_set_id(), 1);
+		assert_eq!(CurrentSetId::<Test>::get(), 1);

 		// we schedule a change delayed by 2 blocks, this should make it so that
 		// when we try to rotate the session at the beginning of the era we will
@@ -761,22 +764,22 @@
 		// not increment the set id.
 		Grandpa::schedule_change(to_authorities(vec![(1, 1)]), 2, None).unwrap();
 		start_era(2);
-		assert_eq!(Grandpa::current_set_id(), 1);
+		assert_eq!(CurrentSetId::<Test>::get(), 1);

 		// everything should go back to normal after.
 		start_era(3);
-		assert_eq!(Grandpa::current_set_id(), 2);
+		assert_eq!(CurrentSetId::<Test>::get(), 2);

 		// session rotation might also fail to schedule a change if it's for a
 		// forced change (i.e. grandpa is stalled) and it is too soon.
-		<NextForced<Test>>::put(1000);
-		<Stalled<Test>>::put((30, 1));
+		NextForced::<Test>::put(1000);
+		Stalled::<Test>::put((30, 1));

 		// NOTE: we cannot go through normal era rotation since having `Stalled`
 		// defined will also trigger a new set (regardless of whether the
 		// session validators changed)
 		Grandpa::on_new_session(true, std::iter::empty(), std::iter::empty());
-		assert_eq!(Grandpa::current_set_id(), 2);
+		assert_eq!(CurrentSetId::<Test>::get(), 2);
 	});
 }

@@ -790,19 +793,19 @@ fn cleans_up_old_set_id_session_mappings() {
 		// we should have a session id mapping for all the set ids from
 		// `max_set_id_session_entries` eras we have observed
 		for i in 1..=max_set_id_session_entries {
-			assert!(Grandpa::session_for_set(i as u64).is_some());
+			assert!(SetIdSession::<Test>::get(i as u64).is_some());
 		}

 		start_era(max_set_id_session_entries * 2);

 		// we should keep tracking the new mappings for new eras
 		for i in max_set_id_session_entries + 1..=max_set_id_session_entries * 2 {
-			assert!(Grandpa::session_for_set(i as u64).is_some());
+			assert!(SetIdSession::<Test>::get(i as u64).is_some());
 		}

 		// but the old ones should have been pruned by now
 		for i in 1..=max_set_id_session_entries {
-			assert!(Grandpa::session_for_set(i as u64).is_none());
+			assert!(SetIdSession::<Test>::get(i as u64).is_none());
 		}
 	});
 }

@@ -812,24 +815,24 @@ fn always_schedules_a_change_on_new_session_when_stalled() {
 	new_test_ext(vec![(1, 1), (2, 1), (3, 1)]).execute_with(|| {
 		start_era(1);

-		assert!(Grandpa::pending_change().is_none());
-		assert_eq!(Grandpa::current_set_id(), 1);
+		assert!(PendingChange::<Test>::get().is_none());
+		assert_eq!(CurrentSetId::<Test>::get(), 1);

 		// if the session handler reports no change then we should not schedule
 		// any pending change
 		Grandpa::on_new_session(false, std::iter::empty(), std::iter::empty());

-		assert!(Grandpa::pending_change().is_none());
-		assert_eq!(Grandpa::current_set_id(), 1);
+		assert!(PendingChange::<Test>::get().is_none());
+		assert_eq!(CurrentSetId::<Test>::get(), 1);

 		// if grandpa is stalled then we should **always** schedule a forced
 		// change on a new session
-		<Stalled<Test>>::put((10, 1));
+		Stalled::<Test>::put((10, 1));
 		Grandpa::on_new_session(false, std::iter::empty(), std::iter::empty());

-		assert!(Grandpa::pending_change().is_some());
-		assert!(Grandpa::pending_change().unwrap().forced.is_some());
-		assert_eq!(Grandpa::current_set_id(), 2);
+		assert!(PendingChange::<Test>::get().is_some());
+		assert!(PendingChange::<Test>::get().unwrap().forced.is_some());
+		assert_eq!(CurrentSetId::<Test>::get(), 2);
 	});
 }

@@ -861,7 +864,7 @@ fn valid_equivocation_reports_dont_pay_fees() {
 		let equivocation_key = &Grandpa::grandpa_authorities()[0].0;
 		let equivocation_keyring = extract_keyring(equivocation_key);
-		let set_id = Grandpa::current_set_id();
+		let set_id = CurrentSetId::<Test>::get();

 		// generate an equivocation proof.
 		let equivocation_proof = generate_equivocation_proof(
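The grandpa changes above, and the test updates around them, are one mechanical refactor: storage items become `pub`, the `#[pallet::getter(..)]` attributes go away, and callers either read storage directly (`CurrentSetId::<T>::get()`) or use the hand-written wrappers kept for external compatibility. A stub sketch of the resulting shape, with FRAME storage replaced by a thread-local purely for illustration:

use std::cell::Cell;

thread_local! {
	// Stand-in for the `CurrentSetId<T>` storage value.
	static CURRENT_SET_ID: Cell<u64> = Cell::new(0);
}

struct Pallet;

impl Pallet {
	// Hand-written replacement for the removed
	// `#[pallet::getter(fn current_set_id)]`: external callers keep
	// `Pallet::current_set_id()`, while pallet-internal code reads the
	// storage item directly, as the diff does.
	fn current_set_id() -> u64 {
		CURRENT_SET_ID.with(|id| id.get())
	}
}

fn main() {
	CURRENT_SET_ID.with(|id| id.set(2));
	assert_eq!(Pallet::current_set_id(), 2);
}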
diff --git a/substrate/frame/identity/src/tests.rs b/substrate/frame/identity/src/tests.rs
index 7bf5b2a72760..01bc312723aa 100644
--- a/substrate/frame/identity/src/tests.rs
+++ b/substrate/frame/identity/src/tests.rs
@@ -26,7 +26,7 @@ use crate::{
 use codec::{Decode, Encode};
 use frame_support::{
 	assert_err, assert_noop, assert_ok, derive_impl, parameter_types,
-	traits::{ConstU32, ConstU64, Get, OnFinalize, OnInitialize},
+	traits::{ConstU32, ConstU64, Get},
 	BoundedVec,
 };
 use frame_system::EnsureRoot;
@@ -114,18 +114,6 @@ pub fn new_test_ext() -> sp_io::TestExternalities {
 	ext
 }

-fn run_to_block(n: u64) {
-	while System::block_number() < n {
-		Identity::on_finalize(System::block_number());
-		Balances::on_finalize(System::block_number());
-		System::on_finalize(System::block_number());
-		System::set_block_number(System::block_number() + 1);
-		System::on_initialize(System::block_number());
-		Balances::on_initialize(System::block_number());
-		Identity::on_initialize(System::block_number());
-	}
-}
-
 fn account(id: u8) -> AccountIdOf<Test> {
 	[id; 32].into()
 }
@@ -1714,7 +1702,7 @@ fn unaccepted_usernames_through_grant_should_expire() {
 			Some((who.clone(), expiration, Provider::Allocation))
 		);

-		run_to_block(now + expiration - 1);
+		System::run_to_block::<AllPalletsWithSystem>(now + expiration - 1);

 		// Cannot be removed
 		assert_noop!(
@@ -1722,7 +1710,7 @@
 			Error::<Test>::NotExpired
 		);

-		run_to_block(now + expiration);
+		System::run_to_block::<AllPalletsWithSystem>(now + expiration);

 		// Anyone can remove
 		assert_ok!(Identity::remove_expired_approval(
@@ -1782,7 +1770,7 @@ fn unaccepted_usernames_through_deposit_should_expire() {
 			Some((who.clone(), expiration, Provider::AuthorityDeposit(username_deposit)))
 		);

-		run_to_block(now + expiration - 1);
+		System::run_to_block::<AllPalletsWithSystem>(now + expiration - 1);

 		// Cannot be removed
 		assert_noop!(
@@ -1790,7 +1778,7 @@
 			Error::<Test>::NotExpired
 		);

-		run_to_block(now + expiration);
+		System::run_to_block::<AllPalletsWithSystem>(now + expiration);

 		// Anyone can remove
 		assert_eq!(
diff --git a/substrate/frame/lottery/src/mock.rs b/substrate/frame/lottery/src/mock.rs
index d2c442e2ac6e..b771ed0849f6 100644
--- a/substrate/frame/lottery/src/mock.rs
+++ b/substrate/frame/lottery/src/mock.rs
@@ -20,10 +20,7 @@
 use super::*;
 use crate as pallet_lottery;
-use frame_support::{
-	derive_impl, parameter_types,
-	traits::{ConstU32, OnFinalize, OnInitialize},
-};
+use frame_support::{derive_impl, parameter_types, traits::ConstU32};
 use frame_support_test::TestRandomness;
 use frame_system::EnsureRoot;
 use sp_runtime::{BuildStorage, Perbill};
@@ -83,16 +80,3 @@ pub fn new_test_ext() -> sp_io::TestExternalities {
 		.unwrap();
 	t.into()
 }
-
-/// Run until a particular block.
-pub fn run_to_block(n: u64) {
-	while System::block_number() < n {
-		if System::block_number() > 1 {
-			Lottery::on_finalize(System::block_number());
-			System::on_finalize(System::block_number());
-		}
-		System::set_block_number(System::block_number() + 1);
-		System::on_initialize(System::block_number());
-		Lottery::on_initialize(System::block_number());
-	}
-}
diff --git a/substrate/frame/lottery/src/tests.rs b/substrate/frame/lottery/src/tests.rs
index ae3a6c858f24..119be5df4925 100644
--- a/substrate/frame/lottery/src/tests.rs
+++ b/substrate/frame/lottery/src/tests.rs
@@ -17,12 +17,11 @@
 //! Tests for the module.

-use super::*;
-use frame_support::{assert_noop, assert_ok, assert_storage_noop};
-use mock::{
-	new_test_ext, run_to_block, Balances, BalancesCall, Lottery, RuntimeCall, RuntimeOrigin,
-	SystemCall, Test,
+use crate::{
+	mock::{Lottery, *},
+	*,
 };
+use frame_support::{assert_noop, assert_ok, assert_storage_noop};
 use sp_runtime::{traits::BadOrigin, TokenError};

 #[test]
@@ -74,13 +73,13 @@ fn basic_end_to_end_works() {
 		assert_eq!(TicketsCount::<Test>::get(), 4);

 		// Go to end
-		run_to_block(20);
+		System::run_to_block::<AllPalletsWithSystem>(20);
 		assert_ok!(Lottery::buy_ticket(RuntimeOrigin::signed(5), call.clone()));
 		// Ticket isn't bought
 		assert_eq!(TicketsCount::<Test>::get(), 4);

 		// Go to payout
-		run_to_block(25);
+		System::run_to_block::<AllPalletsWithSystem>(25);
 		// User 1 wins
 		assert_eq!(Balances::free_balance(&1), 70 + 40);
 		// Lottery is reset and restarted
@@ -115,11 +114,11 @@ fn stop_repeat_works() {
 		// Lottery still exists.
 		assert!(crate::Lottery::<Test>::get().is_some());
 		// End and pick a winner.
-		run_to_block(length + delay);
+		System::run_to_block::<AllPalletsWithSystem>(length + delay);

 		// Lottery stays dead and does not repeat.
 		assert!(crate::Lottery::<Test>::get().is_none());
-		run_to_block(length + delay + 1);
+		System::run_to_block::<AllPalletsWithSystem>(length + delay + 1);
 		assert!(crate::Lottery::<Test>::get().is_none());
 	});
 }
@@ -281,7 +280,7 @@ fn buy_ticket_works() {
 		assert_ok!(Lottery::start_lottery(RuntimeOrigin::root(), 1, 20, 5, false));

 		// Go to start, buy ticket for transfer
-		run_to_block(5);
+		System::run_to_block::<AllPalletsWithSystem>(5);
 		assert_ok!(Lottery::buy_ticket(RuntimeOrigin::signed(1), call));
 		assert_eq!(TicketsCount::<Test>::get(), 1);

@@ -300,12 +299,12 @@
 		assert_eq!(TicketsCount::<Test>::get(), 2);

 		// Go to end, can't buy tickets anymore
-		run_to_block(20);
+		System::run_to_block::<AllPalletsWithSystem>(20);
 		assert_ok!(Lottery::buy_ticket(RuntimeOrigin::signed(2), call.clone()));
 		assert_eq!(TicketsCount::<Test>::get(), 2);

 		// Go to payout, can't buy tickets when there is no lottery open
-		run_to_block(25);
+		System::run_to_block::<AllPalletsWithSystem>(25);
 		assert_ok!(Lottery::buy_ticket(RuntimeOrigin::signed(2), call.clone()));
 		assert_eq!(TicketsCount::<Test>::get(), 0);
 		assert_eq!(LotteryIndex::<Test>::get(), 1);
@@ -409,7 +408,7 @@ fn no_participants_works() {
 		assert_ok!(Lottery::start_lottery(RuntimeOrigin::root(), 10, length, delay, false));

 		// End the lottery, no one wins.
-		run_to_block(length + delay);
+		System::run_to_block::<AllPalletsWithSystem>(length + delay);
 	});
 }
diff --git a/substrate/frame/migrations/src/mock.rs b/substrate/frame/migrations/src/mock.rs
index 48ff175f8137..ea86899cad83 100644
--- a/substrate/frame/migrations/src/mock.rs
+++ b/substrate/frame/migrations/src/mock.rs
@@ -21,12 +21,7 @@
 use crate::{mock_helpers::*, Event, Historic};

-use frame_support::{
-	derive_impl,
-	migrations::*,
-	traits::{OnFinalize, OnInitialize},
-	weights::Weight,
-};
+use frame_support::{derive_impl, migrations::*, weights::Weight};
 use frame_system::EventRecord;
 use sp_core::H256;
@@ -113,18 +108,18 @@ pub fn test_closure<R>(f: impl FnOnce() -> R) -> R {
 	ext.execute_with(f)
 }

-pub fn run_to_block(n: u32) {
-	while System::block_number() < n as u64 {
-		log::debug!("Block {}", System::block_number());
-		System::set_block_number(System::block_number() + 1);
-		System::on_initialize(System::block_number());
-		Migrations::on_initialize(System::block_number());
-		// Executive calls this:
-		<Migrations as MultiStepMigrator>::step();
-
-		Migrations::on_finalize(System::block_number());
-		System::on_finalize(System::block_number());
-	}
+pub fn run_to_block(n: u64) {
+	System::run_to_block_with::<AllPalletsWithSystem>(
+		n,
+		frame_system::RunToBlockHooks::default()
+			.before_initialize(|bn| {
+				log::debug!("Block {bn}");
+			})
+			.after_initialize(|_| {
+				// Executive calls this:
+				<Migrations as MultiStepMigrator>::step();
+			}),
+	);
 }

 /// Returns the historic migrations, sorted by their identifier.
diff --git a/substrate/frame/nis/src/mock.rs b/substrate/frame/nis/src/mock.rs
index 2b008f8ec2a4..08e69ef0de05 100644
--- a/substrate/frame/nis/src/mock.rs
+++ b/substrate/frame/nis/src/mock.rs
@@ -21,7 +21,7 @@
 use crate::{self as pallet_nis, Perquintill, WithMaximumOf};
 use frame_support::{
 	derive_impl, ord_parameter_types, parameter_types,
-	traits::{fungible::Inspect, ConstU32, ConstU64, OnFinalize, OnInitialize, StorageMapShim},
+	traits::{fungible::Inspect, ConstU32, ConstU64, StorageMapShim},
 	weights::Weight,
 	PalletId,
 };
@@ -145,15 +145,3 @@ pub fn new_test_ext() -> sp_io::TestExternalities {
 pub fn new_test_ext_empty() -> sp_io::TestExternalities {
 	frame_system::GenesisConfig::<Test>::default().build_storage().unwrap().into()
 }
-
-pub fn run_to_block(n: u64) {
-	while System::block_number() < n {
-		Nis::on_finalize(System::block_number());
-		Balances::on_finalize(System::block_number());
-		System::on_finalize(System::block_number());
-		System::set_block_number(System::block_number() + 1);
-		System::on_initialize(System::block_number());
-		Balances::on_initialize(System::block_number());
-		Nis::on_initialize(System::block_number());
-	}
-}
diff --git a/substrate/frame/nis/src/tests.rs b/substrate/frame/nis/src/tests.rs
index a17aaf421827..10c39a0d48ed 100644
--- a/substrate/frame/nis/src/tests.rs
+++ b/substrate/frame/nis/src/tests.rs
@@ -55,7 +55,7 @@ fn enlarge(amount: Balance, max_bids: u32) {
 #[test]
 fn basic_setup_works() {
 	new_test_ext().execute_with(|| {
-		run_to_block(1);
+		System::run_to_block::<AllPalletsWithSystem>(1);

 		for q in 0..3 {
 			assert!(Queues::<Test>::get(q).is_empty());
@@ -76,7 +76,7 @@
 #[test]
 fn place_bid_works() {
 	new_test_ext().execute_with(|| {
-		run_to_block(1);
+		System::run_to_block::<AllPalletsWithSystem>(1);
 		assert_noop!(Nis::place_bid(signed(1), 1, 2), Error::<Test>::AmountTooSmall);
 		assert_noop!(Nis::place_bid(signed(1), 101, 2), FundsUnavailable);
 		assert_noop!(Nis::place_bid(signed(1), 10, 4), Error::<Test>::DurationTooBig);
@@ -90,7 +90,7 @@
 #[test]
 fn place_bid_queuing_works() {
 	new_test_ext().execute_with(|| {
-		run_to_block(1);
System::run_to_block::(1); assert_ok!(Nis::place_bid(signed(1), 20, 2)); assert_ok!(Nis::place_bid(signed(1), 10, 2)); assert_ok!(Nis::place_bid(signed(1), 5, 2)); @@ -116,7 +116,7 @@ fn place_bid_queuing_works() { #[test] fn place_bid_fails_when_queue_full() { new_test_ext().execute_with(|| { - run_to_block(1); + System::run_to_block::(1); assert_ok!(Nis::place_bid(signed(1), 10, 2)); assert_ok!(Nis::place_bid(signed(2), 10, 2)); assert_ok!(Nis::place_bid(signed(3), 10, 2)); @@ -128,7 +128,7 @@ fn place_bid_fails_when_queue_full() { #[test] fn multiple_place_bids_works() { new_test_ext().execute_with(|| { - run_to_block(1); + System::run_to_block::(1); assert_ok!(Nis::place_bid(signed(1), 10, 1)); assert_ok!(Nis::place_bid(signed(1), 10, 2)); assert_ok!(Nis::place_bid(signed(1), 10, 2)); @@ -154,7 +154,7 @@ fn multiple_place_bids_works() { #[test] fn retract_single_item_queue_works() { new_test_ext().execute_with(|| { - run_to_block(1); + System::run_to_block::(1); assert_ok!(Nis::place_bid(signed(1), 10, 1)); assert_ok!(Nis::place_bid(signed(1), 10, 2)); assert_ok!(Nis::retract_bid(signed(1), 10, 1)); @@ -169,7 +169,7 @@ fn retract_single_item_queue_works() { #[test] fn retract_with_other_and_duplicate_works() { new_test_ext().execute_with(|| { - run_to_block(1); + System::run_to_block::(1); assert_ok!(Nis::place_bid(signed(1), 10, 1)); assert_ok!(Nis::place_bid(signed(1), 10, 2)); assert_ok!(Nis::place_bid(signed(1), 10, 2)); @@ -190,7 +190,7 @@ fn retract_with_other_and_duplicate_works() { #[test] fn retract_non_existent_item_fails() { new_test_ext().execute_with(|| { - run_to_block(1); + System::run_to_block::(1); assert_noop!(Nis::retract_bid(signed(1), 10, 1), Error::::UnknownBid); assert_ok!(Nis::place_bid(signed(1), 10, 1)); assert_noop!(Nis::retract_bid(signed(1), 20, 1), Error::::UnknownBid); @@ -202,7 +202,7 @@ fn retract_non_existent_item_fails() { #[test] fn basic_enlarge_works() { new_test_ext().execute_with(|| { - run_to_block(1); + System::run_to_block::(1); assert_ok!(Nis::place_bid(signed(1), 40, 1)); assert_ok!(Nis::place_bid(signed(2), 40, 2)); enlarge(40, 2); @@ -240,7 +240,7 @@ fn basic_enlarge_works() { #[test] fn enlarge_respects_bids_limit() { new_test_ext().execute_with(|| { - run_to_block(1); + System::run_to_block::(1); assert_ok!(Nis::place_bid(signed(1), 40, 1)); assert_ok!(Nis::place_bid(signed(2), 40, 2)); assert_ok!(Nis::place_bid(signed(3), 40, 2)); @@ -285,7 +285,7 @@ fn enlarge_respects_bids_limit() { #[test] fn enlarge_respects_amount_limit_and_will_split() { new_test_ext().execute_with(|| { - run_to_block(1); + System::run_to_block::(1); assert_ok!(Nis::place_bid(signed(1), 80, 1)); enlarge(40, 2); @@ -317,7 +317,7 @@ fn enlarge_respects_amount_limit_and_will_split() { #[test] fn basic_thaw_works() { new_test_ext().execute_with(|| { - run_to_block(1); + System::run_to_block::(1); assert_ok!(Nis::place_bid(signed(1), 40, 1)); assert_eq!(Nis::issuance().effective, 400); assert_eq!(Balances::free_balance(1), 60); @@ -330,9 +330,9 @@ fn basic_thaw_works() { assert_eq!(Balances::reserved_balance(1), 40); assert_eq!(holdings(), 40); - run_to_block(3); + System::run_to_block::(3); assert_noop!(Nis::thaw_private(signed(1), 0, None), Error::::NotExpired); - run_to_block(4); + System::run_to_block::(4); assert_noop!(Nis::thaw_private(signed(1), 1, None), Error::::UnknownReceipt); assert_noop!(Nis::thaw_private(signed(2), 0, None), Error::::NotOwner); @@ -359,12 +359,12 @@ fn basic_thaw_works() { #[test] fn partial_thaw_works() { new_test_ext().execute_with(|| { 
- run_to_block(1); + System::run_to_block::(1); assert_ok!(Nis::place_bid(signed(1), 80, 1)); enlarge(80, 1); assert_eq!(holdings(), 80); - run_to_block(4); + System::run_to_block::(4); let prop = Perquintill::from_rational(4_100_000, 21_000_000u64); assert_noop!(Nis::thaw_private(signed(1), 0, Some(prop)), Error::::MakesDust); let prop = Perquintill::from_rational(1_050_000, 21_000_000u64); @@ -402,10 +402,10 @@ fn partial_thaw_works() { #[test] fn thaw_respects_transfers() { new_test_ext().execute_with(|| { - run_to_block(1); + System::run_to_block::(1); assert_ok!(Nis::place_bid(signed(1), 40, 1)); enlarge(40, 1); - run_to_block(4); + System::run_to_block::(4); assert_eq!(Nis::owner(&0), Some(1)); assert_eq!(Balances::reserved_balance(&1), 40); @@ -428,10 +428,10 @@ fn thaw_respects_transfers() { #[test] fn communify_works() { new_test_ext().execute_with(|| { - run_to_block(1); + System::run_to_block::(1); assert_ok!(Nis::place_bid(signed(1), 40, 1)); enlarge(40, 1); - run_to_block(4); + System::run_to_block::(4); assert_eq!(Nis::owner(&0), Some(1)); assert_eq!(Balances::reserved_balance(&1), 40); @@ -479,10 +479,10 @@ fn communify_works() { #[test] fn privatize_works() { new_test_ext().execute_with(|| { - run_to_block(1); + System::run_to_block::(1); assert_ok!(Nis::place_bid(signed(1), 40, 1)); enlarge(40, 1); - run_to_block(4); + System::run_to_block::(4); assert_noop!(Nis::privatize(signed(2), 0), Error::::AlreadyPrivate); assert_ok!(Nis::communify(signed(1), 0)); @@ -503,11 +503,11 @@ fn privatize_works() { #[test] fn privatize_and_thaw_with_another_receipt_works() { new_test_ext().execute_with(|| { - run_to_block(1); + System::run_to_block::(1); assert_ok!(Nis::place_bid(signed(1), 40, 1)); assert_ok!(Nis::place_bid(signed(2), 40, 1)); enlarge(80, 2); - run_to_block(4); + System::run_to_block::(4); assert_ok!(Nis::communify(signed(1), 0)); assert_ok!(Nis::communify(signed(2), 1)); @@ -535,7 +535,7 @@ fn privatize_and_thaw_with_another_receipt_works() { #[test] fn communal_thaw_when_issuance_higher_works() { new_test_ext().execute_with(|| { - run_to_block(1); + System::run_to_block::(1); assert_ok!(Balances::transfer_allow_death(signed(2), 1, 1)); assert_ok!(Nis::place_bid(signed(1), 100, 1)); enlarge(100, 1); @@ -552,7 +552,7 @@ fn communal_thaw_when_issuance_higher_works() { assert_ok!(Balances::mint_into(&3, 50)); assert_ok!(Balances::mint_into(&4, 50)); - run_to_block(4); + System::run_to_block::(4); // Unfunded initially... assert_noop!(Nis::thaw_communal(signed(1), 0), Error::::Unfunded); @@ -581,7 +581,7 @@ fn communal_thaw_when_issuance_higher_works() { #[test] fn private_thaw_when_issuance_higher_works() { new_test_ext().execute_with(|| { - run_to_block(1); + System::run_to_block::(1); assert_ok!(Balances::transfer_allow_death(signed(2), 1, 1)); assert_ok!(Nis::place_bid(signed(1), 100, 1)); enlarge(100, 1); @@ -591,7 +591,7 @@ fn private_thaw_when_issuance_higher_works() { assert_ok!(Balances::mint_into(&3, 50)); assert_ok!(Balances::mint_into(&4, 50)); - run_to_block(4); + System::run_to_block::(4); // Unfunded initially... assert_noop!(Nis::thaw_private(signed(1), 0, None), Error::::Unfunded); @@ -609,7 +609,7 @@ fn private_thaw_when_issuance_higher_works() { #[test] fn thaw_with_ignored_issuance_works() { new_test_ext().execute_with(|| { - run_to_block(1); + System::run_to_block::(1); // Give account zero some balance. 
assert_ok!(Balances::mint_into(&0, 200)); @@ -622,7 +622,7 @@ fn thaw_with_ignored_issuance_works() { assert_ok!(Balances::transfer_allow_death(signed(0), 3, 50)); assert_ok!(Balances::transfer_allow_death(signed(0), 4, 50)); - run_to_block(4); + System::run_to_block::(4); // Unfunded initially... assert_noop!(Nis::thaw_private(signed(1), 0, None), Error::::Unfunded); // ...so we fund... @@ -640,7 +640,7 @@ fn thaw_with_ignored_issuance_works() { #[test] fn thaw_when_issuance_lower_works() { new_test_ext().execute_with(|| { - run_to_block(1); + System::run_to_block::(1); assert_ok!(Balances::transfer_allow_death(signed(2), 1, 1)); assert_ok!(Nis::place_bid(signed(1), 100, 1)); enlarge(100, 1); @@ -650,7 +650,7 @@ fn thaw_when_issuance_lower_works() { assert_ok!(Balances::burn_from(&3, 25, Expendable, Exact, Force)); assert_ok!(Balances::burn_from(&4, 25, Expendable, Exact, Force)); - run_to_block(4); + System::run_to_block::(4); assert_ok!(Nis::thaw_private(signed(1), 0, None)); assert_ok!(Balances::transfer_allow_death(signed(1), 2, 1)); @@ -662,7 +662,7 @@ fn thaw_when_issuance_lower_works() { #[test] fn multiple_thaws_works() { new_test_ext().execute_with(|| { - run_to_block(1); + System::run_to_block::(1); assert_ok!(Balances::transfer_allow_death(signed(3), 1, 1)); assert_ok!(Nis::place_bid(signed(1), 40, 1)); assert_ok!(Nis::place_bid(signed(1), 60, 1)); @@ -675,11 +675,11 @@ fn multiple_thaws_works() { assert_ok!(Balances::mint_into(&4, 100)); assert_ok!(Nis::fund_deficit(signed(1))); - run_to_block(4); + System::run_to_block::(4); assert_ok!(Nis::thaw_private(signed(1), 0, None)); assert_ok!(Nis::thaw_private(signed(1), 1, None)); assert_noop!(Nis::thaw_private(signed(2), 2, None), Error::::Throttled); - run_to_block(5); + System::run_to_block::(5); assert_ok!(Nis::thaw_private(signed(2), 2, None)); assert_ok!(Balances::transfer_allow_death(signed(1), 3, 1)); @@ -693,7 +693,7 @@ fn multiple_thaws_works() { #[test] fn multiple_thaws_works_in_alternative_thaw_order() { new_test_ext().execute_with(|| { - run_to_block(1); + System::run_to_block::(1); assert_ok!(Balances::transfer_allow_death(signed(3), 1, 1)); assert_ok!(Nis::place_bid(signed(1), 40, 1)); assert_ok!(Nis::place_bid(signed(1), 60, 1)); @@ -706,12 +706,12 @@ fn multiple_thaws_works_in_alternative_thaw_order() { assert_ok!(Balances::mint_into(&4, 100)); assert_ok!(Nis::fund_deficit(signed(1))); - run_to_block(4); + System::run_to_block::(4); assert_ok!(Nis::thaw_private(signed(2), 2, None)); assert_noop!(Nis::thaw_private(signed(1), 1, None), Error::::Throttled); assert_ok!(Nis::thaw_private(signed(1), 0, None)); - run_to_block(5); + System::run_to_block::(5); assert_ok!(Nis::thaw_private(signed(1), 1, None)); assert_ok!(Balances::transfer_allow_death(signed(1), 3, 1)); @@ -725,7 +725,7 @@ fn multiple_thaws_works_in_alternative_thaw_order() { #[test] fn enlargement_to_target_works() { new_test_ext().execute_with(|| { - run_to_block(2); + System::run_to_block::(2); let w = <() as WeightInfo>::process_queues() + <() as WeightInfo>::process_queue() + (<() as WeightInfo>::process_bid() * 2); @@ -737,7 +737,7 @@ fn enlargement_to_target_works() { assert_ok!(Nis::place_bid(signed(3), 40, 3)); Target::set(Perquintill::from_percent(40)); - run_to_block(3); + System::run_to_block::(3); assert_eq!(Queues::::get(1), vec![Bid { amount: 40, who: 1 },]); assert_eq!( Queues::::get(2), @@ -749,7 +749,7 @@ fn enlargement_to_target_works() { ); assert_eq!(QueueTotals::::get(), vec![(1, 40), (2, 80), (2, 80)]); - run_to_block(4); + 
+		System::run_to_block::<AllPalletsWithSystem>(4);
 		// Two new items should have been issued to 2 & 3 for 40 each & duration of 3.
 		assert_eq!(
 			Receipts::<Test>::get(0).unwrap(),
@@ -778,7 +778,7 @@ fn enlargement_to_target_works() {
 			}
 		);
-		run_to_block(5);
+		System::run_to_block::<AllPalletsWithSystem>(5);
 		// No change
 		assert_eq!(
 			Summary::<Test>::get(),
@@ -791,7 +791,7 @@ fn enlargement_to_target_works() {
 			}
 		);
-		run_to_block(6);
+		System::run_to_block::<AllPalletsWithSystem>(6);
 		// Two new items should have been issued to 1 & 2 for 40 each & duration of 2.
 		assert_eq!(
 			Receipts::<Test>::get(2).unwrap(),
@@ -820,7 +820,7 @@ fn enlargement_to_target_works() {
 			}
 		);
-		run_to_block(8);
+		System::run_to_block::<AllPalletsWithSystem>(8);
 		// No change now.
 		assert_eq!(
 			Summary::<Test>::get(),
@@ -835,7 +835,7 @@ fn enlargement_to_target_works() {
 		// Set target a bit higher to use up the remaining bid.
 		Target::set(Perquintill::from_percent(60));
-		run_to_block(10);
+		System::run_to_block::<AllPalletsWithSystem>(10);
 		// One new item should have been issued to 1 for 40 each & duration of 2.
 		assert_eq!(
diff --git a/substrate/frame/nomination-pools/src/mock.rs b/substrate/frame/nomination-pools/src/mock.rs
index cc942039760c..f544e79ec481 100644
--- a/substrate/frame/nomination-pools/src/mock.rs
+++ b/substrate/frame/nomination-pools/src/mock.rs
@@ -435,18 +435,7 @@ parameter_types! {
 /// Helper to run a specified amount of blocks.
 pub fn run_blocks(n: u64) {
 	let current_block = System::block_number();
-	run_to_block(n + current_block);
-}
-
-/// Helper to run to a specific block.
-pub fn run_to_block(n: u64) {
-	let current_block = System::block_number();
-	assert!(n > current_block);
-	while System::block_number() < n {
-		Pools::on_finalize(System::block_number());
-		System::set_block_number(System::block_number() + 1);
-		Pools::on_initialize(System::block_number());
-	}
+	System::run_to_block::<AllPalletsWithSystem>(n + current_block);
 }
 
 /// All events of this pallet.
diff --git a/substrate/frame/recovery/src/mock.rs b/substrate/frame/recovery/src/mock.rs
index 3930db82d6c7..86f13b0da4f7 100644
--- a/substrate/frame/recovery/src/mock.rs
+++ b/substrate/frame/recovery/src/mock.rs
@@ -20,10 +20,7 @@
 use super::*;
 
 use crate as recovery;
-use frame_support::{
-	derive_impl, parameter_types,
-	traits::{OnFinalize, OnInitialize},
-};
+use frame_support::{derive_impl, parameter_types};
 use sp_runtime::BuildStorage;
 
 type Block = frame_system::mocking::MockBlock<Test>;
@@ -86,14 +83,3 @@ pub fn new_test_ext() -> sp_io::TestExternalities {
 		.unwrap();
 	t.into()
 }
-
-/// Run until a particular block.
-pub fn run_to_block(n: u64) {
-	while System::block_number() < n {
-		if System::block_number() > 1 {
-			System::on_finalize(System::block_number());
-		}
-		System::set_block_number(System::block_number() + 1);
-		System::on_initialize(System::block_number());
-	}
-}
diff --git a/substrate/frame/recovery/src/tests.rs b/substrate/frame/recovery/src/tests.rs
index 93df07015852..97085df2ae78 100644
--- a/substrate/frame/recovery/src/tests.rs
+++ b/substrate/frame/recovery/src/tests.rs
@@ -17,12 +17,8 @@
 
 //! Tests for the module.
 
-use super::*;
+use crate::{mock::*, *};
 use frame_support::{assert_noop, assert_ok, traits::Currency};
-use mock::{
-	new_test_ext, run_to_block, Balances, BalancesCall, MaxFriends, Recovery, RecoveryCall,
-	RuntimeCall, RuntimeOrigin, Test,
-};
 use sp_runtime::{bounded_vec, traits::BadOrigin};
 
 #[test]
@@ -70,7 +66,7 @@ fn recovery_life_cycle_works() {
 			delay_period
 		));
 		// Some time has passed, and the user lost their keys!
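The per-pallet `run_to_block` helpers removed above are all replaced by the generic test helper that `frame_system` provides, parameterized by the runtime's `AllPalletsWithSystem` tuple. As a rough sketch of its semantics only (the real implementation lives in `frame_system` and is not part of this diff), it drives the same hook loop the deleted helpers did, but for every pallet in the mock runtime at once:

	// Sketch of the semantics; not the actual frame_system implementation.
	fn run_to_block_sketch(n: u64) {
		while System::block_number() < n {
			// Finalize the current block for all pallets...
			AllPalletsWithSystem::on_finalize(System::block_number());
			// ...advance the block number...
			System::set_block_number(System::block_number() + 1);
			// ...and initialize the new block for all pallets.
			AllPalletsWithSystem::on_initialize(System::block_number());
		}
	}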
-		run_to_block(10);
+		System::run_to_block::<AllPalletsWithSystem>(10);
 		// Using account 1, the user begins the recovery process to recover the lost account
 		assert_ok!(Recovery::initiate_recovery(RuntimeOrigin::signed(1), 5));
 		// Off chain, the user contacts their friends and asks them to vouch for the recovery
@@ -84,7 +80,7 @@ fn recovery_life_cycle_works() {
 			Error::<Test>::DelayPeriod
 		);
 		// We need to wait at least the delay_period number of blocks before we can recover
-		run_to_block(20);
+		System::run_to_block::<AllPalletsWithSystem>(20);
 		assert_ok!(Recovery::claim_recovery(RuntimeOrigin::signed(1), 5));
 		// Account 1 can use account 5 to close the active recovery process, claiming the deposited
 		// funds used to initiate the recovery process into account 5.
@@ -128,7 +124,7 @@ fn malicious_recovery_fails() {
 			delay_period
 		));
 		// Some time has passed, and account 1 wants to try and attack this account!
-		run_to_block(10);
+		System::run_to_block::<AllPalletsWithSystem>(10);
 		// Using account 1, the malicious user begins the recovery process on account 5
 		assert_ok!(Recovery::initiate_recovery(RuntimeOrigin::signed(1), 5));
 		// Off chain, the user **tricks** their friends and asks them to vouch for the recovery
@@ -144,7 +140,7 @@ fn malicious_recovery_fails() {
 			Error::<Test>::DelayPeriod
 		);
 		// Account 1 needs to wait...
-		run_to_block(19);
+		System::run_to_block::<AllPalletsWithSystem>(19);
 		// One more block to wait!
 		assert_noop!(
 			Recovery::claim_recovery(RuntimeOrigin::signed(1), 5),
@@ -158,7 +154,7 @@ fn malicious_recovery_fails() {
 		// Thanks for the free money!
 		assert_eq!(Balances::total_balance(&5), 110);
 		// The recovery process has been closed, so account 1 can't make the claim
-		run_to_block(20);
+		System::run_to_block::<AllPalletsWithSystem>(20);
 		assert_noop!(
 			Recovery::claim_recovery(RuntimeOrigin::signed(1), 5),
 			Error::<Test>::NotStarted
@@ -397,7 +393,7 @@ fn claim_recovery_handles_basic_errors() {
 			Recovery::claim_recovery(RuntimeOrigin::signed(1), 5),
 			Error::<Test>::DelayPeriod
 		);
-		run_to_block(11);
+		System::run_to_block::<AllPalletsWithSystem>(11);
 		// Cannot claim an account which has not passed the threshold number of votes
 		assert_ok!(Recovery::vouch_recovery(RuntimeOrigin::signed(2), 5, 1));
 		assert_ok!(Recovery::vouch_recovery(RuntimeOrigin::signed(3), 5, 1));
@@ -427,7 +423,7 @@ fn claim_recovery_works() {
 		assert_ok!(Recovery::vouch_recovery(RuntimeOrigin::signed(3), 5, 1));
 		assert_ok!(Recovery::vouch_recovery(RuntimeOrigin::signed(4), 5, 1));
 
-		run_to_block(11);
+		System::run_to_block::<AllPalletsWithSystem>(11);
 
 		// Account can be recovered.
 		assert_ok!(Recovery::claim_recovery(RuntimeOrigin::signed(1), 5));
@@ -439,7 +435,7 @@ fn claim_recovery_works() {
 		assert_ok!(Recovery::vouch_recovery(RuntimeOrigin::signed(3), 5, 4));
 		assert_ok!(Recovery::vouch_recovery(RuntimeOrigin::signed(4), 5, 4));
 
-		run_to_block(21);
+		System::run_to_block::<AllPalletsWithSystem>(21);
 
 		// Account is re-recovered.
assert_ok!(Recovery::claim_recovery(RuntimeOrigin::signed(4), 5)); diff --git a/substrate/frame/revive/rpc/.sqlx/query-027a434a38822c2ba4439e8f9f9c1135227c1150f2c5083d1c7c6086b717ada0.json b/substrate/frame/revive/rpc/.sqlx/query-027a434a38822c2ba4439e8f9f9c1135227c1150f2c5083d1c7c6086b717ada0.json new file mode 100644 index 000000000000..016276144901 --- /dev/null +++ b/substrate/frame/revive/rpc/.sqlx/query-027a434a38822c2ba4439e8f9f9c1135227c1150f2c5083d1c7c6086b717ada0.json @@ -0,0 +1,12 @@ +{ + "db_name": "SQLite", + "query": "\n\t\t\t\tINSERT INTO transaction_hashes (transaction_hash, block_hash, transaction_index)\n\t\t\t\tVALUES ($1, $2, $3)\n\n\t\t\t\tON CONFLICT(transaction_hash) DO UPDATE SET\n\t\t\t\tblock_hash = EXCLUDED.block_hash,\n\t\t\t\ttransaction_index = EXCLUDED.transaction_index\n\t\t\t\t", + "describe": { + "columns": [], + "parameters": { + "Right": 3 + }, + "nullable": [] + }, + "hash": "027a434a38822c2ba4439e8f9f9c1135227c1150f2c5083d1c7c6086b717ada0" +} diff --git a/substrate/frame/revive/rpc/.sqlx/query-2348bd412ca114197996e4395fd68c427245f94b80d37ec3aef04cd96fb36298.json b/substrate/frame/revive/rpc/.sqlx/query-2348bd412ca114197996e4395fd68c427245f94b80d37ec3aef04cd96fb36298.json new file mode 100644 index 000000000000..507564cd05c5 --- /dev/null +++ b/substrate/frame/revive/rpc/.sqlx/query-2348bd412ca114197996e4395fd68c427245f94b80d37ec3aef04cd96fb36298.json @@ -0,0 +1,20 @@ +{ + "db_name": "SQLite", + "query": "\n SELECT COUNT(*) as count\n FROM transaction_hashes\n WHERE block_hash = $1\n ", + "describe": { + "columns": [ + { + "name": "count", + "ordinal": 0, + "type_info": "Integer" + } + ], + "parameters": { + "Right": 1 + }, + "nullable": [ + false + ] + }, + "hash": "2348bd412ca114197996e4395fd68c427245f94b80d37ec3aef04cd96fb36298" +} diff --git a/substrate/frame/revive/rpc/.sqlx/query-29af64347f700919dc2ee12463f332be50096d4e37be04ed8b6f46ac5c242043.json b/substrate/frame/revive/rpc/.sqlx/query-29af64347f700919dc2ee12463f332be50096d4e37be04ed8b6f46ac5c242043.json new file mode 100644 index 000000000000..2443035c433d --- /dev/null +++ b/substrate/frame/revive/rpc/.sqlx/query-29af64347f700919dc2ee12463f332be50096d4e37be04ed8b6f46ac5c242043.json @@ -0,0 +1,26 @@ +{ + "db_name": "SQLite", + "query": "\n\t\t\tSELECT block_hash, transaction_index\n\t\t\tFROM transaction_hashes\n\t\t\tWHERE transaction_hash = $1\n\t\t\t", + "describe": { + "columns": [ + { + "name": "block_hash", + "ordinal": 0, + "type_info": "Text" + }, + { + "name": "transaction_index", + "ordinal": 1, + "type_info": "Integer" + } + ], + "parameters": { + "Right": 1 + }, + "nullable": [ + false, + false + ] + }, + "hash": "29af64347f700919dc2ee12463f332be50096d4e37be04ed8b6f46ac5c242043" +} diff --git a/substrate/frame/revive/rpc/Cargo.toml b/substrate/frame/revive/rpc/Cargo.toml index cfaaa102fc3d..9d822f5ff8e2 100644 --- a/substrate/frame/revive/rpc/Cargo.toml +++ b/substrate/frame/revive/rpc/Cargo.toml @@ -7,11 +7,16 @@ license = "Apache-2.0" homepage.workspace = true repository.workspace = true description = "An Ethereum JSON-RPC server for pallet-revive." 
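The `.sqlx` JSON files added above are sqlx's offline query cache: each entry pins the query string, parameter count, and column types of a compile-time-checked query, so the crate builds without a live database at hand. The cached lookup corresponds to code along these lines (a sketch only; the function name and `pool` handle are illustrative and not taken from this crate):

	use sqlx::SqlitePool;

	// Resolve a transaction hash to its block hash and index (illustrative).
	async fn lookup(
		pool: &SqlitePool,
		tx_hash: &str,
	) -> Result<Option<(String, i64)>, sqlx::Error> {
		let row = sqlx::query!(
			r#"
			SELECT block_hash, transaction_index
			FROM transaction_hashes
			WHERE transaction_hash = $1
			"#,
			tx_hash
		)
		.fetch_optional(pool)
		.await?;
		Ok(row.map(|r| (r.block_hash, r.transaction_index)))
	}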
+default-run = "eth-rpc" [[bin]] name = "eth-rpc" path = "src/main.rs" +[[bin]] +name = "eth-indexer" +path = "src/eth-indexer.rs" + [[example]] name = "deploy" path = "examples/rust/deploy.rs" @@ -53,9 +58,15 @@ sc-cli = { workspace = true, default-features = true } sc-rpc = { workspace = true, default-features = true } sc-rpc-api = { workspace = true, default-features = true } sc-service = { workspace = true, default-features = true } +sp-arithmetic = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } sp-crypto-hashing = { workspace = true } sp-weights = { workspace = true, default-features = true } +sqlx = { version = "0.8.2", features = [ + "macros", + "runtime-tokio", + "sqlite", +] } subxt = { workspace = true, default-features = true, features = ["reconnecting-rpc-client"] } subxt-signer = { workspace = true, optional = true, features = [ "unstable-eth", diff --git a/substrate/frame/revive/rpc/dockerfiles/eth-indexer/Dockerfile b/substrate/frame/revive/rpc/dockerfiles/eth-indexer/Dockerfile new file mode 100644 index 000000000000..77fa846a145c --- /dev/null +++ b/substrate/frame/revive/rpc/dockerfiles/eth-indexer/Dockerfile @@ -0,0 +1,28 @@ +FROM rust AS builder + +RUN apt-get update && \ + DEBIAN_FRONTEND=noninteractive apt-get install -y \ + protobuf-compiler \ + clang libclang-dev + +WORKDIR /polkadot +COPY . /polkadot +RUN rustup component add rust-src +RUN cargo build --locked --profile production -p pallet-revive-eth-rpc --bin eth-indexer + +FROM docker.io/parity/base-bin:latest +COPY --from=builder /polkadot/target/production/eth-indexer /usr/local/bin + +USER root +RUN useradd -m -u 1001 -U -s /bin/sh -d /polkadot polkadot && \ +# unclutter and minimize the attack surface + rm -rf /usr/bin /usr/sbin && \ +# check if executable works in this container + /usr/local/bin/eth-indexer --help + +USER polkadot + +ENTRYPOINT ["/usr/local/bin/eth-indexer"] + +# We call the help by default +CMD ["--help"] diff --git a/substrate/frame/revive/rpc/Dockerfile b/substrate/frame/revive/rpc/dockerfiles/eth-rpc/Dockerfile similarity index 100% rename from substrate/frame/revive/rpc/Dockerfile rename to substrate/frame/revive/rpc/dockerfiles/eth-rpc/Dockerfile diff --git a/substrate/frame/revive/rpc/examples/js/bun.lockb b/substrate/frame/revive/rpc/examples/js/bun.lockb index 46994bb14754..39a1d0906b70 100755 Binary files a/substrate/frame/revive/rpc/examples/js/bun.lockb and b/substrate/frame/revive/rpc/examples/js/bun.lockb differ diff --git a/substrate/frame/revive/rpc/examples/js/package.json b/substrate/frame/revive/rpc/examples/js/package.json index 6d8d00fd4214..f2c4b8d78093 100644 --- a/substrate/frame/revive/rpc/examples/js/package.json +++ b/substrate/frame/revive/rpc/examples/js/package.json @@ -9,15 +9,15 @@ "preview": "vite preview" }, "dependencies": { - "ethers": "^6.13.4", + "@parity/revive": "^0.0.9", + "ethers": "^6.13.5", "solc": "^0.8.28", - "viem": "^2.21.47", - "@parity/revive": "^0.0.5" + "viem": "^2.22.4" }, "devDependencies": { - "prettier": "^3.3.3", - "@types/bun": "^1.1.13", - "typescript": "^5.5.3", - "vite": "^5.4.8" + "prettier": "^3.4.2", + "@types/bun": "^1.1.15", + "typescript": "^5.7.2", + "vite": "^5.4.11" } } diff --git a/substrate/frame/revive/rpc/examples/js/src/build-contracts.ts b/substrate/frame/revive/rpc/examples/js/src/build-contracts.ts index a37b850214b8..f26f275ec3d5 100644 --- a/substrate/frame/revive/rpc/examples/js/src/build-contracts.ts +++ 
b/substrate/frame/revive/rpc/examples/js/src/build-contracts.ts
@@ -55,17 +55,14 @@ for (const file of input) {
 	}
 
 	console.log('Compiling with revive...')
-	const reviveOut = await compile(input)
+	const reviveOut = await compile(input, { bin: 'resolc' })
 
 	for (const contracts of Object.values(reviveOut.contracts)) {
 		for (const [name, contract] of Object.entries(contracts)) {
 			console.log(`📜 Add PVM contract ${name}`)
 			const abi = contract.abi
 			const abiName = `${name}Abi`
-			writeFileSync(
-				join(abiDir, `${name}.json`),
-				JSON.stringify(abi, null, 2)
-			)
+			writeFileSync(join(abiDir, `${name}.json`), JSON.stringify(abi, null, 2))
 
 			writeFileSync(
 				join(abiDir, `${name}.ts`),
diff --git a/substrate/frame/revive/rpc/examples/js/src/geth-diff.test.ts b/substrate/frame/revive/rpc/examples/js/src/geth-diff.test.ts
index b9ee877927bb..86b8ec50bd63 100644
--- a/substrate/frame/revive/rpc/examples/js/src/geth-diff.test.ts
+++ b/substrate/frame/revive/rpc/examples/js/src/geth-diff.test.ts
@@ -1,9 +1,73 @@
-import { jsonRpcErrors, procs, createEnv, getByteCode } from './geth-diff-setup.ts'
+import {
+	jsonRpcErrors,
+	createEnv,
+	getByteCode,
+	killProcessOnPort,
+	waitForHealth,
+	polkadotSdkPath,
+} from './util.ts'
 import { afterAll, afterEach, beforeAll, describe, expect, test } from 'bun:test'
 import { encodeFunctionData, Hex, parseEther } from 'viem'
 import { ErrorsAbi } from '../abi/Errors'
 import { FlipperCallerAbi } from '../abi/FlipperCaller'
 import { FlipperAbi } from '../abi/Flipper'
+import { Subprocess, spawn } from 'bun'
+
+const procs: Subprocess[] = []
+beforeAll(async () => {
+	if (!process.env.USE_LIVE_SERVERS) {
+		procs.push(
+			// Run geth on port 8546
+			await (async () => {
+				killProcessOnPort(8546)
+				const proc = spawn(
+					'geth --http --http.api web3,eth,debug,personal,net --http.port 8546 --dev --verbosity 0'.split(
+						' '
+					),
+					{ stdout: Bun.file('/tmp/geth.out.log'), stderr: Bun.file('/tmp/geth.err.log') }
+				)
+
+				await waitForHealth('http://localhost:8546').catch()
+				return proc
+			})(),
+			// Run the substrate node
+			(() => {
+				killProcessOnPort(9944)
+				return spawn(
+					[
+						'./target/debug/substrate-node',
+						'--dev',
+						'-l=error,evm=debug,sc_rpc_server=info,runtime::revive=debug',
+					],
+					{
+						stdout: Bun.file('/tmp/kitchensink.out.log'),
+						stderr: Bun.file('/tmp/kitchensink.err.log'),
+						cwd: polkadotSdkPath,
+					}
+				)
+			})(),
+			// Run eth-rpc on 8545
+			await (async () => {
+				killProcessOnPort(8545)
+				const proc = spawn(
+					[
+						'./target/debug/eth-rpc',
+						'--dev',
+						'--node-rpc-url=ws://localhost:9944',
+						'-l=rpc-metrics=debug,eth-rpc=debug',
+					],
+					{
+						stdout: Bun.file('/tmp/eth-rpc.out.log'),
+						stderr: Bun.file('/tmp/eth-rpc.err.log'),
+						cwd: polkadotSdkPath,
+					}
+				)
+				await waitForHealth('http://localhost:8545').catch()
+				return proc
+			})()
+		)
+	}
+})
 
 afterEach(() => {
 	jsonRpcErrors.length = 0
@@ -289,27 +353,5 @@ for (const env of envs) {
 				],
 			})
 		})
-
-		test.only('eth_estimate (no gas specified) child_call', async () => {
-			let balance = await env.serverWallet.getBalance(env.accountWallet.account)
-			expect(balance).toBe(0n)
-
-			const data = encodeFunctionData({
-				abi: FlipperCallerAbi,
-				functionName: 'callFlip',
-			})
-
-			await env.accountWallet.request({
-				method: 'eth_estimateGas',
-				params: [
-					{
-						data,
-						from: env.accountWallet.account.address,
-						to: flipperCallerAddr,
-						gas: `0x${Number(1000000).toString(16)}`,
-					},
-				],
-			})
-		})
 	})
 }
diff --git a/substrate/frame/revive/rpc/examples/js/src/lib.ts b/substrate/frame/revive/rpc/examples/js/src/lib.ts
index e1f0e780d95b..1470f492e34d
100644
--- a/substrate/frame/revive/rpc/examples/js/src/lib.ts
+++ b/substrate/frame/revive/rpc/examples/js/src/lib.ts
@@ -50,7 +50,6 @@ if (geth) {
 	child.unref()
 	await new Promise((resolve) => setTimeout(resolve, 500))
 }
-
 const rpcUrl = proxy ? 'http://localhost:8080' : westend
diff --git a/substrate/frame/revive/rpc/examples/js/src/piggy-bank.ts b/substrate/frame/revive/rpc/examples/js/src/piggy-bank.ts
index 0040b0c78dc4..4983a6f3b301 100644
--- a/substrate/frame/revive/rpc/examples/js/src/piggy-bank.ts
+++ b/substrate/frame/revive/rpc/examples/js/src/piggy-bank.ts
@@ -1,10 +1,10 @@
 import { assert, getByteCode, walletClient } from './lib.ts'
-import { abi } from '../abi/piggyBank.ts'
+import { PiggyBankAbi } from '../abi/piggyBank.ts'
 import { parseEther } from 'viem'
 
 const hash = await walletClient.deployContract({
-	abi,
-	bytecode: getByteCode('piggyBank'),
+	abi: PiggyBankAbi,
+	bytecode: getByteCode('PiggyBank'),
 })
 const deployReceipt = await walletClient.waitForTransactionReceipt({ hash })
 const contractAddress = deployReceipt.contractAddress
@@ -16,7 +16,7 @@ assert(contractAddress, 'Contract address should be set')
 	const result = await walletClient.estimateContractGas({
 		account: walletClient.account,
 		address: contractAddress,
-		abi,
+		abi: PiggyBankAbi,
 		functionName: 'deposit',
 		value: parseEther('10'),
 	})
@@ -26,19 +26,14 @@ assert(contractAddress, 'Contract address should be set')
 	const { request } = await walletClient.simulateContract({
 		account: walletClient.account,
 		address: contractAddress,
-		abi,
+		abi: PiggyBankAbi,
 		functionName: 'deposit',
 		value: parseEther('10'),
 	})
-	request.nonce = 0
 	const hash = await walletClient.writeContract(request)
-
 	const receipt = await walletClient.waitForTransactionReceipt({ hash })
 	console.log(`Deposit receipt: ${receipt.status}`)
-	if (process.env.STOP) {
-		process.exit(0)
-	}
 }
 
 // Withdraw 5 WST
@@ -46,7 +41,7 @@ assert(contractAddress, 'Contract address should be set')
 	const { request } = await walletClient.simulateContract({
 		account: walletClient.account,
 		address: contractAddress,
-		abi,
+		abi: PiggyBankAbi,
 		functionName: 'withdraw',
 		args: [parseEther('5')],
 	})
@@ -58,7 +53,7 @@ assert(contractAddress, 'Contract address should be set')
 	// Check remaining balance
 	const balance = await walletClient.readContract({
 		address: contractAddress,
-		abi,
+		abi: PiggyBankAbi,
 		functionName: 'getDeposit',
 	})
diff --git a/substrate/frame/revive/rpc/examples/js/src/spammer.ts b/substrate/frame/revive/rpc/examples/js/src/spammer.ts
new file mode 100644
index 000000000000..c038afa71f0a
--- /dev/null
+++ b/substrate/frame/revive/rpc/examples/js/src/spammer.ts
@@ -0,0 +1,104 @@
+import { spawn } from 'bun'
+import {
+	createEnv,
+	getByteCode,
+	killProcessOnPort,
+	polkadotSdkPath,
+	timeout,
+	wait,
+	waitForHealth,
+} from './util'
+import { FlipperAbi } from '../abi/Flipper'
+
+// Run the substrate node
+console.log('🚀 Start kitchensink...')
+killProcessOnPort(9944)
+spawn(
+	[
+		'./target/debug/substrate-node',
+		'--dev',
+		'-l=error,evm=debug,sc_rpc_server=info,runtime::revive=debug',
+	],
+	{
+		stdout: Bun.file('/tmp/kitchensink.out.log'),
+		stderr: Bun.file('/tmp/kitchensink.err.log'),
+		cwd: polkadotSdkPath,
+	}
+)
+
+// Run eth-indexer
+console.log('🔍 Start indexer...')
+spawn(
+	[
+		'./target/debug/eth-indexer',
+		'--node-rpc-url=ws://localhost:9944',
+		'-l=eth-rpc=debug',
+		`--database-url=${polkadotSdkPath}/substrate/frame/revive/rpc/tx_hashes.db`,
+	],
+	{
+		stdout: Bun.file('/tmp/eth-indexer.out.log'),
+		stderr: Bun.file('/tmp/eth-indexer.err.log'),
+		cwd: polkadotSdkPath,
+	}
+)
+
+// Run eth-rpc on 8545
+console.log('💻 Start eth-rpc...')
+killProcessOnPort(8545)
+spawn(
+	[
+		'./target/debug/eth-rpc',
+		'--dev',
+		'--node-rpc-url=ws://localhost:9944',
+		'-l=rpc-metrics=debug,eth-rpc=debug',
+	],
+	{
+		stdout: Bun.file('/tmp/eth-rpc.out.log'),
+		stderr: Bun.file('/tmp/eth-rpc.err.log'),
+		cwd: polkadotSdkPath,
+	}
+)
+await waitForHealth('http://localhost:8545').catch()
+
+const env = await createEnv('kitchensink')
+const wallet = env.accountWallet
+
+console.log('🚀 Deploy flipper...')
+const hash = await wallet.deployContract({
+	abi: FlipperAbi,
+	bytecode: getByteCode('Flipper'),
+})
+
+const deployReceipt = await wallet.waitForTransactionReceipt({ hash })
+if (!deployReceipt.contractAddress) throw new Error('Contract address should be set')
+const flipperAddr = deployReceipt.contractAddress
+
+let nonce = await wallet.getTransactionCount({ address: wallet.account.address })
+let callCount = 0
+
+console.log('🔄 Starting nonce:', nonce)
+console.log('🔄 Starting loop...')
+try {
+	while (true) {
+		callCount++
+		console.log(`🔄 Call flip (${callCount})...`)
+		const { request } = await wallet.simulateContract({
+			account: wallet.account,
+			address: flipperAddr,
+			abi: FlipperAbi,
+			functionName: 'flip',
+		})
+
+		console.log(`🔄 Submit flip (call ${callCount})...`)
+
+		await Promise.race([
+			(async () => {
+				const hash = await wallet.writeContract(request)
+				await wallet.waitForTransactionReceipt({ hash })
+			})(),
+			timeout(15_000),
+		])
+	}
+} catch (err) {
+	console.error('Failed with error:', err)
+}
diff --git a/substrate/frame/revive/rpc/examples/js/src/geth-diff-setup.ts b/substrate/frame/revive/rpc/examples/js/src/util.ts
similarity index 62%
rename from substrate/frame/revive/rpc/examples/js/src/geth-diff-setup.ts
rename to substrate/frame/revive/rpc/examples/js/src/util.ts
index 3db2453f2475..bdc64eea1ef5 100644
--- a/substrate/frame/revive/rpc/examples/js/src/geth-diff-setup.ts
+++ b/substrate/frame/revive/rpc/examples/js/src/util.ts
@@ -1,10 +1,10 @@
-import { spawn, spawnSync, Subprocess } from 'bun'
+import { spawnSync } from 'bun'
 import { resolve } from 'path'
 import { readFileSync } from 'fs'
 import { createWalletClient, defineChain, Hex, http, publicActions } from 'viem'
-import { privateKeyToAccount } from 'viem/accounts'
+import { privateKeyToAccount, nonceManager } from 'viem/accounts'
 
-export function getByteCode(name: string, evm: boolean): Hex {
+export function getByteCode(name: string, evm: boolean = false): Hex {
 	const bytecode = evm ?
readFileSync(`evm/${name}.bin`) : readFileSync(`pvm/${name}.polkavm`) return `0x${Buffer.from(bytecode).toString('hex')}` } @@ -15,6 +15,8 @@ export type JsonRpcError = { data: Hex } +export const polkadotSdkPath = resolve(__dirname, '../../../../../../..') + export function killProcessOnPort(port: number) { // Check which process is using the specified port const result = spawnSync(['lsof', '-ti', `:${port}`]) @@ -76,7 +78,8 @@ export async function createEnv(name: 'geth' | 'kitchensink') { const accountWallet = createWalletClient({ account: privateKeyToAccount( - '0xa872f6cbd25a0e04a08b1e21098017a9e6194d101d75e13111f71410c59cd57f' + '0x5fb92d6e98884f76de468fa3f6278f8807c48bebc13595d45af5bdc4da702133', + { nonceManager } ), transport, chain, @@ -85,6 +88,14 @@ export async function createEnv(name: 'geth' | 'kitchensink') { return { serverWallet, accountWallet, evm: name == 'geth' } } +export function wait(ms: number) { + return new Promise((resolve) => setTimeout(resolve, ms)) +} + +export function timeout(ms: number) { + return new Promise((_resolve, reject) => setTimeout(() => reject(new Error('timeout hit')), ms)) +} + // wait for http request to return 200 export function waitForHealth(url: string) { return new Promise((resolve, reject) => { @@ -120,58 +131,3 @@ export function waitForHealth(url: string) { }, 1000) }) } - -export const procs: Subprocess[] = [] -const polkadotSdkPath = resolve(__dirname, '../../../../../../..') -if (!process.env.USE_LIVE_SERVERS) { - procs.push( - // Run geth on port 8546 - await (async () => { - killProcessOnPort(8546) - const proc = spawn( - 'geth --http --http.api web3,eth,debug,personal,net --http.port 8546 --dev --verbosity 0'.split( - ' ' - ), - { stdout: Bun.file('/tmp/geth.out.log'), stderr: Bun.file('/tmp/geth.err.log') } - ) - - await waitForHealth('http://localhost:8546').catch() - return proc - })(), - //Run the substate node - (() => { - killProcessOnPort(9944) - return spawn( - [ - './target/debug/substrate-node', - '--dev', - '-l=error,evm=debug,sc_rpc_server=info,runtime::revive=debug', - ], - { - stdout: Bun.file('/tmp/kitchensink.out.log'), - stderr: Bun.file('/tmp/kitchensink.err.log'), - cwd: polkadotSdkPath, - } - ) - })(), - // Run eth-rpc on 8545 - await (async () => { - killProcessOnPort(8545) - const proc = spawn( - [ - './target/debug/eth-rpc', - '--dev', - '--node-rpc-url=ws://localhost:9944', - '-l=rpc-metrics=debug,eth-rpc=debug', - ], - { - stdout: Bun.file('/tmp/eth-rpc.out.log'), - stderr: Bun.file('/tmp/eth-rpc.err.log'), - cwd: polkadotSdkPath, - } - ) - await waitForHealth('http://localhost:8545').catch() - return proc - })() - ) -} diff --git a/substrate/frame/revive/rpc/examples/westend_local_network.toml b/substrate/frame/revive/rpc/examples/westend_local_network.toml index 28295db76133..76561be814ec 100644 --- a/substrate/frame/revive/rpc/examples/westend_local_network.toml +++ b/substrate/frame/revive/rpc/examples/westend_local_network.toml @@ -29,13 +29,9 @@ name = "asset-hub-westend-collator1" rpc_port = 9011 ws_port = 9944 command = "{{POLKADOT_PARACHAIN_BINARY}}" -args = [ - "-lparachain=debug,runtime::revive=debug", -] +args = ["-lparachain=debug,runtime::revive=debug"] [[parachains.collators]] name = "asset-hub-westend-collator2" command = "{{POLKADOT_PARACHAIN_BINARY}}" -args = [ - "-lparachain=debug,runtime::revive=debug", -] +args = ["-lparachain=debug,runtime::revive=debug"] diff --git a/substrate/frame/revive/rpc/migrations/20241205165418_create_transaction_hashes.sql 
b/substrate/frame/revive/rpc/migrations/20241205165418_create_transaction_hashes.sql
new file mode 100644
index 000000000000..43405bea9d04
--- /dev/null
+++ b/substrate/frame/revive/rpc/migrations/20241205165418_create_transaction_hashes.sql
@@ -0,0 +1,15 @@
+-- Create DB:
+-- DATABASE_URL="..." cargo sqlx database create
+--
+-- Run migration:
+-- DATABASE_URL="..." cargo sqlx migrate run
+--
+-- Update compile time artifacts:
+-- DATABASE_URL="..." cargo sqlx prepare
+CREATE TABLE transaction_hashes (
+	transaction_hash CHAR(64) NOT NULL PRIMARY KEY,
+	transaction_index INTEGER NOT NULL,
+	block_hash CHAR(64) NOT NULL
+);
+
+CREATE INDEX idx_block_hash ON transaction_hashes (block_hash);
diff --git a/substrate/frame/revive/rpc/revive_chain.metadata b/substrate/frame/revive/rpc/revive_chain.metadata
index 64b1f2014dd0..402e8c2d22b2 100644
Binary files a/substrate/frame/revive/rpc/revive_chain.metadata and b/substrate/frame/revive/rpc/revive_chain.metadata differ
diff --git a/substrate/frame/revive/rpc/src/block_info_provider.rs b/substrate/frame/revive/rpc/src/block_info_provider.rs
new file mode 100644
index 000000000000..0e91869cddaa
--- /dev/null
+++ b/substrate/frame/revive/rpc/src/block_info_provider.rs
@@ -0,0 +1,250 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// 	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use crate::{
+	client::{SubstrateBlock, SubstrateBlockNumber},
+	subxt_client::SrcChainConfig,
+	ClientError,
+};
+use jsonrpsee::core::async_trait;
+use sp_core::H256;
+use std::{
+	collections::{HashMap, VecDeque},
+	sync::Arc,
+};
+use subxt::{backend::legacy::LegacyRpcMethods, OnlineClient};
+use tokio::sync::RwLock;
+
+/// A [`BlockInfoProvider`] caches and retrieves information about blocks.
+#[async_trait]
+pub trait BlockInfoProvider: Send + Sync {
+	/// Cache a new block and return the pruned block hash.
+	async fn cache_block(&self, block: SubstrateBlock) -> Option<H256>;
+
+	/// Return the latest ingested block.
+	async fn latest_block(&self) -> Option<Arc<SubstrateBlock>>;
+
+	/// Get block by block_number.
+	async fn block_by_number(
+		&self,
+		block_number: SubstrateBlockNumber,
+	) -> Result<Option<Arc<SubstrateBlock>>, ClientError>;
+
+	/// Get block by block hash.
+	async fn block_by_hash(&self, hash: &H256) -> Result<Option<Arc<SubstrateBlock>>, ClientError>;
+}
+
+/// Provides information about blocks.
+#[derive(Clone)]
+pub struct BlockInfoProviderImpl {
+	/// The shared in-memory cache.
+	cache: Arc<RwLock<BlockCache<SubstrateBlock>>>,
+
+	/// The rpc client, used to fetch blocks not in the cache.
+	rpc: LegacyRpcMethods<SrcChainConfig>,
+
+	/// The api client, used to fetch blocks not in the cache.
+	api: OnlineClient<SrcChainConfig>,
+}
+
+impl BlockInfoProviderImpl {
+	pub fn new(
+		cache_size: usize,
+		api: OnlineClient<SrcChainConfig>,
+		rpc: LegacyRpcMethods<SrcChainConfig>,
+	) -> Self {
+		Self { api, rpc, cache: Arc::new(RwLock::new(BlockCache::new(cache_size))) }
+	}
+
+	async fn cache(&self) -> tokio::sync::RwLockReadGuard<'_, BlockCache<SubstrateBlock>> {
+		self.cache.read().await
+	}
+}
+
+#[async_trait]
+impl BlockInfoProvider for BlockInfoProviderImpl {
+	async fn cache_block(&self, block: SubstrateBlock) -> Option<H256> {
+		let mut cache = self.cache.write().await;
+		cache.insert(block)
+	}
+
+	async fn latest_block(&self) -> Option<Arc<SubstrateBlock>> {
+		let cache = self.cache().await;
+		cache.buffer.back().cloned()
+	}
+
+	async fn block_by_number(
+		&self,
+		block_number: SubstrateBlockNumber,
+	) -> Result<Option<Arc<SubstrateBlock>>, ClientError> {
+		let cache = self.cache().await;
+		if let Some(block) = cache.blocks_by_number.get(&block_number).cloned() {
+			return Ok(Some(block));
+		}
+
+		let Some(hash) = self.rpc.chain_get_block_hash(Some(block_number.into())).await? else {
+			return Ok(None);
+		};
+
+		self.block_by_hash(&hash).await
+	}
+
+	async fn block_by_hash(&self, hash: &H256) -> Result<Option<Arc<SubstrateBlock>>, ClientError> {
+		let cache = self.cache().await;
+		if let Some(block) = cache.blocks_by_hash.get(hash).cloned() {
+			return Ok(Some(block));
+		}
+
+		match self.api.blocks().at(*hash).await {
+			Ok(block) => Ok(Some(Arc::new(block))),
+			Err(subxt::Error::Block(subxt::error::BlockError::NotFound(_))) => Ok(None),
+			Err(err) => Err(err.into()),
+		}
+	}
+}
+
+/// The cache maintains a buffer of the last N blocks.
+struct BlockCache<B> {
+	/// The maximum buffer's size.
+	max_cache_size: usize,
+
+	/// A double-ended queue of the last N blocks.
+	/// The most recent block is at the back of the queue, and the oldest block is at the front.
+	buffer: VecDeque<Arc<B>>,
+
+	/// A map of blocks by block number.
+	blocks_by_number: HashMap<SubstrateBlockNumber, Arc<B>>,
+
+	/// A map of blocks by block hash.
+	blocks_by_hash: HashMap<H256, Arc<B>>,
+}
+
+/// Provides information about a block.
+/// This is an abstraction on top of [`SubstrateBlock`] used to test the [`BlockCache`].
+/// Can be removed once https://github.com/paritytech/subxt/issues/1883 is fixed.
+trait BlockInfo {
+	/// Returns the block hash.
+	fn hash(&self) -> H256;
+	/// Returns the block number.
+	fn number(&self) -> SubstrateBlockNumber;
+}
+
+impl BlockInfo for SubstrateBlock {
+	fn hash(&self) -> H256 {
+		SubstrateBlock::hash(self)
+	}
+	fn number(&self) -> u32 {
+		SubstrateBlock::number(self)
+	}
+}
+
+impl<B: BlockInfo> BlockCache<B> {
+	/// Create a new cache with the given maximum buffer size.
+	pub fn new(max_cache_size: usize) -> Self {
+		Self {
+			max_cache_size,
+			buffer: Default::default(),
+			blocks_by_number: Default::default(),
+			blocks_by_hash: Default::default(),
+		}
+	}
+
+	/// Insert an entry into the cache, and prune the oldest entry if the cache is full.
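+	// Note: the returned pruned hash is what lets the caller evict data tied to
+	// the dropped block; `subscribe_and_cache_blocks` in `client.rs` uses it as
+	// follows to remove the pruned block's receipts:
+	//
+	//   if let Some(pruned) = client.block_provider.cache_block(block).await {
+	//       client.receipt_provider.remove(&pruned).await;
+	//   }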
+	pub fn insert(&mut self, block: B) -> Option<H256> {
+		let mut pruned_block_hash = None;
+		if self.buffer.len() >= self.max_cache_size {
+			if let Some(block) = self.buffer.pop_front() {
+				let hash = block.hash();
+				self.blocks_by_hash.remove(&hash);
+				self.blocks_by_number.remove(&block.number());
+				pruned_block_hash = Some(hash);
+			}
+		}
+
+		let block = Arc::new(block);
+		self.buffer.push_back(block.clone());
+		self.blocks_by_number.insert(block.number(), block.clone());
+		self.blocks_by_hash.insert(block.hash(), block);
+		pruned_block_hash
+	}
+}
+
+#[cfg(test)]
+pub mod test {
+	use super::*;
+
+	struct MockBlock {
+		block_number: SubstrateBlockNumber,
+		block_hash: H256,
+	}
+
+	impl BlockInfo for MockBlock {
+		fn hash(&self) -> H256 {
+			self.block_hash
+		}
+
+		fn number(&self) -> u32 {
+			self.block_number
+		}
+	}
+
+	#[test]
+	fn cache_insert_works() {
+		let mut cache = BlockCache::<MockBlock>::new(2);
+
+		let pruned = cache.insert(MockBlock { block_number: 1, block_hash: H256::from([1; 32]) });
+		assert_eq!(pruned, None);
+
+		let pruned = cache.insert(MockBlock { block_number: 2, block_hash: H256::from([2; 32]) });
+		assert_eq!(pruned, None);
+
+		let pruned = cache.insert(MockBlock { block_number: 3, block_hash: H256::from([3; 32]) });
+		assert_eq!(pruned, Some(H256::from([1; 32])));
+
+		assert_eq!(cache.buffer.len(), 2);
+		assert_eq!(cache.blocks_by_number.len(), 2);
+		assert_eq!(cache.blocks_by_hash.len(), 2);
+	}
+
+	/// A no-op `BlockInfoProvider` used to test [`db::DBReceiptProvider`].
+	pub struct MockBlockInfoProvider;
+
+	#[async_trait]
+	impl BlockInfoProvider for MockBlockInfoProvider {
+		async fn cache_block(&self, _block: SubstrateBlock) -> Option<H256> {
+			None
+		}
+
+		async fn latest_block(&self) -> Option<Arc<SubstrateBlock>> {
+			None
+		}
+
+		async fn block_by_number(
+			&self,
+			_block_number: SubstrateBlockNumber,
+		) -> Result<Option<Arc<SubstrateBlock>>, ClientError> {
+			Ok(None)
+		}
+
+		async fn block_by_hash(
+			&self,
+			_hash: &H256,
+		) -> Result<Option<Arc<SubstrateBlock>>, ClientError> {
+			Ok(None)
+		}
+	}
+}
diff --git a/substrate/frame/revive/rpc/src/cli.rs b/substrate/frame/revive/rpc/src/cli.rs
index c0f81fcafd77..d63d596ab7a8 100644
--- a/substrate/frame/revive/rpc/src/cli.rs
+++ b/substrate/frame/revive/rpc/src/cli.rs
@@ -16,7 +16,9 @@
 // limitations under the License.
 //! The Ethereum JSON-RPC server.
 use crate::{
-	client::Client, EthRpcServer, EthRpcServerImpl, SystemHealthRpcServer,
+	client::{connect, Client},
+	BlockInfoProvider, BlockInfoProviderImpl, CacheReceiptProvider, DBReceiptProvider,
+	EthRpcServer, EthRpcServerImpl, ReceiptProvider, SystemHealthRpcServer,
 	SystemHealthRpcServerImpl,
 };
 use clap::Parser;
@@ -27,6 +29,7 @@ use sc_service::{
 	config::{PrometheusConfig, RpcConfiguration},
 	start_rpc_servers, TaskManager,
 };
+use std::sync::Arc;
 
 // Default port if --prometheus-port is not specified
 const DEFAULT_PROMETHEUS_PORT: u16 = 9616;
@@ -42,6 +45,21 @@ pub struct CliCommand {
 	#[clap(long, default_value = "ws://127.0.0.1:9944")]
 	pub node_rpc_url: String,
 
+	/// The maximum number of blocks to cache in memory.
+	#[clap(long, default_value = "256")]
+	pub cache_size: usize,
+
+	/// The database used to store Ethereum transaction hashes.
+	/// This is only useful if the node needs to act as an archive node and respond to Ethereum RPC
+	/// queries for transactions that are not in the in-memory cache.
+	#[clap(long)]
+	pub database_url: Option<String>,
+
+	/// If true, we will only read from the database and not write to it.
+	/// Only useful if `--database-url` is specified.
+ #[clap(long, default_value = "true")] + pub database_read_only: bool, + #[allow(missing_docs)] #[clap(flatten)] pub shared_params: SharedParams, @@ -78,7 +96,16 @@ fn init_logger(params: &SharedParams) -> anyhow::Result<()> { /// Start the JSON-RPC server using the given command line arguments. pub fn run(cmd: CliCommand) -> anyhow::Result<()> { - let CliCommand { rpc_params, prometheus_params, node_rpc_url, shared_params, .. } = cmd; + let CliCommand { + rpc_params, + prometheus_params, + node_rpc_url, + cache_size, + database_url, + database_read_only, + shared_params, + .. + } = cmd; #[cfg(not(test))] init_logger(&shared_params)?; @@ -110,19 +137,42 @@ pub fn run(cmd: CliCommand) -> anyhow::Result<()> { let tokio_runtime = sc_cli::build_runtime()?; let tokio_handle = tokio_runtime.handle(); - let signals = tokio_runtime.block_on(async { Signals::capture() })?; let mut task_manager = TaskManager::new(tokio_handle.clone(), prometheus_registry)?; let essential_spawn_handle = task_manager.spawn_essential_handle(); let gen_rpc_module = || { let signals = tokio_runtime.block_on(async { Signals::capture() })?; - let fut = Client::from_url(&node_rpc_url, &essential_spawn_handle).fuse(); + let fut = async { + let (api, rpc_client, rpc) = connect(&node_rpc_url).await?; + let block_provider: Arc = + Arc::new(BlockInfoProviderImpl::new(cache_size, api.clone(), rpc.clone())); + let receipt_provider: Arc = + if let Some(database_url) = database_url.as_ref() { + Arc::new(( + CacheReceiptProvider::default(), + DBReceiptProvider::new( + database_url, + database_read_only, + block_provider.clone(), + ) + .await?, + )) + } else { + Arc::new(CacheReceiptProvider::default()) + }; + + let client = + Client::new(api, rpc_client, rpc, block_provider, receipt_provider).await?; + client.subscribe_and_cache_blocks(&essential_spawn_handle); + Ok::<_, crate::ClientError>(client) + } + .fuse(); pin_mut!(fut); match tokio_handle.block_on(signals.try_until_signal(fut)) { Ok(Ok(client)) => rpc_module(is_dev, client), Ok(Err(err)) => { - log::error!("Error connecting to the node at {node_rpc_url}: {err}"); + log::error!("Error initializing: {err:?}"); Err(sc_service::Error::Application(err.into())) }, Err(_) => Err(sc_service::Error::Application("Client connection interrupted".into())), @@ -142,6 +192,7 @@ pub fn run(cmd: CliCommand) -> anyhow::Result<()> { start_rpc_servers(&rpc_config, prometheus_registry, tokio_handle, gen_rpc_module, None)?; task_manager.keep_alive(rpc_server_handle); + let signals = tokio_runtime.block_on(async { Signals::capture() })?; tokio_runtime.block_on(signals.run_until_signal(task_manager.future().fuse()))?; Ok(()) } diff --git a/substrate/frame/revive/rpc/src/client.rs b/substrate/frame/revive/rpc/src/client.rs index 901c15e9756b..cd0effe7faf2 100644 --- a/substrate/frame/revive/rpc/src/client.rs +++ b/substrate/frame/revive/rpc/src/client.rs @@ -17,30 +17,23 @@ //! The client connects to the source substrate chain //! and is used by the rpc server to query and send transactions to the substrate chain. 
 use crate::{
-	runtime::GAS_PRICE,
+	extract_receipts_from_block,
+	runtime::gas_from_fee,
 	subxt_client::{
-		revive::{calls::types::EthTransact, events::ContractEmitted},
-		runtime_types::pallet_revive::storage::ContractInfo,
+		revive::calls::types::EthTransact, runtime_types::pallet_revive::storage::ContractInfo,
 	},
-	LOG_TARGET,
+	BlockInfoProvider, ReceiptProvider, TransactionInfo, LOG_TARGET,
 };
-use futures::{stream, StreamExt};
 use jsonrpsee::types::{error::CALL_EXECUTION_FAILED_CODE, ErrorObjectOwned};
 use pallet_revive::{
-	create1,
 	evm::{
-		Block, BlockNumberOrTag, BlockNumberOrTagOrHash, Bytes256, GenericTransaction, Log,
-		ReceiptInfo, SyncingProgress, SyncingStatus, TransactionSigned, H160, H256, U256,
+		Block, BlockNumberOrTag, BlockNumberOrTagOrHash, GenericTransaction, ReceiptInfo,
+		SyncingProgress, SyncingStatus, TransactionSigned, H160, H256, U256,
 	},
 	EthTransactError, EthTransactInfo,
 };
-use sp_core::keccak_256;
 use sp_weights::Weight;
-use std::{
-	collections::{HashMap, VecDeque},
-	sync::Arc,
-	time::Duration,
-};
+use std::{ops::ControlFlow, sync::Arc, time::Duration};
 use subxt::{
 	backend::{
 		legacy::{rpc_methods::SystemHealth, LegacyRpcMethods},
@@ -54,11 +47,10 @@ use subxt::{
 	storage::Storage,
 	Config, OnlineClient,
 };
-use subxt_client::transaction_payment::events::TransactionFeePaid;
 use thiserror::Error;
-use tokio::sync::{watch::Sender, RwLock};
+use tokio::{sync::RwLock, try_join};
 
-use crate::subxt_client::{self, system::events::ExtrinsicSuccess, SrcChainConfig};
+use crate::subxt_client::{self, SrcChainConfig};
 
 /// The substrate block type.
 pub type SubstrateBlock = subxt::blocks::Block<SrcChainConfig, OnlineClient<SrcChainConfig>>;
@@ -75,29 +67,6 @@ pub type Shared<T> = Arc<RwLock<T>>;
 /// The runtime balance type.
 pub type Balance = u128;
 
-/// The cache maintains a buffer of the last N blocks,
-#[derive(Default)]
-struct BlockCache<const N: usize> {
-	/// A double-ended queue of the last N blocks.
-	/// The most recent block is at the back of the queue, and the oldest block is at the front.
-	buffer: VecDeque<Arc<SubstrateBlock>>,
-
-	/// A map of blocks by block number.
-	blocks_by_number: HashMap<SubstrateBlockNumber, Arc<SubstrateBlock>>,
-
-	/// A map of blocks by block hash.
-	blocks_by_hash: HashMap<H256, Arc<SubstrateBlock>>,
-
-	/// A map of receipts by hash.
-	receipts_by_hash: HashMap<H256, ReceiptInfo>,
-
-	/// A map of Signed transaction by hash.
-	signed_tx_by_hash: HashMap<H256, TransactionSigned>,
-
-	/// A map of receipt hashes by block hash.
-	tx_hashes_by_block_and_index: HashMap<H256, HashMap<U256, H256>>,
-}
-
 /// Unwrap the original `jsonrpsee::core::client::Error::Call` error.
 fn unwrap_call_err(err: &subxt::error::RpcError) -> Option<ErrorObjectOwned> {
 	use subxt::backend::rpc::reconnecting_rpc_client;
@@ -167,6 +136,9 @@ pub enum ClientError {
 	/// A [`RpcError`] wrapper error.
 	#[error(transparent)]
 	RpcError(#[from] RpcError),
+	/// A [`sqlx::Error`] wrapper error.
+	#[error(transparent)]
+	SqlxError(#[from] sqlx::Error),
 	/// A [`codec::Error`] wrapper error.
 	#[error(transparent)]
 	CodecError(#[from] codec::Error),
@@ -179,9 +151,18 @@ pub enum ClientError {
 	/// The block hash was not found.
 	#[error("hash not found")]
 	BlockNotFound,
+
+	#[error("No Ethereum extrinsic found")]
+	EthExtrinsicNotFound,
 	/// The transaction fee could not be found
 	#[error("transactionFeePaid event not found")]
 	TxFeeNotFound,
+	/// Failed to decode a raw payload into a signed transaction.
+	#[error("Failed to decode a raw payload into a signed transaction")]
+	TxDecodingFailed,
+	/// Failed to recover eth address.
+	#[error("failed to recover eth address")]
+	RecoverEthAddressFailed,
 	/// The cache is empty.
#[error("cache is empty")] CacheEmpty, @@ -214,163 +195,18 @@ impl From for ErrorObjectOwned { } } -/// The number of recent blocks maintained by the cache. -/// For each block in the cache, we also store the EVM transaction receipts. -pub const CACHE_SIZE: usize = 256; - -impl BlockCache { - fn latest_block(&self) -> Option<&Arc> { - self.buffer.back() - } - - /// Insert an entry into the cache, and prune the oldest entry if the cache is full. - fn insert(&mut self, block: SubstrateBlock) { - if self.buffer.len() >= N { - if let Some(block) = self.buffer.pop_front() { - log::trace!(target: LOG_TARGET, "Pruning block: {}", block.number()); - let hash = block.hash(); - self.blocks_by_hash.remove(&hash); - self.blocks_by_number.remove(&block.number()); - if let Some(entries) = self.tx_hashes_by_block_and_index.remove(&hash) { - for hash in entries.values() { - self.receipts_by_hash.remove(hash); - } - } - } - } - - let block = Arc::new(block); - self.buffer.push_back(block.clone()); - self.blocks_by_number.insert(block.number(), block.clone()); - self.blocks_by_hash.insert(block.hash(), block); - } -} - /// A client connect to a node and maintains a cache of the last `CACHE_SIZE` blocks. #[derive(Clone)] pub struct Client { - /// The inner state of the client. - inner: Arc, - /// A watch channel to signal cache updates. - pub updates: tokio::sync::watch::Receiver<()>, -} - -/// The inner state of the client. -struct ClientInner { api: OnlineClient, rpc_client: ReconnectingRpcClient, rpc: LegacyRpcMethods, - cache: Shared>, + receipt_provider: Arc, + block_provider: Arc, chain_id: u64, max_block_weight: Weight, } -impl ClientInner { - /// Create a new client instance connecting to the substrate node at the given URL. - async fn from_url(url: &str) -> Result { - let rpc_client = ReconnectingRpcClient::builder() - .retry_policy(ExponentialBackoff::from_millis(100).max_delay(Duration::from_secs(10))) - .build(url.to_string()) - .await?; - - let api = OnlineClient::::from_rpc_client(rpc_client.clone()).await?; - let cache = Arc::new(RwLock::new(BlockCache::::default())); - - let rpc = LegacyRpcMethods::::new(RpcClient::new(rpc_client.clone())); - - let (chain_id, max_block_weight) = - tokio::try_join!(chain_id(&api), max_block_weight(&api))?; - - Ok(Self { api, rpc_client, rpc, cache, chain_id, max_block_weight }) - } - - /// Get the receipt infos from the extrinsics in a block. 
- async fn receipt_infos( - &self, - block: &SubstrateBlock, - ) -> Result, ClientError> { - // Get extrinsics from the block - let extrinsics = block.extrinsics().await?; - - // Filter extrinsics from pallet_revive - let extrinsics = extrinsics.iter().flat_map(|ext| { - let call = ext.as_extrinsic::().ok()??; - let transaction_hash = H256(keccak_256(&call.payload)); - let signed_tx = TransactionSigned::decode(&call.payload).ok()?; - let from = signed_tx.recover_eth_address().ok()?; - let tx_info = GenericTransaction::from_signed(signed_tx.clone(), Some(from)); - let contract_address = if tx_info.to.is_none() { - Some(create1(&from, tx_info.nonce.unwrap_or_default().try_into().ok()?)) - } else { - None - }; - - Some((from, signed_tx, tx_info, transaction_hash, contract_address, ext)) - }); - - // Map each extrinsic to a receipt - stream::iter(extrinsics) - .map(|(from, signed_tx, tx_info, transaction_hash, contract_address, ext)| async move { - let events = ext.events().await?; - let tx_fees = - events.find_first::()?.ok_or(ClientError::TxFeeNotFound)?; - - let gas_price = tx_info.gas_price.unwrap_or_default(); - let gas_used = (tx_fees.tip.saturating_add(tx_fees.actual_fee)) - .checked_div(gas_price.as_u128()) - .unwrap_or_default(); - - let success = events.has::()?; - let transaction_index = ext.index(); - let block_hash = block.hash(); - let block_number = block.number().into(); - - // get logs from ContractEmitted event - let logs = events.iter() - .filter_map(|event_details| { - let event_details = event_details.ok()?; - let event = event_details.as_event::().ok()??; - - Some(Log { - address: event.contract, - topics: event.topics, - data: Some(event.data.into()), - block_number: Some(block_number), - transaction_hash, - transaction_index: Some(transaction_index.into()), - block_hash: Some(block_hash), - log_index: Some(event_details.index().into()), - ..Default::default() - }) - }).collect(); - - - log::debug!(target: LOG_TARGET, "Adding receipt for tx hash: {transaction_hash:?} - block: {block_number:?}"); - let receipt = ReceiptInfo::new( - block_hash, - block_number, - contract_address, - from, - logs, - tx_info.to, - gas_price, - gas_used.into(), - success, - transaction_hash, - transaction_index.into(), - tx_info.r#type.unwrap_or_default() - ); - - Ok::<_, ClientError>((receipt.transaction_hash, (signed_tx, receipt))) - }) - .buffer_unordered(10) - .collect::>>() - .await - .into_iter() - .collect::, _>>() - } -} - /// Fetch the chain ID from the substrate chain. async fn chain_id(api: &OnlineClient) -> Result { let query = subxt_client::constants().revive().chain_id(); @@ -395,23 +231,181 @@ async fn extract_block_timestamp(block: &SubstrateBlock) -> Option { Some(ext.value.now / 1000) } +/// Connect to a node at the given URL, and return the underlying API, RPC client, and legacy RPC +/// clients. 
+pub async fn connect(
+	node_rpc_url: &str,
+) -> Result<
+	(OnlineClient<SrcChainConfig>, ReconnectingRpcClient, LegacyRpcMethods<SrcChainConfig>),
+	ClientError,
+> {
+	log::info!(target: LOG_TARGET, "Connecting to node at: {node_rpc_url} ...");
+	let rpc_client = ReconnectingRpcClient::builder()
+		.retry_policy(ExponentialBackoff::from_millis(100).max_delay(Duration::from_secs(10)))
+		.build(node_rpc_url.to_string())
+		.await?;
+	log::info!(target: LOG_TARGET, "Connected to node at: {node_rpc_url}");
+
+	let api = OnlineClient::<SrcChainConfig>::from_rpc_client(rpc_client.clone()).await?;
+	let rpc = LegacyRpcMethods::<SrcChainConfig>::new(RpcClient::new(rpc_client.clone()));
+	Ok((api, rpc_client, rpc))
+}
+
 impl Client {
 	/// Create a new client instance.
-	/// The client will subscribe to new blocks and maintain a cache of [`CACHE_SIZE`] blocks.
-	pub async fn from_url(
-		url: &str,
-		spawn_handle: &sc_service::SpawnEssentialTaskHandle,
+	pub async fn new(
+		api: OnlineClient<SrcChainConfig>,
+		rpc_client: ReconnectingRpcClient,
+		rpc: LegacyRpcMethods<SrcChainConfig>,
+		block_provider: Arc<dyn BlockInfoProvider>,
+		receipt_provider: Arc<dyn ReceiptProvider>,
 	) -> Result<Self, ClientError> {
-		log::info!(target: LOG_TARGET, "Connecting to node at: {url} ...");
-		let inner: Arc<ClientInner> = Arc::new(ClientInner::from_url(url).await?);
-		log::info!(target: LOG_TARGET, "Connected to node at: {url}");
+		let (chain_id, max_block_weight) =
+			tokio::try_join!(chain_id(&api), max_block_weight(&api))?;
 
-		let (tx, mut updates) = tokio::sync::watch::channel(());
+		Ok(Self {
+			api,
+			rpc_client,
+			rpc,
+			receipt_provider,
+			block_provider,
+			chain_id,
+			max_block_weight,
+		})
+	}
 
-		spawn_handle.spawn("subscribe-blocks", None, Self::subscribe_blocks(inner.clone(), tx));
+	/// Subscribe to past blocks, executing the callback for each block.
+	/// The subscription continues iterating past blocks until the closure returns
+	/// `ControlFlow::Break`. Blocks are iterated starting from the latest block and moving
+	/// backward.
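+	// Illustrative usage (mirrors `subscribe_and_cache_receipts` below): a
+	// caller stops the backward iteration with `ControlFlow::Break` once a
+	// target block is reached; `index_block` stands in for arbitrary work.
+	//
+	//   client
+	//       .subscribe_past_blocks(|block| async move {
+	//           index_block(&block).await?;
+	//           if block.number() == oldest_block {
+	//               Ok(ControlFlow::Break(()))
+	//           } else {
+	//               Ok(ControlFlow::Continue(()))
+	//           }
+	//       })
+	//       .await?;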
+	#[allow(dead_code)]
+	async fn subscribe_past_blocks<F, Fut>(&self, callback: F) -> Result<(), ClientError>
+	where
+		F: Fn(SubstrateBlock) -> Fut + Send + Sync,
+		Fut: std::future::Future<Output = Result<ControlFlow<()>, ClientError>> + Send,
+	{
+		log::info!(target: LOG_TARGET, "Subscribing to past blocks");
+		let mut block = self.api.blocks().at_latest().await.inspect_err(|err| {
+			log::error!(target: LOG_TARGET, "Failed to fetch latest block: {err:?}");
+		})?;
+
+		loop {
+			let block_number = block.number();
+			log::debug!(target: LOG_TARGET, "Processing block {block_number}");
+
+			let parent_hash = block.header().parent_hash;
+			let control_flow = callback(block).await.inspect_err(|err| {
+				log::error!(target: LOG_TARGET, "Failed to process block {block_number}: {err:?}");
+			})?;
+
+			match control_flow {
+				ControlFlow::Continue(_) => {
+					if block_number == 0 {
+						log::info!(target: LOG_TARGET, "All past blocks processed");
+						return Ok(());
+					}
+					block = self.api.blocks().at(parent_hash).await.inspect_err(|err| {
+						log::error!(target: LOG_TARGET, "Failed to fetch block at {parent_hash:?}: {err:?}");
+					})?;
+				},
+				ControlFlow::Break(_) => {
+					log::info!(target: LOG_TARGET, "Stopping past block subscription at {block_number}");
+					return Ok(());
+				},
+			}
+		}
+	}
+
+	/// Subscribe to new best blocks, and execute the async closure with
+	/// the extracted block and Ethereum transactions.
+	async fn subscribe_new_blocks<F, Fut>(&self, callback: F) -> Result<(), ClientError>
+	where
+		F: Fn(SubstrateBlock) -> Fut + Send + Sync,
+		Fut: std::future::Future<Output = Result<(), ClientError>> + Send,
+	{
+		log::info!(target: LOG_TARGET, "Subscribing to new blocks");
+		let mut block_stream = match self.api.blocks().subscribe_best().await {
+			Ok(s) => s,
+			Err(err) => {
+				log::error!(target: LOG_TARGET, "Failed to subscribe to blocks: {err:?}");
+				return Err(err.into());
+			},
+		};
+
+		while let Some(block) = block_stream.next().await {
+			let block = match block {
+				Ok(block) => block,
+				Err(err) => {
+					if err.is_disconnected_will_reconnect() {
+						log::warn!(
+							target: LOG_TARGET,
+							"The RPC connection was lost and we may have missed a few blocks"
+						);
+						continue;
+					}
+
+					log::error!(target: LOG_TARGET, "Failed to fetch block: {err:?}");
+					return Err(err.into());
+				},
+			};
+
+			log::debug!(target: LOG_TARGET, "Pushing block: {}", block.number());
+			callback(block).await?;
+		}
-		updates.changed().await.expect("tx is not dropped");
-		Ok(Self { inner, updates })
+		log::info!(target: LOG_TARGET, "Block subscription ended");
+		Ok(())
+	}
+
+	/// Start the block subscription, and populate the block cache.
+	pub fn subscribe_and_cache_blocks(&self, spawn_handle: &sc_service::SpawnEssentialTaskHandle) {
+		let client = self.clone();
+		spawn_handle.spawn("subscribe-blocks", None, async move {
+			let res = client
+				.subscribe_new_blocks(|block| async {
+					let receipts = extract_receipts_from_block(&block).await?;
+
+					client.receipt_provider.insert(&block.hash(), &receipts).await;
+					if let Some(pruned) = client.block_provider.cache_block(block).await {
+						client.receipt_provider.remove(&pruned).await;
+					}
+
+					Ok(())
+				})
+				.await;
+
+			if let Err(err) = res {
+				log::error!(target: LOG_TARGET, "Block subscription error: {err:?}");
+			}
+		});
+	}
+
+	/// Start the block subscription, and populate the receipts cache for new and past blocks.
+ pub async fn subscribe_and_cache_receipts( + &self, + oldest_block: Option, + ) -> Result<(), ClientError> { + let new_blocks_fut = self.subscribe_new_blocks(|block| async move { + let receipts = extract_receipts_from_block(&block).await.inspect_err(|err| { + log::error!(target: LOG_TARGET, "Failed to extract receipts from block: {err:?}"); + })?; + self.receipt_provider.insert(&block.hash(), &receipts).await; + Ok(()) + }); + + let Some(oldest_block) = oldest_block else { return new_blocks_fut.await }; + + let old_blocks_fut = self.subscribe_past_blocks(|block| async move { + let receipts = extract_receipts_from_block(&block).await?; + self.receipt_provider.insert(&block.hash(), &receipts).await; + if block.number() == oldest_block { + Ok(ControlFlow::Break(())) + } else { + Ok(ControlFlow::Continue(())) + } + }); + + try_join!(new_blocks_fut, old_blocks_fut).map(|_| ()) } /// Expose the storage API. @@ -425,14 +419,14 @@ impl Client { (*block_number).try_into().map_err(|_| ClientError::ConversionFailed)?; let hash = self.get_block_hash(n).await?.ok_or(ClientError::BlockNotFound)?; - Ok(self.inner.api.storage().at(hash)) + Ok(self.api.storage().at(hash)) }, - BlockNumberOrTagOrHash::H256(hash) => Ok(self.inner.api.storage().at(*hash)), + BlockNumberOrTagOrHash::H256(hash) => Ok(self.api.storage().at(*hash)), BlockNumberOrTagOrHash::BlockTag(_) => { if let Some(block) = self.latest_block().await { - return Ok(self.inner.api.storage().at(block.hash())); + return Ok(self.api.storage().at(block.hash())); } - let storage = self.inner.api.storage().at_latest().await?; + let storage = self.api.storage().at_latest().await?; Ok(storage) }, } @@ -452,90 +446,24 @@ impl Client { (*block_number).try_into().map_err(|_| ClientError::ConversionFailed)?; let hash = self.get_block_hash(n).await?.ok_or(ClientError::BlockNotFound)?; - Ok(self.inner.api.runtime_api().at(hash)) + Ok(self.api.runtime_api().at(hash)) }, - BlockNumberOrTagOrHash::H256(hash) => Ok(self.inner.api.runtime_api().at(*hash)), + BlockNumberOrTagOrHash::H256(hash) => Ok(self.api.runtime_api().at(*hash)), BlockNumberOrTagOrHash::BlockTag(_) => { if let Some(block) = self.latest_block().await { - return Ok(self.inner.api.runtime_api().at(block.hash())); + return Ok(self.api.runtime_api().at(block.hash())); } - let api = self.inner.api.runtime_api().at_latest().await?; + let api = self.api.runtime_api().at_latest().await?; Ok(api) }, } } - /// Subscribe to new blocks and update the cache. 
- async fn subscribe_blocks(inner: Arc, tx: Sender<()>) { - log::info!(target: LOG_TARGET, "Subscribing to new blocks"); - let mut block_stream = match inner.as_ref().api.blocks().subscribe_best().await { - Ok(s) => s, - Err(err) => { - log::error!(target: LOG_TARGET, "Failed to subscribe to blocks: {err:?}"); - return; - }, - }; - - while let Some(block) = block_stream.next().await { - let block = match block { - Ok(block) => block, - Err(err) => { - if err.is_disconnected_will_reconnect() { - log::warn!( - target: LOG_TARGET, - "The RPC connection was lost and we may have missed a few blocks" - ); - continue; - } - - log::error!(target: LOG_TARGET, "Failed to fetch block: {err:?}"); - return; - }, - }; - - log::trace!(target: LOG_TARGET, "Pushing block: {}", block.number()); - let mut cache = inner.cache.write().await; - - let receipts = inner - .receipt_infos(&block) - .await - .inspect_err(|err| { - log::error!(target: LOG_TARGET, "Failed to get receipts: {err:?}"); - }) - .unwrap_or_default(); - - if !receipts.is_empty() { - let values = receipts - .iter() - .map(|(hash, (_, receipt))| (receipt.transaction_index, *hash)) - .collect::>(); - - cache.tx_hashes_by_block_and_index.insert(block.hash(), values); - - cache - .receipts_by_hash - .extend(receipts.iter().map(|(hash, (_, receipt))| (*hash, receipt.clone()))); - - cache.signed_tx_by_hash.extend( - receipts.iter().map(|(hash, (signed_tx, _))| (*hash, signed_tx.clone())), - ) - } - - cache.insert(block); - tx.send_replace(()); - } - - log::info!(target: LOG_TARGET, "Block subscription ended"); - } -} - -impl Client { /// Get the most recent block stored in the cache. pub async fn latest_block(&self) -> Option> { - let cache = self.inner.cache.read().await; - let block = cache.latest_block()?; - Some(block.clone()) + let block = self.block_provider.latest_block().await?; + Some(block) } /// Expose the transaction API. @@ -543,23 +471,22 @@ impl Client { &self, call: subxt::tx::DefaultPayload, ) -> Result { - let ext = self.inner.api.tx().create_unsigned(&call).map_err(ClientError::from)?; + let ext = self.api.tx().create_unsigned(&call).map_err(ClientError::from)?; let hash = ext.submit().await?; Ok(hash) } /// Get an EVM transaction receipt by hash. pub async fn receipt(&self, tx_hash: &H256) -> Option { - let cache = self.inner.cache.read().await; - cache.receipts_by_hash.get(tx_hash).cloned() + self.receipt_provider.receipt_by_hash(tx_hash).await } /// Get the syncing status of the chain. 
pub async fn syncing(&self) -> Result { - let health = self.inner.rpc.system_health().await?; + let health = self.rpc.system_health().await?; let status = if health.is_syncing { - let client = RpcClient::new(self.inner.rpc_client.clone()); + let client = RpcClient::new(self.rpc_client.clone()); let sync_state: sc_rpc::system::SyncState = client.request("system_syncState", Default::default()).await?; @@ -582,27 +509,23 @@ impl Client { block_hash: &H256, transaction_index: &U256, ) -> Option { - let cache = self.inner.cache.read().await; - let receipt_hash = - cache.tx_hashes_by_block_and_index.get(block_hash)?.get(transaction_index)?; - let receipt = cache.receipts_by_hash.get(receipt_hash)?; - Some(receipt.clone()) + self.receipt_provider + .receipt_by_block_hash_and_index(block_hash, transaction_index) + .await } pub async fn signed_tx_by_hash(&self, tx_hash: &H256) -> Option { - let cache = self.inner.cache.read().await; - cache.signed_tx_by_hash.get(tx_hash).cloned() + self.receipt_provider.signed_tx_by_hash(tx_hash).await } /// Get receipts count per block. pub async fn receipts_count_per_block(&self, block_hash: &SubstrateBlockHash) -> Option { - let cache = self.inner.cache.read().await; - cache.tx_hashes_by_block_and_index.get(block_hash).map(|v| v.len()) + self.receipt_provider.receipts_count_per_block(block_hash).await } /// Get the system health. pub async fn system_health(&self) -> Result { - let health = self.inner.rpc.system_health().await?; + let health = self.rpc.system_health().await?; Ok(health) } @@ -697,8 +620,8 @@ impl Client { /// Get the block number of the latest block. pub async fn block_number(&self) -> Result { - let cache = self.inner.cache.read().await; - let latest_block = cache.buffer.back().ok_or(ClientError::CacheEmpty)?; + let latest_block = + self.block_provider.latest_block().await.ok_or(ClientError::CacheEmpty)?; Ok(latest_block.number()) } @@ -707,13 +630,8 @@ impl Client { &self, block_number: SubstrateBlockNumber, ) -> Result, ClientError> { - let cache = self.inner.cache.read().await; - if let Some(block) = cache.blocks_by_number.get(&block_number) { - return Ok(Some(block.hash())); - } - - let hash = self.inner.rpc.chain_get_block_hash(Some(block_number.into())).await?; - Ok(hash) + let maybe_block = self.block_provider.block_by_number(block_number).await?; + Ok(maybe_block.map(|block| block.hash())) } /// Get a block for the specified hash or number. @@ -727,8 +645,8 @@ impl Client { self.block_by_number(n).await }, BlockNumberOrTag::BlockTag(_) => { - let cache = self.inner.cache.read().await; - Ok(cache.buffer.back().cloned()) + let block = self.block_provider.latest_block().await; + Ok(block) }, } } @@ -738,16 +656,7 @@ impl Client { &self, hash: &SubstrateBlockHash, ) -> Result>, ClientError> { - let cache = self.inner.cache.read().await; - if let Some(block) = cache.blocks_by_hash.get(hash) { - return Ok(Some(block.clone())); - } - - match self.inner.api.blocks().at(*hash).await { - Ok(block) => Ok(Some(Arc::new(block))), - Err(subxt::Error::Block(subxt::error::BlockError::NotFound(_))) => Ok(None), - Err(err) => Err(err.into()), - } + self.block_provider.block_by_hash(hash).await } /// Get a block by number @@ -755,23 +664,18 @@ impl Client { &self, block_number: SubstrateBlockNumber, ) -> Result>, ClientError> { - let cache = self.inner.cache.read().await; - if let Some(block) = cache.blocks_by_number.get(&block_number) { - return Ok(Some(block.clone())); - } - - let Some(hash) = self.get_block_hash(block_number).await? 
else { - return Ok(None); - }; - - self.block_by_hash(&hash).await + self.block_provider.block_by_number(block_number).await } /// Get the EVM block for the given hash. - pub async fn evm_block(&self, block: Arc) -> Result { - let runtime_api = self.inner.api.runtime_api().at(block.hash()); + pub async fn evm_block( + &self, + block: Arc, + hydrated_transactions: bool, + ) -> Result { + let runtime_api = self.api.runtime_api().at(block.hash()); let max_fee = Self::weight_to_fee(&runtime_api, self.max_block_weight()).await?; - let gas_limit = U256::from(max_fee / GAS_PRICE as u128); + let gas_limit = gas_from_fee(max_fee); let header = block.header(); let timestamp = extract_block_timestamp(&block).await.unwrap_or_default(); @@ -781,6 +685,23 @@ impl Client { let state_root = header.state_root.0.into(); let extrinsics_root = header.extrinsics_root.0.into(); + let receipts = extract_receipts_from_block(&block).await?; + let gas_used = + receipts.iter().fold(U256::zero(), |acc, (_, receipt)| acc + receipt.gas_used); + let transactions = if hydrated_transactions { + receipts + .into_iter() + .map(|(signed_tx, receipt)| TransactionInfo::new(receipt, signed_tx)) + .collect::>() + .into() + } else { + receipts + .into_iter() + .map(|(_, receipt)| receipt.transaction_hash) + .collect::>() + .into() + }; + Ok(Block { hash: block.hash(), parent_hash, @@ -789,9 +710,11 @@ impl Client { number: header.number.into(), timestamp: timestamp.into(), difficulty: Some(0u32.into()), + base_fee_per_gas: Some(crate::GAS_PRICE.into()), gas_limit, - logs_bloom: Bytes256([0u8; 256]), + gas_used, receipts_root: extrinsics_root, + transactions, ..Default::default() }) } @@ -811,11 +734,11 @@ impl Client { /// Get the chain ID. pub fn chain_id(&self) -> u64 { - self.inner.chain_id + self.chain_id } /// Get the Max Block Weight. pub fn max_block_weight(&self) -> Weight { - self.inner.max_block_weight + self.max_block_weight } } diff --git a/substrate/frame/revive/rpc/src/eth-indexer.rs b/substrate/frame/revive/rpc/src/eth-indexer.rs new file mode 100644 index 000000000000..3e7f6b6fa91b --- /dev/null +++ b/substrate/frame/revive/rpc/src/eth-indexer.rs @@ -0,0 +1,88 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +//! The Ethereum JSON-RPC server. +use clap::Parser; +use pallet_revive_eth_rpc::{ + client::{connect, Client, SubstrateBlockNumber}, + BlockInfoProvider, BlockInfoProviderImpl, DBReceiptProvider, ReceiptProvider, +}; +use sc_cli::SharedParams; +use std::sync::Arc; + +// Parsed command instructions from the command line +#[derive(Parser, Debug)] +#[clap(author, about, version)] +pub struct CliCommand { + /// The node url to connect to + #[clap(long, default_value = "ws://127.0.0.1:9944")] + pub node_rpc_url: String, + + /// Specifies the block number to start indexing from, going backwards from the current block. 
+ /// If not provided, only new blocks will be indexed + #[clap(long)] + pub oldest_block: Option, + + /// The database used to store Ethereum transaction hashes. + #[clap(long)] + pub database_url: String, + + #[allow(missing_docs)] + #[clap(flatten)] + pub shared_params: SharedParams, +} + +/// Initialize the logger +#[cfg(not(test))] +fn init_logger(params: &SharedParams) -> anyhow::Result<()> { + let mut logger = sc_cli::LoggerBuilder::new(params.log_filters().join(",")); + logger + .with_log_reloading(params.enable_log_reloading) + .with_detailed_output(params.detailed_log_output); + + if let Some(tracing_targets) = ¶ms.tracing_targets { + let tracing_receiver = params.tracing_receiver.into(); + logger.with_profiling(tracing_receiver, tracing_targets); + } + + if params.disable_log_color { + logger.with_colors(false); + } + + logger.init()?; + Ok(()) +} + +#[tokio::main] +pub async fn main() -> anyhow::Result<()> { + let CliCommand { + node_rpc_url, database_url, shared_params: _shared_params, oldest_block, .. + } = CliCommand::parse(); + + #[cfg(not(test))] + init_logger(&_shared_params)?; + + let (api, rpc_client, rpc) = connect(&node_rpc_url).await?; + let block_provider: Arc = + Arc::new(BlockInfoProviderImpl::new(0, api.clone(), rpc.clone())); + let receipt_provider: Arc = + Arc::new(DBReceiptProvider::new(&database_url, false, block_provider.clone()).await?); + + let client = Client::new(api, rpc_client, rpc, block_provider, receipt_provider).await?; + client.subscribe_and_cache_receipts(oldest_block).await?; + + Ok(()) +} diff --git a/substrate/frame/revive/rpc/src/lib.rs b/substrate/frame/revive/rpc/src/lib.rs index ccd8bb043e90..5e1341e2a29a 100644 --- a/substrate/frame/revive/rpc/src/lib.rs +++ b/substrate/frame/revive/rpc/src/lib.rs @@ -24,6 +24,7 @@ use jsonrpsee::{ types::{ErrorCode, ErrorObjectOwned}, }; use pallet_revive::evm::*; +use sp_arithmetic::Permill; use sp_core::{keccak_256, H160, H256, U256}; use thiserror::Error; @@ -35,6 +36,12 @@ pub mod subxt_client; #[cfg(test)] mod tests; +mod block_info_provider; +pub use block_info_provider::*; + +mod receipt_provider; +pub use receipt_provider::*; + mod rpc_health; pub use rpc_health::*; @@ -121,7 +128,12 @@ impl EthRpcServer for EthRpcServerImpl { transaction_hash: H256, ) -> RpcResult> { let receipt = self.client.receipt(&transaction_hash).await; - log::debug!(target: LOG_TARGET, "transaction_receipt for {transaction_hash:?}: {}", receipt.is_some()); + log::debug!( + target: LOG_TARGET, + "transaction_receipt for {transaction_hash:?}: received: {received} - success: {success:?}", + received = receipt.is_some(), + success = receipt.as_ref().map(|r| r.status == Some(U256::one())) + ); Ok(receipt) } @@ -148,31 +160,12 @@ impl EthRpcServer for EthRpcServerImpl { async fn send_raw_transaction(&self, transaction: Bytes) -> RpcResult { let hash = H256(keccak_256(&transaction.0)); - - let tx = TransactionSigned::decode(&transaction.0).map_err(|err| { - log::debug!(target: LOG_TARGET, "Failed to decode transaction: {err:?}"); - EthRpcError::from(err) - })?; - - let eth_addr = tx.recover_eth_address().map_err(|err| { - log::debug!(target: LOG_TARGET, "Failed to recover eth address: {err:?}"); - EthRpcError::InvalidSignature - })?; - - let tx = GenericTransaction::from_signed(tx, Some(eth_addr)); - - // Dry run the transaction to get the weight limit and storage deposit limit - let dry_run = self.client.dry_run(tx, BlockTag::Latest.into()).await?; - - let call = subxt_client::tx().revive().eth_transact( - transaction.0, - 
dry_run.gas_required.into(), - dry_run.storage_deposit, - ); + let call = subxt_client::tx().revive().eth_transact(transaction.0); self.client.submit(call).await.map_err(|err| { log::debug!(target: LOG_TARGET, "submit call failed: {err:?}"); err })?; + log::debug!(target: LOG_TARGET, "send_raw_transaction hash: {hash:?}"); Ok(hash) } @@ -216,12 +209,12 @@ impl EthRpcServer for EthRpcServerImpl { async fn get_block_by_hash( &self, block_hash: H256, - _hydrated_transactions: bool, + hydrated_transactions: bool, ) -> RpcResult> { let Some(block) = self.client.block_by_hash(&block_hash).await? else { return Ok(None); }; - let block = self.client.evm_block(block).await?; + let block = self.client.evm_block(block, hydrated_transactions).await?; Ok(Some(block)) } @@ -239,6 +232,11 @@ impl EthRpcServer for EthRpcServerImpl { Ok(U256::from(GAS_PRICE)) } + async fn max_priority_fee_per_gas(&self) -> RpcResult { + // TODO: Provide better estimation + Ok(U256::from(Permill::from_percent(20).mul_ceil(GAS_PRICE))) + } + async fn get_code(&self, address: H160, block: BlockNumberOrTagOrHash) -> RpcResult { let code = self.client.get_contract_code(&address, block).await?; Ok(code.into()) @@ -251,12 +249,12 @@ impl EthRpcServer for EthRpcServerImpl { async fn get_block_by_number( &self, block: BlockNumberOrTag, - _hydrated_transactions: bool, + hydrated_transactions: bool, ) -> RpcResult> { let Some(block) = self.client.block_by_number_or_tag(&block).await? else { return Ok(None); }; - let block = self.client.evm_block(block).await?; + let block = self.client.evm_block(block, hydrated_transactions).await?; Ok(Some(block)) } diff --git a/substrate/frame/revive/rpc/src/receipt_provider.rs b/substrate/frame/revive/rpc/src/receipt_provider.rs new file mode 100644 index 000000000000..5c102b3d3d41 --- /dev/null +++ b/substrate/frame/revive/rpc/src/receipt_provider.rs @@ -0,0 +1,240 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::{ + client::SubstrateBlock, + subxt_client::{ + revive::{calls::types::EthTransact, events::ContractEmitted}, + system::events::ExtrinsicSuccess, + transaction_payment::events::TransactionFeePaid, + SrcChainConfig, + }, + ClientError, LOG_TARGET, +}; +use futures::{stream, StreamExt}; +use jsonrpsee::core::async_trait; +use pallet_revive::{ + create1, + evm::{GenericTransaction, Log, ReceiptInfo, TransactionSigned, H256, U256}, +}; +use sp_core::keccak_256; +use tokio::join; + +mod cache; +pub use cache::CacheReceiptProvider; + +mod db; +pub use db::DBReceiptProvider; + +/// Provide means to store and retrieve receipts. +#[async_trait] +pub trait ReceiptProvider: Send + Sync { + /// Insert receipts into the provider. + async fn insert(&self, block_hash: &H256, receipts: &[(TransactionSigned, ReceiptInfo)]); + + /// Remove receipts with the given block hash. 
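+	///
+	/// A sketch of how a provider pair is typically driven (illustrative; the
+	/// `(Main, Fallback)` impl below makes a cache/DB pair a provider itself):
+	///
+	/// ```ignore
+	/// // `db_provider` construction is elided here; see `DBReceiptProvider`.
+	/// let provider = (CacheReceiptProvider::default(), db_provider);
+	/// provider.insert(&block_hash, &receipts).await;
+	/// // On a re-org, drop everything indexed under the stale block hash:
+	/// provider.remove(&block_hash).await;
+	/// ```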
+	async fn remove(&self, block_hash: &H256);
+
+	/// Get the receipt for the given block hash and transaction index.
+	async fn receipt_by_block_hash_and_index(
+		&self,
+		block_hash: &H256,
+		transaction_index: &U256,
+	) -> Option<ReceiptInfo>;
+
+	/// Get the number of receipts per block.
+	async fn receipts_count_per_block(&self, block_hash: &H256) -> Option<usize>;
+
+	/// Get the receipt for the given transaction hash.
+	async fn receipt_by_hash(&self, transaction_hash: &H256) -> Option<ReceiptInfo>;
+
+	/// Get the signed transaction for the given transaction hash.
+	async fn signed_tx_by_hash(&self, transaction_hash: &H256) -> Option<TransactionSigned>;
+}
+
+#[async_trait]
+impl<Main: ReceiptProvider, Fallback: ReceiptProvider> ReceiptProvider for (Main, Fallback) {
+	async fn insert(&self, block_hash: &H256, receipts: &[(TransactionSigned, ReceiptInfo)]) {
+		join!(self.0.insert(block_hash, receipts), self.1.insert(block_hash, receipts));
+	}
+
+	async fn remove(&self, block_hash: &H256) {
+		join!(self.0.remove(block_hash), self.1.remove(block_hash));
+	}
+
+	async fn receipt_by_block_hash_and_index(
+		&self,
+		block_hash: &H256,
+		transaction_index: &U256,
+	) -> Option<ReceiptInfo> {
+		if let Some(receipt) =
+			self.0.receipt_by_block_hash_and_index(block_hash, transaction_index).await
+		{
+			return Some(receipt);
+		}
+
+		self.1.receipt_by_block_hash_and_index(block_hash, transaction_index).await
+	}
+
+	async fn receipts_count_per_block(&self, block_hash: &H256) -> Option<usize> {
+		if let Some(count) = self.0.receipts_count_per_block(block_hash).await {
+			return Some(count);
+		}
+		self.1.receipts_count_per_block(block_hash).await
+	}
+
+	async fn receipt_by_hash(&self, hash: &H256) -> Option<ReceiptInfo> {
+		if let Some(receipt) = self.0.receipt_by_hash(hash).await {
+			return Some(receipt);
+		}
+		self.1.receipt_by_hash(hash).await
+	}
+
+	async fn signed_tx_by_hash(&self, hash: &H256) -> Option<TransactionSigned> {
+		if let Some(tx) = self.0.signed_tx_by_hash(hash).await {
+			return Some(tx);
+		}
+		self.1.signed_tx_by_hash(hash).await
+	}
+}
+
+/// Extract a [`TransactionSigned`] and a [`ReceiptInfo`] from an extrinsic.
+pub async fn extract_receipt_from_extrinsic(
+	block: &SubstrateBlock,
+	ext: subxt::blocks::ExtrinsicDetails<SrcChainConfig, subxt::OnlineClient<SrcChainConfig>>,
+	call: EthTransact,
+) -> Result<(TransactionSigned, ReceiptInfo), ClientError> {
+	let transaction_index = ext.index();
+	let block_number = U256::from(block.number());
+	let block_hash = block.hash();
+	let events = ext.events().await?;
+
+	let success = events.has::<ExtrinsicSuccess>().inspect_err(|err| {
+		log::debug!(target: LOG_TARGET, "Failed to look up the ExtrinsicSuccess event in block {block_number}: {err:?}")
+	})?;
+	let tx_fees = events
+		.find_first::<TransactionFeePaid>()?
+ .ok_or(ClientError::TxFeeNotFound) + .inspect_err( + |err| log::debug!(target: LOG_TARGET, "TransactionFeePaid not found in events for block {block_number}\n{err:?}") + )?; + let transaction_hash = H256(keccak_256(&call.payload)); + + let signed_tx = + TransactionSigned::decode(&call.payload).map_err(|_| ClientError::TxDecodingFailed)?; + let from = signed_tx.recover_eth_address().map_err(|_| { + log::error!(target: LOG_TARGET, "Failed to recover eth address from signed tx"); + ClientError::RecoverEthAddressFailed + })?; + + let tx_info = GenericTransaction::from_signed(signed_tx.clone(), Some(from)); + let gas_price = tx_info.gas_price.unwrap_or_default(); + let gas_used = (tx_fees.tip.saturating_add(tx_fees.actual_fee)) + .checked_div(gas_price.as_u128()) + .unwrap_or_default(); + + // get logs from ContractEmitted event + let logs = events + .iter() + .filter_map(|event_details| { + let event_details = event_details.ok()?; + let event = event_details.as_event::().ok()??; + + Some(Log { + address: event.contract, + topics: event.topics, + data: Some(event.data.into()), + block_number: Some(block_number), + transaction_hash, + transaction_index: Some(transaction_index.into()), + block_hash: Some(block_hash), + log_index: Some(event_details.index().into()), + ..Default::default() + }) + }) + .collect(); + + let contract_address = if tx_info.to.is_none() { + Some(create1( + &from, + tx_info + .nonce + .unwrap_or_default() + .try_into() + .map_err(|_| ClientError::ConversionFailed)?, + )) + } else { + None + }; + + log::debug!(target: LOG_TARGET, "Adding receipt for tx hash: {transaction_hash:?} - block: {block_number:?}"); + let receipt = ReceiptInfo::new( + block_hash, + block_number, + contract_address, + from, + logs, + tx_info.to, + gas_price, + gas_used.into(), + success, + transaction_hash, + transaction_index.into(), + tx_info.r#type.unwrap_or_default(), + ); + Ok((signed_tx, receipt)) +} + +/// Extract receipts from block. +pub async fn extract_receipts_from_block( + block: &SubstrateBlock, +) -> Result, ClientError> { + // Filter extrinsics from pallet_revive + let extrinsics = block.extrinsics().await.inspect_err(|err| { + log::debug!(target: LOG_TARGET, "Error fetching for #{:?} extrinsics: {err:?}", block.number()); + })?; + + let extrinsics = extrinsics.iter().flat_map(|ext| { + let call = ext.as_extrinsic::().ok()??; + Some((ext, call)) + }); + + stream::iter(extrinsics) + .map(|(ext, call)| async move { extract_receipt_from_extrinsic(block, ext, call).await }) + .buffer_unordered(10) + .collect::>>() + .await + .into_iter() + .collect::, _>>() +} + +/// Extract receipt from transaction +pub async fn extract_receipts_from_transaction( + block: &SubstrateBlock, + transaction_index: usize, +) -> Result<(TransactionSigned, ReceiptInfo), ClientError> { + let extrinsics = block.extrinsics().await?; + let ext = extrinsics + .iter() + .nth(transaction_index) + .ok_or(ClientError::EthExtrinsicNotFound)?; + + let call = ext + .as_extrinsic::()? + .ok_or_else(|| ClientError::EthExtrinsicNotFound)?; + extract_receipt_from_extrinsic(block, ext, call).await +} diff --git a/substrate/frame/revive/rpc/src/receipt_provider/cache.rs b/substrate/frame/revive/rpc/src/receipt_provider/cache.rs new file mode 100644 index 000000000000..39124929ec07 --- /dev/null +++ b/substrate/frame/revive/rpc/src/receipt_provider/cache.rs @@ -0,0 +1,148 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +use super::ReceiptProvider; +use jsonrpsee::core::async_trait; +use pallet_revive::evm::{ReceiptInfo, TransactionSigned, H256, U256}; +use std::{collections::HashMap, sync::Arc}; +use tokio::sync::RwLock; + +/// A `[ReceiptProvider]` that caches receipts in memory. +#[derive(Clone, Default)] +pub struct CacheReceiptProvider { + cache: Arc>, +} + +impl CacheReceiptProvider { + /// Get a read access on the shared cache. + async fn cache(&self) -> tokio::sync::RwLockReadGuard<'_, ReceiptCache> { + self.cache.read().await + } +} + +#[async_trait] +impl ReceiptProvider for CacheReceiptProvider { + async fn insert(&self, block_hash: &H256, receipts: &[(TransactionSigned, ReceiptInfo)]) { + let mut cache = self.cache.write().await; + cache.insert(block_hash, receipts); + } + + async fn remove(&self, block_hash: &H256) { + let mut cache = self.cache.write().await; + cache.remove(block_hash); + } + + async fn receipt_by_block_hash_and_index( + &self, + block_hash: &H256, + transaction_index: &U256, + ) -> Option { + let cache = self.cache().await; + let receipt_hash = cache + .transaction_hashes_by_block_and_index + .get(block_hash)? + .get(transaction_index)?; + let receipt = cache.receipts_by_hash.get(receipt_hash)?; + Some(receipt.clone()) + } + + async fn receipts_count_per_block(&self, block_hash: &H256) -> Option { + let cache = self.cache().await; + cache.transaction_hashes_by_block_and_index.get(block_hash).map(|v| v.len()) + } + + async fn receipt_by_hash(&self, hash: &H256) -> Option { + let cache = self.cache().await; + cache.receipts_by_hash.get(hash).cloned() + } + + async fn signed_tx_by_hash(&self, hash: &H256) -> Option { + let cache = self.cache().await; + cache.signed_tx_by_hash.get(hash).cloned() + } +} + +#[derive(Default)] +struct ReceiptCache { + /// A map of receipts by transaction hash. + receipts_by_hash: HashMap, + + /// A map of Signed transaction by transaction hash. + signed_tx_by_hash: HashMap, + + /// A map of receipt hashes by block hash. + transaction_hashes_by_block_and_index: HashMap>, +} + +impl ReceiptCache { + /// Insert new receipts into the cache. + pub fn insert(&mut self, block_hash: &H256, receipts: &[(TransactionSigned, ReceiptInfo)]) { + if !receipts.is_empty() { + let values = receipts + .iter() + .map(|(_, receipt)| (receipt.transaction_index, receipt.transaction_hash)) + .collect::>(); + + self.transaction_hashes_by_block_and_index.insert(*block_hash, values); + + self.receipts_by_hash.extend( + receipts.iter().map(|(_, receipt)| (receipt.transaction_hash, receipt.clone())), + ); + + self.signed_tx_by_hash.extend( + receipts + .iter() + .map(|(signed_tx, receipt)| (receipt.transaction_hash, signed_tx.clone())), + ) + } + } + + /// Remove entry from the cache. 
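+	///
+	/// Dropping a block hash removes the per-block index entry together with every
+	/// receipt and signed transaction recorded under it, mirroring the
+	/// `cache_insert_and_remove_works` test below:
+	///
+	/// ```ignore
+	/// cache.insert(&block_hash, &receipts);
+	/// cache.remove(&block_hash);
+	/// assert!(cache.receipts_by_hash.is_empty());
+	/// ```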
+	pub fn remove(&mut self, hash: &H256) {
+		if let Some(entries) = self.transaction_hashes_by_block_and_index.remove(hash) {
+			for hash in entries.values() {
+				self.receipts_by_hash.remove(hash);
+				self.signed_tx_by_hash.remove(hash);
+			}
+		}
+	}
+}
+
+#[cfg(test)]
+mod test {
+	use super::*;
+
+	#[test]
+	fn cache_insert_and_remove_works() {
+		let mut cache = ReceiptCache::default();
+
+		for i in 1u8..=3 {
+			let hash = H256::from([i; 32]);
+			cache.insert(
+				&hash,
+				&[(
+					TransactionSigned::default(),
+					ReceiptInfo { transaction_hash: hash, ..Default::default() },
+				)],
+			);
+		}
+
+		cache.remove(&H256::from([1u8; 32]));
+		assert_eq!(cache.transaction_hashes_by_block_and_index.len(), 2);
+		assert_eq!(cache.receipts_by_hash.len(), 2);
+		assert_eq!(cache.signed_tx_by_hash.len(), 2);
+	}
+}
diff --git a/substrate/frame/revive/rpc/src/receipt_provider/db.rs b/substrate/frame/revive/rpc/src/receipt_provider/db.rs
new file mode 100644
index 000000000000..63917d6193ea
--- /dev/null
+++ b/substrate/frame/revive/rpc/src/receipt_provider/db.rs
@@ -0,0 +1,216 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use super::*;
+use crate::BlockInfoProvider;
+use jsonrpsee::core::async_trait;
+use pallet_revive::evm::{ReceiptInfo, TransactionSigned};
+use sp_core::{H256, U256};
+use sqlx::{query, SqlitePool};
+use std::sync::Arc;
+
+/// A [`ReceiptProvider`] that stores receipts in a SQLite database.
+#[derive(Clone)]
+pub struct DBReceiptProvider {
+	/// The database pool.
+	pool: SqlitePool,
+	/// The block provider used to fetch blocks and reconstruct receipts.
+	block_provider: Arc<dyn BlockInfoProvider>,
+	/// Whether the provider is read-only and should not write to the DB.
+	read_only: bool,
+}
+
+impl DBReceiptProvider {
+	/// Create a new `DBReceiptProvider` with the given database URL and block provider.
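+	///
+	/// Illustrative construction (the URL is a placeholder; any SQLite connection
+	/// string accepted by `sqlx` works, and `block_provider` is assumed to exist):
+	///
+	/// ```ignore
+	/// let provider = DBReceiptProvider::new(
+	/// 	"sqlite:receipts.db",
+	/// 	false, // read_only: false, so newly indexed receipts are also written
+	/// 	block_provider.clone(),
+	/// )
+	/// .await?;
+	/// ```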
+ pub async fn new( + database_url: &str, + read_only: bool, + block_provider: Arc, + ) -> Result { + let pool = SqlitePool::connect(database_url).await?; + Ok(Self { pool, block_provider, read_only }) + } + + async fn fetch_row(&self, transaction_hash: &H256) -> Option<(H256, usize)> { + let transaction_hash = hex::encode(transaction_hash); + let result = query!( + r#" + SELECT block_hash, transaction_index + FROM transaction_hashes + WHERE transaction_hash = $1 + "#, + transaction_hash + ) + .fetch_optional(&self.pool) + .await + .ok()??; + + let block_hash = result.block_hash.parse::().ok()?; + let transaction_index = result.transaction_index.try_into().ok()?; + Some((block_hash, transaction_index)) + } +} + +#[async_trait] +impl ReceiptProvider for DBReceiptProvider { + async fn insert(&self, block_hash: &H256, receipts: &[(TransactionSigned, ReceiptInfo)]) { + if self.read_only { + return + } + + let block_hash_str = hex::encode(block_hash); + for (_, receipt) in receipts { + let transaction_hash = hex::encode(receipt.transaction_hash); + let transaction_index = receipt.transaction_index.as_u32() as i32; + + let result = query!( + r#" + INSERT INTO transaction_hashes (transaction_hash, block_hash, transaction_index) + VALUES ($1, $2, $3) + + ON CONFLICT(transaction_hash) DO UPDATE SET + block_hash = EXCLUDED.block_hash, + transaction_index = EXCLUDED.transaction_index + "#, + transaction_hash, + block_hash_str, + transaction_index + ) + .execute(&self.pool) + .await; + + if let Err(err) = result { + log::error!( + "Error inserting transaction for block hash {block_hash:?}: {:?}", + err + ); + } + } + } + + async fn remove(&self, _block_hash: &H256) {} + + async fn receipts_count_per_block(&self, block_hash: &H256) -> Option { + let block_hash = hex::encode(block_hash); + let row = query!( + r#" + SELECT COUNT(*) as count + FROM transaction_hashes + WHERE block_hash = $1 + "#, + block_hash + ) + .fetch_one(&self.pool) + .await + .ok()?; + + let count = row.count as usize; + Some(count) + } + + async fn receipt_by_block_hash_and_index( + &self, + block_hash: &H256, + transaction_index: &U256, + ) -> Option { + let block = self.block_provider.block_by_hash(block_hash).await.ok()??; + let transaction_index: usize = transaction_index.as_usize(); // TODO: check for overflow + let (_, receipt) = + extract_receipts_from_transaction(&block, transaction_index).await.ok()?; + Some(receipt) + } + + async fn receipt_by_hash(&self, transaction_hash: &H256) -> Option { + let (block_hash, transaction_index) = self.fetch_row(transaction_hash).await?; + + let block = self.block_provider.block_by_hash(&block_hash).await.ok()??; + let (_, receipt) = + extract_receipts_from_transaction(&block, transaction_index).await.ok()?; + Some(receipt) + } + + async fn signed_tx_by_hash(&self, transaction_hash: &H256) -> Option { + let transaction_hash = hex::encode(transaction_hash); + let result = query!( + r#" + SELECT block_hash, transaction_index + FROM transaction_hashes + WHERE transaction_hash = $1 + "#, + transaction_hash + ) + .fetch_optional(&self.pool) + .await + .ok()??; + + let block_hash = result.block_hash.parse::().ok()?; + let transaction_index = result.transaction_index.try_into().ok()?; + + let block = self.block_provider.block_by_hash(&block_hash).await.ok()??; + let (signed_tx, _) = + extract_receipts_from_transaction(&block, transaction_index).await.ok()?; + Some(signed_tx) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::test::MockBlockInfoProvider; + use 
pallet_revive::evm::{ReceiptInfo, TransactionSigned}; + use sp_core::H256; + use sqlx::SqlitePool; + + async fn setup_sqlite_provider(pool: SqlitePool) -> DBReceiptProvider { + DBReceiptProvider { + pool, + block_provider: Arc::new(MockBlockInfoProvider {}), + read_only: false, + } + } + + #[sqlx::test] + async fn test_insert(pool: SqlitePool) { + let provider = setup_sqlite_provider(pool).await; + let block_hash = H256::default(); + let receipts = vec![(TransactionSigned::default(), ReceiptInfo::default())]; + + provider.insert(&block_hash, &receipts).await; + let row = provider.fetch_row(&receipts[0].1.transaction_hash).await; + assert_eq!(row, Some((block_hash, 0))); + } + + #[sqlx::test] + async fn test_receipts_count_per_block(pool: SqlitePool) { + let provider = setup_sqlite_provider(pool).await; + let block_hash = H256::default(); + let receipts = vec![ + ( + TransactionSigned::default(), + ReceiptInfo { transaction_hash: H256::from([0u8; 32]), ..Default::default() }, + ), + ( + TransactionSigned::default(), + ReceiptInfo { transaction_hash: H256::from([1u8; 32]), ..Default::default() }, + ), + ]; + + provider.insert(&block_hash, &receipts).await; + let count = provider.receipts_count_per_block(&block_hash).await; + assert_eq!(count, Some(2)); + } +} diff --git a/substrate/frame/revive/rpc/src/rpc_health.rs b/substrate/frame/revive/rpc/src/rpc_health.rs index f94d4b82a80f..35c5a588f284 100644 --- a/substrate/frame/revive/rpc/src/rpc_health.rs +++ b/substrate/frame/revive/rpc/src/rpc_health.rs @@ -25,6 +25,10 @@ pub trait SystemHealthRpc { /// Proxy the substrate chain system_health RPC call. #[method(name = "system_health")] async fn system_health(&self) -> RpcResult; + + ///Returns the number of peers currently connected to the client. + #[method(name = "net_peerCount")] + async fn net_peer_count(&self) -> RpcResult; } pub struct SystemHealthRpcServerImpl { @@ -47,4 +51,9 @@ impl SystemHealthRpcServer for SystemHealthRpcServerImpl { should_have_peers: health.should_have_peers, }) } + + async fn net_peer_count(&self) -> RpcResult { + let health = self.client.system_health().await?; + Ok((health.peers as u64).into()) + } } diff --git a/substrate/frame/revive/rpc/src/rpc_methods_gen.rs b/substrate/frame/revive/rpc/src/rpc_methods_gen.rs index ad34dbfdfb49..da60360d9e61 100644 --- a/substrate/frame/revive/rpc/src/rpc_methods_gen.rs +++ b/substrate/frame/revive/rpc/src/rpc_methods_gen.rs @@ -142,6 +142,10 @@ pub trait EthRpc { transaction_hash: H256, ) -> RpcResult>; + /// Returns the current maxPriorityFeePerGas per gas in wei. + #[method(name = "eth_maxPriorityFeePerGas")] + async fn max_priority_fee_per_gas(&self) -> RpcResult; + /// Submits a raw transaction. For EIP-4844 transactions, the raw form must be the network form. /// This means it includes the blobs, KZG commitments, and KZG proofs. #[method(name = "eth_sendRawTransaction")] diff --git a/substrate/frame/revive/src/evm.rs b/substrate/frame/revive/src/evm.rs index c3495fc0559d..c8c967fbe091 100644 --- a/substrate/frame/revive/src/evm.rs +++ b/substrate/frame/revive/src/evm.rs @@ -19,4 +19,6 @@ mod api; pub use api::*; +mod gas_encoder; +pub use gas_encoder::*; pub mod runtime; diff --git a/substrate/frame/revive/src/evm/api/byte.rs b/substrate/frame/revive/src/evm/api/byte.rs index df4ed1740ecd..c2d64f8e5e42 100644 --- a/substrate/frame/revive/src/evm/api/byte.rs +++ b/substrate/frame/revive/src/evm/api/byte.rs @@ -116,7 +116,10 @@ macro_rules! 
impl_hex { impl Debug for $type { fn fmt(&self, f: &mut Formatter<'_>) -> FmtResult { - write!(f, concat!(stringify!($type), "({})"), self.0.to_hex()) + let hex_str = self.0.to_hex(); + let truncated = &hex_str[..hex_str.len().min(100)]; + let ellipsis = if hex_str.len() > 100 { "..." } else { "" }; + write!(f, concat!(stringify!($type), "({}{})"), truncated,ellipsis) } } diff --git a/substrate/frame/revive/src/evm/api/rpc_types.rs b/substrate/frame/revive/src/evm/api/rpc_types.rs index ed046cb4da44..b4b2c6ffcf17 100644 --- a/substrate/frame/revive/src/evm/api/rpc_types.rs +++ b/substrate/frame/revive/src/evm/api/rpc_types.rs @@ -192,7 +192,11 @@ impl GenericTransaction { value: Some(tx.value), to: Some(tx.to), gas: Some(tx.gas), - gas_price: Some(tx.max_fee_per_blob_gas), + gas_price: Some( + U256::from(crate::GAS_PRICE) + .saturating_add(tx.max_priority_fee_per_gas) + .max(tx.max_fee_per_blob_gas), + ), access_list: Some(tx.access_list), blob_versioned_hashes: tx.blob_versioned_hashes, max_fee_per_blob_gas: Some(tx.max_fee_per_blob_gas), @@ -209,7 +213,11 @@ impl GenericTransaction { value: Some(tx.value), to: tx.to, gas: Some(tx.gas), - gas_price: Some(tx.gas_price), + gas_price: Some( + U256::from(crate::GAS_PRICE) + .saturating_add(tx.max_priority_fee_per_gas) + .max(tx.max_fee_per_gas), + ), access_list: Some(tx.access_list), max_fee_per_gas: Some(tx.max_fee_per_gas), max_priority_fee_per_gas: Some(tx.max_priority_fee_per_gas), diff --git a/substrate/frame/revive/src/evm/api/rpc_types_gen.rs b/substrate/frame/revive/src/evm/api/rpc_types_gen.rs index 1d65fdefdde6..5d31613ca314 100644 --- a/substrate/frame/revive/src/evm/api/rpc_types_gen.rs +++ b/substrate/frame/revive/src/evm/api/rpc_types_gen.rs @@ -87,7 +87,7 @@ pub struct Block { /// Total difficulty #[serde(rename = "totalDifficulty", skip_serializing_if = "Option::is_none")] pub total_difficulty: Option, - pub transactions: H256OrTransactionInfo, + pub transactions: HashesOrTransactionInfos, /// Transactions root #[serde(rename = "transactionsRoot")] pub transactions_root: H256, @@ -357,15 +357,15 @@ pub enum BlockTag { Debug, Clone, Encode, Decode, TypeInfo, Serialize, Deserialize, From, TryInto, Eq, PartialEq, )] #[serde(untagged)] -pub enum H256OrTransactionInfo { +pub enum HashesOrTransactionInfos { /// Transaction hashes - H256s(Vec), + Hashes(Vec), /// Full transactions TransactionInfos(Vec), } -impl Default for H256OrTransactionInfo { +impl Default for HashesOrTransactionInfos { fn default() -> Self { - H256OrTransactionInfo::H256s(Default::default()) + HashesOrTransactionInfos::Hashes(Default::default()) } } diff --git a/substrate/frame/revive/src/evm/gas_encoder.rs b/substrate/frame/revive/src/evm/gas_encoder.rs new file mode 100644 index 000000000000..ffdf8b13c043 --- /dev/null +++ b/substrate/frame/revive/src/evm/gas_encoder.rs @@ -0,0 +1,174 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//! Encodes/Decodes EVM gas values.
+
+use crate::Weight;
+use core::ops::{Div, Rem};
+use frame_support::pallet_prelude::CheckedShl;
+use sp_arithmetic::traits::{One, Zero};
+use sp_core::U256;
+
+// We use 3 digits to store each component.
+const SCALE: u128 = 100;
+
+/// Divides `value` by `mask`, rounding the quotient up to the nearest integer.
+///
+/// # Panics
+/// Panics if the `mask` is zero.
+fn round_up<T>(value: T, mask: T) -> T
+where
+	T: One + Zero + Copy + Rem<Output = T> + Div<Output = T>,
+	<T as Rem>::Output: PartialEq,
+{
+	let rest = if value % mask == T::zero() { T::zero() } else { T::one() };
+	value / mask + rest
+}
+
+/// Rounds up the log2 of the given value to the nearest integer.
+fn log2_round_up<T>(val: T) -> u128
+where
+	T: Into<u128>,
+{
+	let val = val.into();
+	val.checked_ilog2()
+		.map(|v| if 1u128 << v == val { v } else { v + 1 })
+		.unwrap_or(0) as u128
+}
+
+mod private {
+	pub trait Sealed {}
+	impl Sealed for () {}
+}
+
+/// Encodes/Decodes EVM gas values.
+///
+/// # Note
+///
+/// This is defined as a trait rather than standalone functions to allow
+/// it to be added as an associated type to [`crate::Config`]. This way,
+/// it can be invoked without requiring the implementation bounds to be
+/// explicitly specified.
+///
+/// This trait is sealed and cannot be implemented by downstream crates.
+pub trait GasEncoder<Balance>: private::Sealed {
+	/// Encodes all components (deposit limit, weight reference time, and proof size) into a single
+	/// gas value.
+	fn encode(gas_limit: U256, weight: Weight, deposit: Balance) -> U256;
+
+	/// Decodes the weight and deposit from the encoded gas value.
+	/// Returns `None` if the gas value is invalid.
+	fn decode(gas: U256) -> Option<(Weight, Balance)>;
+}
+
+impl<Balance> GasEncoder<Balance> for ()
+where
+	Balance: Zero + One + CheckedShl + Into<u128>,
+{
+	/// The encoding follows the pattern `g...grrppdd`, where:
+	/// - `dd`: the log2 of the deposit value, encoded in the lowest 2 digits.
+	/// - `pp`: the log2 of the proof size, encoded in the next 2 digits.
+	/// - `rr`: the log2 of the reference time, encoded in the next 2 digits.
+	/// - `g...g`: the raw gas limit, encoded in the highest digits.
+	///
+	/// # Note
+	/// - The deposit value is capped at 2^99.
+	fn encode(gas_limit: U256, weight: Weight, deposit: Balance) -> U256 {
+		let deposit: u128 = deposit.into();
+		let deposit_component = log2_round_up(deposit);
+
+		let proof_size = weight.proof_size();
+		let proof_size_component = SCALE * log2_round_up(proof_size);
+
+		let ref_time = weight.ref_time();
+		let ref_time_component = SCALE.pow(2) * log2_round_up(ref_time);
+
+		let components = U256::from(deposit_component + proof_size_component + ref_time_component);
+
+		let raw_gas_mask = U256::from(SCALE).pow(3.into());
+		let raw_gas_component = if gas_limit < raw_gas_mask.saturating_add(components) {
+			raw_gas_mask
+		} else {
+			round_up(gas_limit, raw_gas_mask).saturating_mul(raw_gas_mask)
+		};
+
+		components.saturating_add(raw_gas_component)
+	}
+
+	fn decode(gas: U256) -> Option<(Weight, Balance)> {
+		let deposit = gas % SCALE;
+
+		// Casting with as_u32 is safe since all values are capped by `SCALE`.
+		let deposit = deposit.as_u32();
+		let proof_time = ((gas / SCALE) % SCALE).as_u32();
+		let ref_time = ((gas / SCALE.pow(2)) % SCALE).as_u32();
+
+		let weight = Weight::from_parts(
+			if ref_time == 0 { 0 } else { 1u64.checked_shl(ref_time)? },
+			if proof_time == 0 { 0 } else { 1u64.checked_shl(proof_time)? },
+		);
+		let deposit =
+			if deposit == 0 { Balance::zero() } else { Balance::one().checked_shl(deposit)? };
+
+		Some((weight, deposit))
+	}
+}
+
+#[cfg(test)]
+mod test {
+	use super::*;
+
+	#[test]
+	fn test_gas_encoding_decoding_works() {
+		let raw_gas_limit = 111_111_999_999_999u128;
+		let weight = Weight::from_parts(222_999_999, 333_999_999);
+		let deposit = 444_999_999u64;
+
+		let encoded_gas = <() as GasEncoder<u64>>::encode(raw_gas_limit.into(), weight, deposit);
+		assert_eq!(encoded_gas, U256::from(111_112_000_282_929u128));
+		assert!(encoded_gas > raw_gas_limit.into());
+
+		let (decoded_weight, decoded_deposit) =
+			<() as GasEncoder<u64>>::decode(encoded_gas).unwrap();
+		assert!(decoded_weight.all_gte(weight));
+		assert!(weight.mul(2).all_gte(decoded_weight));
+
+		assert!(decoded_deposit >= deposit);
+		assert!(deposit * 2 >= decoded_deposit);
+	}
+
+	#[test]
+	fn test_encoding_zero_values_work() {
+		let encoded_gas = <() as GasEncoder<u64>>::encode(
+			Default::default(),
+			Default::default(),
+			Default::default(),
+		);
+
+		assert_eq!(encoded_gas, U256::from(1_00_00_00));
+
+		let (decoded_weight, decoded_deposit) =
+			<() as GasEncoder<u64>>::decode(encoded_gas).unwrap();
+		assert_eq!(Weight::default(), decoded_weight);
+		assert_eq!(0u64, decoded_deposit);
+	}
+
+	#[test]
+	fn test_overflow() {
+		assert_eq!(None, <() as GasEncoder<u64>>::decode(65_00u128.into()), "Invalid proof size");
+		assert_eq!(None, <() as GasEncoder<u64>>::decode(65_00_00u128.into()), "Invalid ref_time");
+	}
+}
diff --git a/substrate/frame/revive/src/evm/runtime.rs b/substrate/frame/revive/src/evm/runtime.rs
index 24b75de83569..d4b344e20eb8 100644
--- a/substrate/frame/revive/src/evm/runtime.rs
+++ b/substrate/frame/revive/src/evm/runtime.rs
@@ -16,9 +16,13 @@
 // limitations under the License.
 //! Runtime types for integrating `pallet-revive` with the EVM.
 use crate::{
-	evm::api::{GenericTransaction, TransactionSigned},
-	AccountIdOf, AddressMapper, BalanceOf, MomentOf, Weight, LOG_TARGET,
+	evm::{
+		api::{GenericTransaction, TransactionSigned},
+		GasEncoder,
+	},
+	AccountIdOf, AddressMapper, BalanceOf, Config, MomentOf, LOG_TARGET,
 };
+use alloc::vec::Vec;
 use codec::{Decode, Encode};
 use frame_support::{
 	dispatch::{DispatchInfo, GetDispatchInfo},
@@ -26,20 +30,17 @@ use frame_support::{
 };
 use pallet_transaction_payment::OnChargeTransaction;
 use scale_info::{StaticTypeInfo, TypeInfo};
-use sp_arithmetic::Percent;
 use sp_core::{Get, H256, U256};
 use sp_runtime::{
 	generic::{self, CheckedExtrinsic, ExtrinsicFormat},
 	traits::{
-		self, Checkable, Dispatchable, ExtrinsicLike, ExtrinsicMetadata, IdentifyAccount, Member,
-		TransactionExtension,
+		self, AtLeast32BitUnsigned, Checkable, Dispatchable, ExtrinsicLike, ExtrinsicMetadata,
+		IdentifyAccount, Member, TransactionExtension,
 	},
 	transaction_validity::{InvalidTransaction, TransactionValidityError},
 	OpaqueExtrinsic, RuntimeDebug, Saturating,
 };
-use alloc::vec::Vec;
-
 type CallOf<T> = <T as frame_system::Config>::RuntimeCall;
 
 /// The EVM gas price.
@@ -48,7 +49,28 @@ type CallOf<T> = <T as frame_system::Config>::RuntimeCall;
 /// We use a fixed value for the gas price.
 /// This lets us calculate the gas estimate for a transaction with the formula:
 /// `estimate_gas = substrate_fee / gas_price`.
-pub const GAS_PRICE: u32 = 1u32;
+///
+/// The chosen constant value is:
+/// - Not too high, ensuring the gas value is large enough (at least 7 digits) to encode the
+///   ref_time, proof_size, and deposit into the six least significant digits of the gas value.
+/// - Not too low, enabling users to adjust the gas price to define a tip.
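+///
+/// Worked example (illustrative arithmetic only): with `GAS_PRICE = 1_000`,
+/// [`gas_from_fee`] below rounds fees up to whole units of gas:
+///
+/// ```ignore
+/// assert_eq!(gas_from_fee(3_000u64), U256::from(3)); // divides evenly
+/// assert_eq!(gas_from_fee(2_500u64), U256::from(3)); // 2.5 gas, rounded up
+/// ```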
+pub const GAS_PRICE: u32 = 1_000u32; + +/// Convert a `Balance` into a gas value, using the fixed `GAS_PRICE`. +/// The gas is calculated as `balance / GAS_PRICE`, rounded up to the nearest integer. +pub fn gas_from_fee(fee: Balance) -> U256 +where + u32: Into, + Balance: Into + AtLeast32BitUnsigned + Copy, +{ + let gas_price = GAS_PRICE.into(); + let remainder = fee % gas_price; + if remainder.is_zero() { + (fee / gas_price).into() + } else { + (fee.saturating_add(gas_price) / gas_price).into() + } +} /// Wraps [`generic::UncheckedExtrinsic`] to support checking unsigned /// [`crate::Call::eth_transact`] extrinsic. @@ -140,15 +162,8 @@ where fn check(self, lookup: &Lookup) -> Result { if !self.0.is_signed() { if let Ok(call) = self.0.function.clone().try_into() { - if let crate::Call::eth_transact { payload, gas_limit, storage_deposit_limit } = - call - { - let checked = E::try_into_checked_extrinsic( - payload, - gas_limit, - storage_deposit_limit, - self.encoded_size(), - )?; + if let crate::Call::eth_transact { payload } = call { + let checked = E::try_into_checked_extrinsic(payload, self.encoded_size())?; return Ok(checked) }; } @@ -251,7 +266,7 @@ where /// EthExtra convert an unsigned [`crate::Call::eth_transact`] into a [`CheckedExtrinsic`]. pub trait EthExtra { /// The Runtime configuration. - type Config: crate::Config + pallet_transaction_payment::Config; + type Config: Config + pallet_transaction_payment::Config; /// The Runtime's transaction extension. /// It should include at least: @@ -281,8 +296,6 @@ pub trait EthExtra { /// - `encoded_len`: The encoded length of the extrinsic. fn try_into_checked_extrinsic( payload: Vec, - gas_limit: Weight, - storage_deposit_limit: BalanceOf, encoded_len: usize, ) -> Result< CheckedExtrinsic, CallOf, Self::Extension>, @@ -307,12 +320,16 @@ pub trait EthExtra { InvalidTransaction::BadProof })?; - let signer = - ::AddressMapper::to_fallback_account_id(&signer); + let signer = ::AddressMapper::to_fallback_account_id(&signer); let GenericTransaction { nonce, chain_id, to, value, input, gas, gas_price, .. } = GenericTransaction::from_signed(tx, None); - if chain_id.unwrap_or_default() != ::ChainId::get().into() { + let Some(gas) = gas else { + log::debug!(target: LOG_TARGET, "No gas provided"); + return Err(InvalidTransaction::Call); + }; + + if chain_id.unwrap_or_default() != ::ChainId::get().into() { log::debug!(target: LOG_TARGET, "Invalid chain_id {chain_id:?}"); return Err(InvalidTransaction::Call); } @@ -324,6 +341,13 @@ pub trait EthExtra { })?; let data = input.unwrap_or_default().0; + + let (gas_limit, storage_deposit_limit) = + ::EthGasEncoder::decode(gas).ok_or_else(|| { + log::debug!(target: LOG_TARGET, "Failed to decode gas: {gas:?}"); + InvalidTransaction::Call + })?; + let call = if let Some(dest) = to { crate::Call::call:: { dest, @@ -359,13 +383,13 @@ pub trait EthExtra { // Fees calculated with the fixed `GAS_PRICE` // When we dry-run the transaction, we set the gas to `Fee / GAS_PRICE` let eth_fee_no_tip = U256::from(GAS_PRICE) - .saturating_mul(gas.unwrap_or_default()) + .saturating_mul(gas) .try_into() .map_err(|_| InvalidTransaction::Call)?; // Fees with the actual gas_price from the transaction. 
let eth_fee: BalanceOf = U256::from(gas_price.unwrap_or_default()) - .saturating_mul(gas.unwrap_or_default()) + .saturating_mul(gas) .try_into() .map_err(|_| InvalidTransaction::Call)?; @@ -380,27 +404,17 @@ pub trait EthExtra { Default::default(), ) .into(); - log::trace!(target: LOG_TARGET, "try_into_checked_extrinsic: encoded_len: {encoded_len:?} actual_fee: {actual_fee:?} eth_fee: {eth_fee:?}"); + log::debug!(target: LOG_TARGET, "try_into_checked_extrinsic: gas_price: {gas_price:?}, encoded_len: {encoded_len:?} actual_fee: {actual_fee:?} eth_fee: {eth_fee:?}"); // The fees from the Ethereum transaction should be greater or equal to the actual fees paid // by the account. if eth_fee < actual_fee { - log::debug!(target: LOG_TARGET, "fees {eth_fee:?} too low for the extrinsic {actual_fee:?}"); + log::debug!(target: LOG_TARGET, "eth fees {eth_fee:?} too low, actual fees: {actual_fee:?}"); return Err(InvalidTransaction::Payment.into()) } - let min = actual_fee.min(eth_fee_no_tip); - let max = actual_fee.max(eth_fee_no_tip); - let diff = Percent::from_rational(max - min, min); - if diff > Percent::from_percent(10) { - log::trace!(target: LOG_TARGET, "Difference between the extrinsic fees {actual_fee:?} and the Ethereum gas fees {eth_fee_no_tip:?} should be no more than 10% got {diff:?}"); - return Err(InvalidTransaction::Call.into()) - } else { - log::trace!(target: LOG_TARGET, "Difference between the extrinsic fees {actual_fee:?} and the Ethereum gas fees {eth_fee_no_tip:?}: {diff:?}"); - } - let tip = eth_fee.saturating_sub(eth_fee_no_tip); - log::debug!(target: LOG_TARGET, "Created checked Ethereum transaction with nonce {nonce:?} and tip: {tip:?}"); + log::debug!(target: LOG_TARGET, "Created checked Ethereum transaction with nonce: {nonce:?} and tip: {tip:?}"); Ok(CheckedExtrinsic { format: ExtrinsicFormat::Signed(signer.into(), Self::get_eth_extension(nonce, tip)), function, @@ -415,6 +429,7 @@ mod test { evm::*, test_utils::*, tests::{ExtBuilder, RuntimeCall, RuntimeOrigin, Test}, + Weight, }; use frame_support::{error::LookupError, traits::fungible::Mutate}; use pallet_revive_fixtures::compile_module; @@ -456,8 +471,6 @@ mod test { #[derive(Clone)] struct UncheckedExtrinsicBuilder { tx: GenericTransaction, - gas_limit: Weight, - storage_deposit_limit: BalanceOf, before_validate: Option>, } @@ -467,12 +480,10 @@ mod test { Self { tx: GenericTransaction { from: Some(Account::default().address()), - chain_id: Some(::ChainId::get().into()), + chain_id: Some(::ChainId::get().into()), gas_price: Some(U256::from(GAS_PRICE)), ..Default::default() }, - gas_limit: Weight::zero(), - storage_deposit_limit: 0, before_validate: None, } } @@ -500,7 +511,6 @@ mod test { fn call_with(dest: H160) -> Self { let mut builder = Self::new(); builder.tx.to = Some(dest); - ExtBuilder::default().build().execute_with(|| builder.estimate_gas()); builder } @@ -508,45 +518,42 @@ mod test { fn instantiate_with(code: Vec, data: Vec) -> Self { let mut builder = Self::new(); builder.tx.input = Some(Bytes(code.into_iter().chain(data.into_iter()).collect())); - ExtBuilder::default().build().execute_with(|| builder.estimate_gas()); builder } - /// Update the transaction with the given function. - fn update(mut self, f: impl FnOnce(&mut GenericTransaction) -> ()) -> Self { - f(&mut self.tx); - self - } /// Set before_validate function. 
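+	/// The hook is intended to run just before the extrinsic is checked (see
+	/// `mutate_estimate_and_check`), so tests can perturb chain state first.
+	/// Sketch usage, with a hypothetical closure body:
+	///
+	/// ```ignore
+	/// let builder = builder.before_validate(|| {
+	/// 	// e.g. bump the signer's nonce so validation sees a stale transaction
+	/// });
+	/// ```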
fn before_validate(mut self, f: impl Fn() + Send + Sync + 'static) -> Self { self.before_validate = Some(std::sync::Arc::new(f)); self } + fn check( + self, + ) -> Result<(RuntimeCall, SignedExtra, GenericTransaction), TransactionValidityError> { + self.mutate_estimate_and_check(Box::new(|_| ())) + } + /// Call `check` on the unchecked extrinsic, and `pre_dispatch` on the signed extension. - fn check(&self) -> Result<(RuntimeCall, SignedExtra), TransactionValidityError> { + fn mutate_estimate_and_check( + mut self, + f: Box ()>, + ) -> Result<(RuntimeCall, SignedExtra, GenericTransaction), TransactionValidityError> { + ExtBuilder::default().build().execute_with(|| self.estimate_gas()); + f(&mut self.tx); ExtBuilder::default().build().execute_with(|| { - let UncheckedExtrinsicBuilder { - tx, - gas_limit, - storage_deposit_limit, - before_validate, - } = self.clone(); + let UncheckedExtrinsicBuilder { tx, before_validate, .. } = self.clone(); // Fund the account. let account = Account::default(); - let _ = ::Currency::set_balance( + let _ = ::Currency::set_balance( &account.substrate_account(), 100_000_000_000_000, ); - let payload = - account.sign_transaction(tx.try_into_unsigned().unwrap()).signed_payload(); - let call = RuntimeCall::Contracts(crate::Call::eth_transact { - payload, - gas_limit, - storage_deposit_limit, - }); + let payload = account + .sign_transaction(tx.clone().try_into_unsigned().unwrap()) + .signed_payload(); + let call = RuntimeCall::Contracts(crate::Call::eth_transact { payload }); let encoded_len = call.encoded_size(); let uxt: Ex = generic::UncheckedExtrinsic::new_bare(call).into(); @@ -565,7 +572,7 @@ mod test { 0, )?; - Ok((result.function, extra)) + Ok((result.function, extra, tx)) }) } } @@ -573,14 +580,18 @@ mod test { #[test] fn check_eth_transact_call_works() { let builder = UncheckedExtrinsicBuilder::call_with(H160::from([1u8; 20])); + let (call, _, tx) = builder.check().unwrap(); + let (gas_limit, storage_deposit_limit) = + <::EthGasEncoder as GasEncoder<_>>::decode(tx.gas.unwrap()).unwrap(); + assert_eq!( - builder.check().unwrap().0, + call, crate::Call::call:: { - dest: builder.tx.to.unwrap(), - value: builder.tx.value.unwrap_or_default().as_u64(), - gas_limit: builder.gas_limit, - storage_deposit_limit: builder.storage_deposit_limit, - data: builder.tx.input.unwrap_or_default().0 + dest: tx.to.unwrap(), + value: tx.value.unwrap_or_default().as_u64(), + data: tx.input.unwrap_or_default().0, + gas_limit, + storage_deposit_limit } .into() ); @@ -591,16 +602,19 @@ mod test { let (code, _) = compile_module("dummy").unwrap(); let data = vec![]; let builder = UncheckedExtrinsicBuilder::instantiate_with(code.clone(), data.clone()); + let (call, _, tx) = builder.check().unwrap(); + let (gas_limit, storage_deposit_limit) = + <::EthGasEncoder as GasEncoder<_>>::decode(tx.gas.unwrap()).unwrap(); assert_eq!( - builder.check().unwrap().0, + call, crate::Call::instantiate_with_code:: { - value: builder.tx.value.unwrap_or_default().as_u64(), - gas_limit: builder.gas_limit, - storage_deposit_limit: builder.storage_deposit_limit, + value: tx.value.unwrap_or_default().as_u64(), code, data, - salt: None + salt: None, + gas_limit, + storage_deposit_limit } .into() ); @@ -608,11 +622,10 @@ mod test { #[test] fn check_eth_transact_nonce_works() { - let builder = UncheckedExtrinsicBuilder::call_with(H160::from([1u8; 20])) - .update(|tx| tx.nonce = Some(1u32.into())); + let builder = UncheckedExtrinsicBuilder::call_with(H160::from([1u8; 20])); assert_eq!( - builder.check(), + 
builder.mutate_estimate_and_check(Box::new(|tx| tx.nonce = Some(1u32.into()))), Err(TransactionValidityError::Invalid(InvalidTransaction::Future)) ); @@ -629,11 +642,10 @@ mod test { #[test] fn check_eth_transact_chain_id_works() { - let builder = UncheckedExtrinsicBuilder::call_with(H160::from([1u8; 20])) - .update(|tx| tx.chain_id = Some(42.into())); + let builder = UncheckedExtrinsicBuilder::call_with(H160::from([1u8; 20])); assert_eq!( - builder.check(), + builder.mutate_estimate_and_check(Box::new(|tx| tx.chain_id = Some(42.into()))), Err(TransactionValidityError::Invalid(InvalidTransaction::Call)) ); } @@ -646,14 +658,14 @@ mod test { // Fail because the tx input fail to get the blob length assert_eq!( - builder.clone().update(|tx| tx.input = Some(Bytes(vec![1, 2, 3]))).check(), + builder.mutate_estimate_and_check(Box::new(|tx| tx.input = Some(Bytes(vec![1, 2, 3])))), Err(TransactionValidityError::Invalid(InvalidTransaction::Call)) ); } #[test] fn check_transaction_fees() { - let scenarios: [(_, Box, _); 5] = [ + let scenarios: Vec<(_, Box, _)> = vec![ ( "Eth fees too low", Box::new(|tx| { @@ -661,42 +673,20 @@ mod test { }), InvalidTransaction::Payment, ), - ( - "Gas fees too high", - Box::new(|tx| { - tx.gas = Some(tx.gas.unwrap() * 2); - }), - InvalidTransaction::Call, - ), ( "Gas fees too low", Box::new(|tx| { - tx.gas = Some(tx.gas.unwrap() * 2); - }), - InvalidTransaction::Call, - ), - ( - "Diff > 10%", - Box::new(|tx| { - tx.gas = Some(tx.gas.unwrap() * 111 / 100); + tx.gas = Some(tx.gas.unwrap() / 2); }), - InvalidTransaction::Call, - ), - ( - "Diff < 10%", - Box::new(|tx| { - tx.gas_price = Some(tx.gas_price.unwrap() * 2); - tx.gas = Some(tx.gas.unwrap() * 89 / 100); - }), - InvalidTransaction::Call, + InvalidTransaction::Payment, ), ]; for (msg, update_tx, err) in scenarios { - let builder = - UncheckedExtrinsicBuilder::call_with(H160::from([1u8; 20])).update(update_tx); + let res = UncheckedExtrinsicBuilder::call_with(H160::from([1u8; 20])) + .mutate_estimate_and_check(update_tx); - assert_eq!(builder.check(), Err(TransactionValidityError::Invalid(err)), "{}", msg); + assert_eq!(res, Err(TransactionValidityError::Invalid(err)), "{}", msg); } } @@ -704,16 +694,16 @@ mod test { fn check_transaction_tip() { let (code, _) = compile_module("dummy").unwrap(); let data = vec![]; - let builder = UncheckedExtrinsicBuilder::instantiate_with(code.clone(), data.clone()) - .update(|tx| { - tx.gas_price = Some(tx.gas_price.unwrap() * 103 / 100); - log::debug!(target: LOG_TARGET, "Gas price: {:?}", tx.gas_price); - }); + let (_, extra, tx) = + UncheckedExtrinsicBuilder::instantiate_with(code.clone(), data.clone()) + .mutate_estimate_and_check(Box::new(|tx| { + tx.gas_price = Some(tx.gas_price.unwrap() * 103 / 100); + log::debug!(target: LOG_TARGET, "Gas price: {:?}", tx.gas_price); + })) + .unwrap(); - let tx = &builder.tx; let expected_tip = tx.gas_price.unwrap() * tx.gas.unwrap() - U256::from(GAS_PRICE) * tx.gas.unwrap(); - let (_, extra) = builder.check().unwrap(); assert_eq!(U256::from(extra.1.tip()), expected_tip); } } diff --git a/substrate/frame/revive/src/lib.rs b/substrate/frame/revive/src/lib.rs index b9a39e7ce4d3..04bce264a188 100644 --- a/substrate/frame/revive/src/lib.rs +++ b/substrate/frame/revive/src/lib.rs @@ -41,7 +41,10 @@ pub mod test_utils; pub mod weights; use crate::{ - evm::{runtime::GAS_PRICE, GenericTransaction}, + evm::{ + runtime::{gas_from_fee, GAS_PRICE}, + GasEncoder, GenericTransaction, + }, exec::{AccountIdOf, ExecError, Executable, Ext, Key, Origin, 
diff --git a/substrate/frame/revive/src/lib.rs b/substrate/frame/revive/src/lib.rs
index b9a39e7ce4d3..04bce264a188 100644
--- a/substrate/frame/revive/src/lib.rs
+++ b/substrate/frame/revive/src/lib.rs
@@ -41,7 +41,10 @@ pub mod test_utils;
 pub mod weights;
 
 use crate::{
-	evm::{runtime::GAS_PRICE, GenericTransaction},
+	evm::{
+		runtime::{gas_from_fee, GAS_PRICE},
+		GasEncoder, GenericTransaction,
+	},
 	exec::{AccountIdOf, ExecError, Executable, Ext, Key, Origin, Stack as ExecStack},
 	gas::GasMeter,
 	storage::{meter::Meter as StorageMeter, ContractInfo, DeletionQueueManager},
@@ -295,6 +298,11 @@ pub mod pallet {
 		/// The ratio between the decimal representation of the native token and the ETH token.
 		#[pallet::constant]
 		type NativeToEthRatio: Get<u32>;
+
+		/// Encode and decode Ethereum gas values.
+		/// Only valid value is `()`. See [`GasEncoder`].
+		#[pallet::no_default_bounds]
+		type EthGasEncoder: GasEncoder<BalanceOf<Self>>;
 	}
 
 	/// Container for different types that implement [`DefaultConfig`]` of this pallet.
@@ -368,6 +376,7 @@ pub mod pallet {
 		type PVFMemory = ConstU32<{ 512 * 1024 * 1024 }>;
 		type ChainId = ConstU64<0>;
 		type NativeToEthRatio = ConstU32<1>;
+		type EthGasEncoder = ();
 	}
 }
@@ -560,6 +569,8 @@ pub mod pallet {
 		AccountUnmapped,
 		/// Tried to map an account that is already mapped.
 		AccountAlreadyMapped,
+		/// The transaction used to dry-run a contract is invalid.
+		InvalidGenericTransaction,
 	}
 
 	/// A reason for the pallet contracts placing a hold on funds.
@@ -761,12 +772,7 @@ pub mod pallet {
 		#[allow(unused_variables)]
 		#[pallet::call_index(0)]
 		#[pallet::weight(Weight::MAX)]
-		pub fn eth_transact(
-			origin: OriginFor<T>,
-			payload: Vec<u8>,
-			gas_limit: Weight,
-			#[pallet::compact] storage_deposit_limit: BalanceOf<T>,
-		) -> DispatchResultWithPostInfo {
+		pub fn eth_transact(origin: OriginFor<T>, payload: Vec<u8>) -> DispatchResultWithPostInfo {
 			Err(frame_system::Error::CallFiltered::<T>.into())
 		}
@@ -1406,11 +1412,8 @@ where
 			return Err(EthTransactError::Message("Invalid transaction".into()));
 		};
 
-		let eth_dispatch_call = crate::Call::<T>::eth_transact {
-			payload: unsigned_tx.dummy_signed_payload(),
-			gas_limit: result.gas_required,
-			storage_deposit_limit: result.storage_deposit,
-		};
+		let eth_dispatch_call =
+			crate::Call::<T>::eth_transact { payload: unsigned_tx.dummy_signed_payload() };
 		let encoded_len = utx_encoded_size(eth_dispatch_call);
 		let fee = pallet_transaction_payment::Pallet::<T>::compute_fee(
 			encoded_len,
@@ -1418,7 +1421,9 @@
 			0u32.into(),
 		)
 		.into();
-		let eth_gas: U256 = (fee / GAS_PRICE.into()).into();
+		let eth_gas = gas_from_fee(fee);
+		let eth_gas =
+			T::EthGasEncoder::encode(eth_gas, result.gas_required, result.storage_deposit);
 
 		if eth_gas == result.eth_gas {
 			log::trace!(target: LOG_TARGET, "bare_eth_call: encoded_len: {encoded_len:?} eth_gas: {eth_gas:?}");
diff --git a/substrate/frame/revive/src/wasm/mod.rs b/substrate/frame/revive/src/wasm/mod.rs
index b24de61314f9..3bd4bde5679f 100644
--- a/substrate/frame/revive/src/wasm/mod.rs
+++ b/substrate/frame/revive/src/wasm/mod.rs
@@ -193,8 +193,9 @@ where
 				&HoldReason::CodeUploadDepositReserve.into(),
 				&self.code_info.owner,
 				deposit,
-			) .map_err(|err| { log::debug!(target: LOG_TARGET, "failed to store code for owner: {:?}: {err:?}", self.code_info.owner);
-				<Error<T>>::StorageDepositNotEnoughFunds
+			) .map_err(|err| {
+				log::debug!(target: LOG_TARGET, "failed to hold store code deposit {deposit:?} for owner: {:?}: {err:?}", self.code_info.owner);
+				<Error<T>>::StorageDepositNotEnoughFunds
 			})?;
 		}
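The new `EthGasEncoder` associated type is exercised from two directions: `bare_eth_call` packs the dry-run weight and storage deposit into the returned eth gas value, and `check()` in the runtime tests unpacks them again. A property sketch of that contract, with signatures inferred from the call sites in this diff (`encode(eth_gas, gas_required, storage_deposit)` and `decode(tx.gas)`); `decode` returning `Option` is an assumption:

```rust
// A sketch of the round-trip the call sites rely on; not part of the patch.
fn encoder_round_trip<B, E>(eth_gas: U256, weight: Weight, deposit: B)
where
	B: Copy + PartialEq + core::fmt::Debug,
	E: GasEncoder<B>,
{
	// `bare_eth_call` packs the dry-run weight and deposit on top of the
	// fee-derived gas value...
	let encoded = E::encode(eth_gas, weight, deposit);
	// ...the packed value must still cover the original gas...
	assert!(encoded >= eth_gas);
	// ...and `check()` must recover both limits when decoding `tx.gas`.
	assert_eq!(E::decode(encoded), Some((weight, deposit)));
}
```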
diff --git a/substrate/frame/root-offences/src/mock.rs b/substrate/frame/root-offences/src/mock.rs
index a27fb36f64a6..7a96b8eade4e 100644
--- a/substrate/frame/root-offences/src/mock.rs
+++ b/substrate/frame/root-offences/src/mock.rs
@@ -25,7 +25,7 @@ use frame_election_provider_support::{
 };
 use frame_support::{
 	derive_impl, parameter_types,
-	traits::{ConstU32, ConstU64, Hooks, OneSessionHandler},
+	traits::{ConstU32, ConstU64, OneSessionHandler},
 };
 use pallet_staking::StakerStatus;
 use sp_runtime::{curve::PiecewiseLinear, testing::UintAuthorityId, traits::Zero, BuildStorage};
@@ -283,16 +283,12 @@ pub(crate) fn start_session(session_index: SessionIndex) {
 /// a block import/propose process where we first initialize the block, then execute some stuff (not
 /// in the function), and then finalize the block.
 pub(crate) fn run_to_block(n: BlockNumber) {
-	Staking::on_finalize(System::block_number());
-	for b in (System::block_number() + 1)..=n {
-		System::set_block_number(b);
-		Session::on_initialize(b);
-		<Staking as Hooks<u64>>::on_initialize(b);
-		Timestamp::set_timestamp(System::block_number() * BLOCK_TIME + INIT_TIMESTAMP);
-		if b != n {
-			Staking::on_finalize(System::block_number());
-		}
-	}
+	System::run_to_block_with::<AllPalletsWithSystem>(
+		n,
+		frame_system::RunToBlockHooks::default().after_initialize(|bn| {
+			Timestamp::set_timestamp(bn * BLOCK_TIME + INIT_TIMESTAMP);
+		}),
+	);
 }
 
 pub(crate) fn active_era() -> EraIndex {
diff --git a/substrate/frame/scheduler/src/mock.rs b/substrate/frame/scheduler/src/mock.rs
index 8d36ca1c42e3..43a964bcf149 100644
--- a/substrate/frame/scheduler/src/mock.rs
+++ b/substrate/frame/scheduler/src/mock.rs
@@ -22,7 +22,7 @@ use super::*;
 use crate as scheduler;
 use frame_support::{
 	derive_impl, ord_parameter_types, parameter_types,
-	traits::{ConstU32, Contains, EitherOfDiverse, EqualPrivilegeOnly, OnFinalize, OnInitialize},
+	traits::{ConstU32, Contains, EitherOfDiverse, EqualPrivilegeOnly},
 };
 use frame_system::{EnsureRoot, EnsureSignedBy};
 use sp_runtime::{BuildStorage, Perbill};
@@ -236,14 +236,6 @@ pub fn new_test_ext() -> sp_io::TestExternalities {
 	t.into()
 }
 
-pub fn run_to_block(n: u64) {
-	while System::block_number() < n {
-		Scheduler::on_finalize(System::block_number());
-		System::set_block_number(System::block_number() + 1);
-		Scheduler::on_initialize(System::block_number());
-	}
-}
-
 pub fn root() -> OriginCaller {
 	system::RawOrigin::Root.into()
 }
diff --git a/substrate/frame/scheduler/src/tests.rs b/substrate/frame/scheduler/src/tests.rs
index 3023a370a4b6..755223934108 100644
--- a/substrate/frame/scheduler/src/tests.rs
+++ b/substrate/frame/scheduler/src/tests.rs
@@ -20,7 +20,7 @@
 use super::*;
 use crate::mock::{
 	logger::{self, Threshold},
-	new_test_ext, root, run_to_block, LoggerCall, RuntimeCall, Scheduler, Test, *,
+	new_test_ext, root, LoggerCall, RuntimeCall, Scheduler, Test, *,
 };
 use frame_support::{
 	assert_err, assert_noop, assert_ok,
@@ -52,14 +52,14 @@ fn basic_scheduling_works() {
 		));
 
 		// `log` runtime call should not have executed yet
-		run_to_block(3);
+		System::run_to_block::<AllPalletsWithSystem>(3);
 		assert!(logger::log().is_empty());
 
-		run_to_block(4);
+		System::run_to_block::<AllPalletsWithSystem>(4);
 		// `log` runtime call should have executed at block 4
 		assert_eq!(logger::log(), vec![(root(), 42u32)]);
 
-		run_to_block(100);
+		System::run_to_block::<AllPalletsWithSystem>(100);
 		assert_eq!(logger::log(), vec![(root(), 42u32)]);
 	});
 }
@@ -87,17 +87,17 @@ fn scheduling_with_preimages_works() {
 		assert!(Preimage::is_requested(&hash));
 
 		// `log` runtime call should not have executed yet
-		run_to_block(3);
+		System::run_to_block::<AllPalletsWithSystem>(3);
 		assert!(logger::log().is_empty());
 
-		run_to_block(4);
+		System::run_to_block::<AllPalletsWithSystem>(4);
 		// preimage should not have been removed when executed by the scheduler
 		assert!(!Preimage::len(&hash).is_some());
 		assert!(!Preimage::is_requested(&hash));
 		// `log` runtime call should have executed at block 4
 		assert_eq!(logger::log(), vec![(root(), 42u32)]);
 
-		run_to_block(100);
+		System::run_to_block::<AllPalletsWithSystem>(100);
 		assert_eq!(logger::log(), vec![(root(), 42u32)]);
 	});
 }
@@ -105,7 +105,7 @@ fn scheduling_with_preimages_works() {
 #[test]
 fn
schedule_after_works() { new_test_ext().execute_with(|| { - run_to_block(2); + System::run_to_block::(2); let call = RuntimeCall::Logger(LoggerCall::log { i: 42, weight: Weight::from_parts(10, 0) }); assert!(!::BaseCallFilter::contains(&call)); @@ -117,11 +117,11 @@ fn schedule_after_works() { root(), Preimage::bound(call).unwrap() )); - run_to_block(5); + System::run_to_block::(5); assert!(logger::log().is_empty()); - run_to_block(6); + System::run_to_block::(6); assert_eq!(logger::log(), vec![(root(), 42u32)]); - run_to_block(100); + System::run_to_block::(100); assert_eq!(logger::log(), vec![(root(), 42u32)]); }); } @@ -129,7 +129,7 @@ fn schedule_after_works() { #[test] fn schedule_after_zero_works() { new_test_ext().execute_with(|| { - run_to_block(2); + System::run_to_block::(2); let call = RuntimeCall::Logger(LoggerCall::log { i: 42, weight: Weight::from_parts(10, 0) }); assert!(!::BaseCallFilter::contains(&call)); @@ -141,9 +141,9 @@ fn schedule_after_zero_works() { Preimage::bound(call).unwrap() )); // Will trigger on the next block. - run_to_block(3); + System::run_to_block::(3); assert_eq!(logger::log(), vec![(root(), 42u32)]); - run_to_block(100); + System::run_to_block::(100); assert_eq!(logger::log(), vec![(root(), 42u32)]); }); } @@ -163,19 +163,19 @@ fn periodic_scheduling_works() { })) .unwrap() )); - run_to_block(3); + System::run_to_block::(3); assert!(logger::log().is_empty()); - run_to_block(4); + System::run_to_block::(4); assert_eq!(logger::log(), vec![(root(), 42u32)]); - run_to_block(6); + System::run_to_block::(6); assert_eq!(logger::log(), vec![(root(), 42u32)]); - run_to_block(7); + System::run_to_block::(7); assert_eq!(logger::log(), vec![(root(), 42u32), (root(), 42u32)]); - run_to_block(9); + System::run_to_block::(9); assert_eq!(logger::log(), vec![(root(), 42u32), (root(), 42u32)]); - run_to_block(10); + System::run_to_block::(10); assert_eq!(logger::log(), vec![(root(), 42u32), (root(), 42u32), (root(), 42u32)]); - run_to_block(100); + System::run_to_block::(100); assert_eq!(logger::log(), vec![(root(), 42u32), (root(), 42u32), (root(), 42u32)]); }); } @@ -201,37 +201,37 @@ fn retry_scheduling_works() { // retry 10 times every 3 blocks assert_ok!(Scheduler::set_retry(root().into(), (4, 0), 10, 3)); assert_eq!(Retries::::iter().count(), 1); - run_to_block(3); + System::run_to_block::(3); assert!(logger::log().is_empty()); assert!(Agenda::::get(4)[0].is_some()); // task should be retried in block 7 - run_to_block(4); + System::run_to_block::(4); assert!(Agenda::::get(4).is_empty()); assert!(Agenda::::get(7)[0].is_some()); assert!(logger::log().is_empty()); - run_to_block(6); + System::run_to_block::(6); assert!(Agenda::::get(7)[0].is_some()); assert!(logger::log().is_empty()); // task still fails, should be retried in block 10 - run_to_block(7); + System::run_to_block::(7); assert!(Agenda::::get(7).is_empty()); assert!(Agenda::::get(10)[0].is_some()); assert!(logger::log().is_empty()); - run_to_block(8); + System::run_to_block::(8); assert!(Agenda::::get(10)[0].is_some()); assert!(logger::log().is_empty()); - run_to_block(9); + System::run_to_block::(9); assert!(logger::log().is_empty()); assert_eq!(Retries::::iter().count(), 1); // finally it should succeed - run_to_block(10); + System::run_to_block::(10); assert_eq!(logger::log(), vec![(root(), 42u32)]); assert_eq!(Retries::::iter().count(), 0); - run_to_block(11); + System::run_to_block::(11); assert_eq!(logger::log(), vec![(root(), 42u32)]); - run_to_block(12); + System::run_to_block::(12); 
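The change applied throughout these scheduler tests is mechanical: the mock's hand-rolled loop is removed, and every call site drives all runtime hooks through frame-system, passing the runtime's `AllPalletsWithSystem` tuple as the turbofish argument. Side by side:

```rust
// Before: each mock advanced blocks itself and only ran its own
// pallet's hooks (the scheduler mock's removed helper):
pub fn run_to_block(n: u64) {
	while System::block_number() < n {
		Scheduler::on_finalize(System::block_number());
		System::set_block_number(System::block_number() + 1);
		Scheduler::on_initialize(System::block_number());
	}
}

// After: the shared frame-system helper runs on_initialize/on_finalize
// for every pallet in the runtime:
System::run_to_block::<AllPalletsWithSystem>(n);
```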
assert_eq!(logger::log(), vec![(root(), 42u32)]); - run_to_block(100); + System::run_to_block::(100); assert_eq!(logger::log(), vec![(root(), 42u32)]); }); } @@ -262,37 +262,37 @@ fn named_retry_scheduling_works() { // retry 10 times every 3 blocks assert_ok!(Scheduler::set_retry_named(root().into(), [1u8; 32], 10, 3)); assert_eq!(Retries::::iter().count(), 1); - run_to_block(3); + System::run_to_block::(3); assert!(logger::log().is_empty()); assert!(Agenda::::get(4)[0].is_some()); // task should be retried in block 7 - run_to_block(4); + System::run_to_block::(4); assert!(Agenda::::get(4).is_empty()); assert!(Agenda::::get(7)[0].is_some()); assert!(logger::log().is_empty()); - run_to_block(6); + System::run_to_block::(6); assert!(Agenda::::get(7)[0].is_some()); assert!(logger::log().is_empty()); // task still fails, should be retried in block 10 - run_to_block(7); + System::run_to_block::(7); assert!(Agenda::::get(7).is_empty()); assert!(Agenda::::get(10)[0].is_some()); assert!(logger::log().is_empty()); - run_to_block(8); + System::run_to_block::(8); assert!(Agenda::::get(10)[0].is_some()); assert!(logger::log().is_empty()); - run_to_block(9); + System::run_to_block::(9); assert!(logger::log().is_empty()); assert_eq!(Retries::::iter().count(), 1); // finally it should succeed - run_to_block(10); + System::run_to_block::(10); assert_eq!(logger::log(), vec![(root(), 42u32)]); assert_eq!(Retries::::iter().count(), 0); - run_to_block(11); + System::run_to_block::(11); assert_eq!(logger::log(), vec![(root(), 42u32)]); - run_to_block(12); + System::run_to_block::(12); assert_eq!(logger::log(), vec![(root(), 42u32)]); - run_to_block(100); + System::run_to_block::(100); assert_eq!(logger::log(), vec![(root(), 42u32)]); }); } @@ -333,11 +333,11 @@ fn retry_scheduling_multiple_tasks_works() { // task 42 will be retried 10 times every 3 blocks assert_ok!(Scheduler::set_retry(root().into(), (4, 1), 10, 3)); assert_eq!(Retries::::iter().count(), 2); - run_to_block(3); + System::run_to_block::(3); assert!(logger::log().is_empty()); assert_eq!(Agenda::::get(4).len(), 2); // both tasks fail - run_to_block(4); + System::run_to_block::(4); assert!(Agenda::::get(4).is_empty()); // 20 is rescheduled for next block assert_eq!(Agenda::::get(5).len(), 1); @@ -345,41 +345,41 @@ fn retry_scheduling_multiple_tasks_works() { assert_eq!(Agenda::::get(7).len(), 1); assert!(logger::log().is_empty()); // 20 still fails - run_to_block(5); + System::run_to_block::(5); // 20 rescheduled for next block assert_eq!(Agenda::::get(6).len(), 1); assert_eq!(Agenda::::get(7).len(), 1); assert_eq!(Retries::::iter().count(), 2); assert!(logger::log().is_empty()); // 20 still fails - run_to_block(6); + System::run_to_block::(6); // rescheduled for next block together with 42 assert_eq!(Agenda::::get(7).len(), 2); assert_eq!(Retries::::iter().count(), 2); assert!(logger::log().is_empty()); // both tasks will fail, for 20 it was the last retry so it's dropped - run_to_block(7); + System::run_to_block::(7); assert!(Agenda::::get(7).is_empty()); assert!(Agenda::::get(8).is_empty()); // 42 is rescheduled for block 10 assert_eq!(Agenda::::get(10).len(), 1); assert_eq!(Retries::::iter().count(), 1); assert!(logger::log().is_empty()); - run_to_block(8); + System::run_to_block::(8); assert_eq!(Agenda::::get(10).len(), 1); assert!(logger::log().is_empty()); - run_to_block(9); + System::run_to_block::(9); assert!(logger::log().is_empty()); assert_eq!(Retries::::iter().count(), 1); // 42 runs successfully - run_to_block(10); + 
System::run_to_block::(10); assert_eq!(logger::log(), vec![(root(), 42u32)]); assert_eq!(Retries::::iter().count(), 0); - run_to_block(11); + System::run_to_block::(11); assert_eq!(logger::log(), vec![(root(), 42u32)]); - run_to_block(12); + System::run_to_block::(12); assert_eq!(logger::log(), vec![(root(), 42u32)]); - run_to_block(100); + System::run_to_block::(100); assert_eq!(logger::log(), vec![(root(), 42u32)]); }); } @@ -422,11 +422,11 @@ fn retry_scheduling_multiple_named_tasks_works() { // task 42 will be retried 10 times every 3 block assert_ok!(Scheduler::set_retry_named(root().into(), [42u8; 32], 10, 3)); assert_eq!(Retries::::iter().count(), 2); - run_to_block(3); + System::run_to_block::(3); assert!(logger::log().is_empty()); assert_eq!(Agenda::::get(4).len(), 2); // both tasks fail - run_to_block(4); + System::run_to_block::(4); assert!(Agenda::::get(4).is_empty()); // 42 is rescheduled for block 7 assert_eq!(Agenda::::get(7).len(), 1); @@ -434,41 +434,41 @@ fn retry_scheduling_multiple_named_tasks_works() { assert_eq!(Agenda::::get(5).len(), 1); assert!(logger::log().is_empty()); // 20 still fails - run_to_block(5); + System::run_to_block::(5); // 20 rescheduled for next block assert_eq!(Agenda::::get(6).len(), 1); assert_eq!(Agenda::::get(7).len(), 1); assert_eq!(Retries::::iter().count(), 2); assert!(logger::log().is_empty()); // 20 still fails - run_to_block(6); + System::run_to_block::(6); // 20 rescheduled for next block together with 42 assert_eq!(Agenda::::get(7).len(), 2); assert_eq!(Retries::::iter().count(), 2); assert!(logger::log().is_empty()); // both tasks will fail, for 20 it was the last retry so it's dropped - run_to_block(7); + System::run_to_block::(7); assert!(Agenda::::get(7).is_empty()); assert!(Agenda::::get(8).is_empty()); // 42 is rescheduled for block 10 assert_eq!(Agenda::::get(10).len(), 1); assert_eq!(Retries::::iter().count(), 1); assert!(logger::log().is_empty()); - run_to_block(8); + System::run_to_block::(8); assert_eq!(Agenda::::get(10).len(), 1); assert!(logger::log().is_empty()); - run_to_block(9); + System::run_to_block::(9); assert!(logger::log().is_empty()); assert_eq!(Retries::::iter().count(), 1); // 42 runs successfully - run_to_block(10); + System::run_to_block::(10); assert_eq!(logger::log(), vec![(root(), 42u32)]); assert_eq!(Retries::::iter().count(), 0); - run_to_block(11); + System::run_to_block::(11); assert_eq!(logger::log(), vec![(root(), 42u32)]); - run_to_block(12); + System::run_to_block::(12); assert_eq!(logger::log(), vec![(root(), 42u32)]); - run_to_block(100); + System::run_to_block::(100); assert_eq!(logger::log(), vec![(root(), 42u32)]); }); } @@ -495,33 +495,33 @@ fn retry_scheduling_with_period_works() { // 42 will be retried 10 times every 2 blocks assert_ok!(Scheduler::set_retry(root().into(), (4, 0), 10, 2)); assert_eq!(Retries::::iter().count(), 1); - run_to_block(3); + System::run_to_block::(3); assert!(logger::log().is_empty()); assert!(Agenda::::get(4)[0].is_some()); // 42 runs successfully once, it will run again at block 7 - run_to_block(4); + System::run_to_block::(4); assert!(Agenda::::get(4).is_empty()); assert!(Agenda::::get(7)[0].is_some()); assert_eq!(Retries::::iter().count(), 1); assert_eq!(logger::log(), vec![(root(), 42u32)]); // nothing changed - run_to_block(6); + System::run_to_block::(6); assert!(Agenda::::get(7)[0].is_some()); assert_eq!(logger::log(), vec![(root(), 42u32)]); // 42 runs successfully again, it will run again at block 10 - run_to_block(7); + System::run_to_block::(7); 
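As an aside on the retry API these tests exercise, the argument meanings are inferred from the call sites: an agenda slot, a total number of attempts, and a block period between attempts. A short illustration grounded in `retry_scheduling_works` above:

```rust
// Scheduler::set_retry(origin, (when, index), retries, period):
//   (when, index): the agenda slot of the task to configure,
//   retries:       total retry attempts allowed after failed runs,
//   period:        blocks to wait between attempts.
assert_ok!(Scheduler::set_retry(root().into(), (4, 0), 10, 3));
// After the task fails at block 4, a retry clone is queued for block 7
// and tracked in the `Retries` storage map:
assert!(Agenda::<Test>::get(7)[0].is_some());
assert_eq!(Retries::<Test>::iter().count(), 1);
```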
assert!(Agenda::::get(7).is_empty()); assert!(Agenda::::get(10)[0].is_some()); assert_eq!(Retries::::iter().count(), 1); assert_eq!(logger::log(), vec![(root(), 42u32), (root(), 42u32)]); - run_to_block(9); + System::run_to_block::(9); assert!(Agenda::::get(10)[0].is_some()); assert_eq!(logger::log(), vec![(root(), 42u32), (root(), 42u32)]); // 42 has 10 retries left out of a total of 10 assert_eq!(Retries::::get((10, 0)).unwrap().remaining, 10); // 42 will fail because we're outside the set threshold (block number in `4..8`), so it // should be retried in 2 blocks (at block 12) - run_to_block(10); + System::run_to_block::(10); // should be queued for the normal period of 3 blocks assert!(Agenda::::get(13)[0].is_some()); // should also be queued to be retried in 2 blocks @@ -532,7 +532,7 @@ fn retry_scheduling_with_period_works() { assert_eq!(Retries::::iter().count(), 2); assert_eq!(logger::log(), vec![(root(), 42u32), (root(), 42u32)]); // 42 will fail again - run_to_block(12); + System::run_to_block::(12); // should still be queued for the normal period assert!(Agenda::::get(13)[0].is_some()); // should be queued to be retried in 2 blocks @@ -543,7 +543,7 @@ fn retry_scheduling_with_period_works() { assert_eq!(Retries::::iter().count(), 2); assert_eq!(logger::log(), vec![(root(), 42u32), (root(), 42u32)]); // 42 will fail for the regular periodic run - run_to_block(13); + System::run_to_block::(13); // should still be queued for the normal period assert!(Agenda::::get(16)[0].is_some()); // should still be queued to be retried next block @@ -560,7 +560,7 @@ fn retry_scheduling_with_period_works() { // change the threshold to allow the task to succeed Threshold::::put((14, 100)); // first retry should now succeed - run_to_block(14); + System::run_to_block::(14); assert!(Agenda::::get(15)[0].as_ref().unwrap().maybe_periodic.is_none()); assert_eq!(Agenda::::get(16).iter().filter(|entry| entry.is_some()).count(), 1); assert!(Agenda::::get(16)[0].is_some()); @@ -569,7 +569,7 @@ fn retry_scheduling_with_period_works() { assert_eq!(Retries::::iter().count(), 2); assert_eq!(logger::log(), vec![(root(), 42u32), (root(), 42u32), (root(), 42u32)]); // second retry should also succeed - run_to_block(15); + System::run_to_block::(15); assert_eq!(Agenda::::get(16).iter().filter(|entry| entry.is_some()).count(), 1); assert!(Agenda::::get(16)[0].is_some()); assert!(Agenda::::get(17).is_empty()); @@ -580,7 +580,7 @@ fn retry_scheduling_with_period_works() { vec![(root(), 42u32), (root(), 42u32), (root(), 42u32), (root(), 42u32)] ); // normal periodic run on block 16 will succeed - run_to_block(16); + System::run_to_block::(16); // next periodic run at block 19 assert!(Agenda::::get(19)[0].is_some()); assert!(Agenda::::get(18).is_empty()); @@ -598,7 +598,7 @@ fn retry_scheduling_with_period_works() { ] ); // final periodic run on block 19 will succeed - run_to_block(19); + System::run_to_block::(19); // next periodic run at block 19 assert_eq!(Agenda::::iter().count(), 0); assert_eq!(Retries::::iter().count(), 0); @@ -639,33 +639,33 @@ fn named_retry_scheduling_with_period_works() { // 42 will be retried 10 times every 2 blocks assert_ok!(Scheduler::set_retry_named(root().into(), [42u8; 32], 10, 2)); assert_eq!(Retries::::iter().count(), 1); - run_to_block(3); + System::run_to_block::(3); assert!(logger::log().is_empty()); assert!(Agenda::::get(4)[0].is_some()); // 42 runs successfully once, it will run again at block 7 - run_to_block(4); + System::run_to_block::(4); assert!(Agenda::::get(4).is_empty()); 
assert!(Agenda::::get(7)[0].is_some()); assert_eq!(Retries::::iter().count(), 1); assert_eq!(logger::log(), vec![(root(), 42u32)]); // nothing changed - run_to_block(6); + System::run_to_block::(6); assert!(Agenda::::get(7)[0].is_some()); assert_eq!(logger::log(), vec![(root(), 42u32)]); // 42 runs successfully again, it will run again at block 10 - run_to_block(7); + System::run_to_block::(7); assert!(Agenda::::get(7).is_empty()); assert!(Agenda::::get(10)[0].is_some()); assert_eq!(Retries::::iter().count(), 1); assert_eq!(logger::log(), vec![(root(), 42u32), (root(), 42u32)]); - run_to_block(9); + System::run_to_block::(9); assert!(Agenda::::get(10)[0].is_some()); assert_eq!(logger::log(), vec![(root(), 42u32), (root(), 42u32)]); // 42 has 10 retries left out of a total of 10 assert_eq!(Retries::::get((10, 0)).unwrap().remaining, 10); // 42 will fail because we're outside the set threshold (block number in `4..8`), so it // should be retried in 2 blocks (at block 12) - run_to_block(10); + System::run_to_block::(10); // should be queued for the normal period of 3 blocks assert!(Agenda::::get(13)[0].is_some()); // should also be queued to be retried in 2 blocks @@ -677,7 +677,7 @@ fn named_retry_scheduling_with_period_works() { assert_eq!(Lookup::::get([42u8; 32]).unwrap(), (13, 0)); assert_eq!(logger::log(), vec![(root(), 42u32), (root(), 42u32)]); // 42 will fail again - run_to_block(12); + System::run_to_block::(12); // should still be queued for the normal period assert!(Agenda::::get(13)[0].is_some()); // should be queued to be retried in 2 blocks @@ -688,7 +688,7 @@ fn named_retry_scheduling_with_period_works() { assert_eq!(Retries::::iter().count(), 2); assert_eq!(logger::log(), vec![(root(), 42u32), (root(), 42u32)]); // 42 will fail for the regular periodic run - run_to_block(13); + System::run_to_block::(13); // should still be queued for the normal period assert!(Agenda::::get(16)[0].is_some()); // should still be queued to be retried next block @@ -706,7 +706,7 @@ fn named_retry_scheduling_with_period_works() { // change the threshold to allow the task to succeed Threshold::::put((14, 100)); // first retry should now succeed - run_to_block(14); + System::run_to_block::(14); assert!(Agenda::::get(15)[0].as_ref().unwrap().maybe_periodic.is_none()); assert_eq!(Agenda::::get(16).iter().filter(|entry| entry.is_some()).count(), 1); assert!(Agenda::::get(16)[0].is_some()); @@ -715,7 +715,7 @@ fn named_retry_scheduling_with_period_works() { assert_eq!(Retries::::iter().count(), 2); assert_eq!(logger::log(), vec![(root(), 42u32), (root(), 42u32), (root(), 42u32)]); // second retry should also succeed - run_to_block(15); + System::run_to_block::(15); assert_eq!(Agenda::::get(16).iter().filter(|entry| entry.is_some()).count(), 1); assert!(Agenda::::get(16)[0].is_some()); assert!(Agenda::::get(17).is_empty()); @@ -727,7 +727,7 @@ fn named_retry_scheduling_with_period_works() { vec![(root(), 42u32), (root(), 42u32), (root(), 42u32), (root(), 42u32)] ); // normal periodic run on block 16 will succeed - run_to_block(16); + System::run_to_block::(16); // next periodic run at block 19 assert!(Agenda::::get(19)[0].is_some()); assert!(Agenda::::get(18).is_empty()); @@ -746,7 +746,7 @@ fn named_retry_scheduling_with_period_works() { ] ); // final periodic run on block 19 will succeed - run_to_block(19); + System::run_to_block::(19); // next periodic run at block 19 assert_eq!(Agenda::::iter().count(), 0); assert_eq!(Retries::::iter().count(), 0); @@ -786,12 +786,12 @@ fn retry_scheduling_expires() 
{ // task 42 will be retried 3 times every block assert_ok!(Scheduler::set_retry(root().into(), (4, 0), 3, 1)); assert_eq!(Retries::::iter().count(), 1); - run_to_block(3); + System::run_to_block::(3); assert!(logger::log().is_empty()); // task 42 is scheduled for next block assert!(Agenda::::get(4)[0].is_some()); // task fails because we're past block 3 - run_to_block(4); + System::run_to_block::(4); // task is scheduled for next block assert!(Agenda::::get(4).is_empty()); assert!(Agenda::::get(5)[0].is_some()); @@ -799,7 +799,7 @@ fn retry_scheduling_expires() { assert_eq!(Retries::::get((5, 0)).unwrap().remaining, 2); assert!(logger::log().is_empty()); // task fails again - run_to_block(5); + System::run_to_block::(5); // task is scheduled for next block assert!(Agenda::::get(5).is_empty()); assert!(Agenda::::get(6)[0].is_some()); @@ -807,7 +807,7 @@ fn retry_scheduling_expires() { assert_eq!(Retries::::get((6, 0)).unwrap().remaining, 1); assert!(logger::log().is_empty()); // task fails again - run_to_block(6); + System::run_to_block::(6); // task is scheduled for next block assert!(Agenda::::get(6).is_empty()); assert!(Agenda::::get(7)[0].is_some()); @@ -815,7 +815,7 @@ fn retry_scheduling_expires() { assert_eq!(Retries::::get((7, 0)).unwrap().remaining, 0); assert!(logger::log().is_empty()); // task fails again - run_to_block(7); + System::run_to_block::(7); // task ran out of retries so it gets dropped assert_eq!(Agenda::::iter().count(), 0); assert_eq!(Retries::::iter().count(), 0); @@ -949,17 +949,17 @@ fn retry_periodic_full_cycle() { // 42 will be retried 2 times every block assert_ok!(Scheduler::set_retry_named(root().into(), [42u8; 32], 2, 1)); assert_eq!(Retries::::iter().count(), 1); - run_to_block(9); + System::run_to_block::(9); assert!(logger::log().is_empty()); assert!(Agenda::::get(10)[0].is_some()); // 42 runs successfully once, it will run again at block 110 - run_to_block(10); + System::run_to_block::(10); assert!(Agenda::::get(10).is_empty()); assert!(Agenda::::get(110)[0].is_some()); assert_eq!(Retries::::iter().count(), 1); assert_eq!(logger::log(), vec![(root(), 42u32)]); // nothing changed - run_to_block(109); + System::run_to_block::(109); assert!(Agenda::::get(110)[0].is_some()); // original task still has 2 remaining retries assert_eq!(Retries::::get((110, 0)).unwrap().remaining, 2); @@ -968,7 +968,7 @@ fn retry_periodic_full_cycle() { Threshold::::put((1, 2)); // 42 will fail because we're outside the set threshold (block number in `1..2`), so it // should be retried next block (at block 111) - run_to_block(110); + System::run_to_block::(110); // should be queued for the normal period of 100 blocks assert!(Agenda::::get(210)[0].is_some()); // should also be queued to be retried next block @@ -980,7 +980,7 @@ fn retry_periodic_full_cycle() { assert_eq!(Retries::::iter().count(), 2); assert_eq!(logger::log(), vec![(root(), 42u32)]); // 42 retry will fail again - run_to_block(111); + System::run_to_block::(111); // should still be queued for the normal period assert!(Agenda::::get(210)[0].is_some()); // should be queued to be retried next block @@ -991,20 +991,20 @@ fn retry_periodic_full_cycle() { assert_eq!(Retries::::iter().count(), 2); assert_eq!(logger::log(), vec![(root(), 42u32)]); // 42 retry will fail again - run_to_block(112); + System::run_to_block::(112); // should still be queued for the normal period assert!(Agenda::::get(210)[0].is_some()); // 42 retry clone ran out of retries, must have been evicted assert_eq!(Agenda::::iter().count(), 1); // 
advance - run_to_block(209); + System::run_to_block::(209); // should still be queued for the normal period assert!(Agenda::::get(210)[0].is_some()); // 42 retry clone ran out of retries, must have been evicted assert_eq!(Agenda::::iter().count(), 1); // 42 should fail again and should spawn another retry clone - run_to_block(210); + System::run_to_block::(210); // should be queued for the normal period of 100 blocks assert!(Agenda::::get(310)[0].is_some()); // should also be queued to be retried next block @@ -1018,7 +1018,7 @@ fn retry_periodic_full_cycle() { // make 42 run successfully again Threshold::::put((1, 1000)); // 42 retry clone should now succeed - run_to_block(211); + System::run_to_block::(211); // should be queued for the normal period of 100 blocks assert!(Agenda::::get(310)[0].is_some()); // retry was successful, retry task should have been discarded @@ -1029,7 +1029,7 @@ fn retry_periodic_full_cycle() { assert_eq!(logger::log(), vec![(root(), 42u32), (root(), 42u32)]); // fast forward to the last periodic run of 42 - run_to_block(310); + System::run_to_block::(310); // 42 was successful, the period ended as this was the 4th scheduled periodic run so 42 must // have been discarded assert_eq!(Agenda::::iter().count(), 0); @@ -1057,7 +1057,7 @@ fn reschedule_works() { (4, 0) ); - run_to_block(3); + System::run_to_block::(3); assert!(logger::log().is_empty()); assert_eq!(Scheduler::do_reschedule((4, 0), DispatchTime::At(6)).unwrap(), (6, 0)); @@ -1067,13 +1067,13 @@ fn reschedule_works() { Error::::RescheduleNoChange ); - run_to_block(4); + System::run_to_block::(4); assert!(logger::log().is_empty()); - run_to_block(6); + System::run_to_block::(6); assert_eq!(logger::log(), vec![(root(), 42u32)]); - run_to_block(100); + System::run_to_block::(100); assert_eq!(logger::log(), vec![(root(), 42u32)]); }); } @@ -1097,7 +1097,7 @@ fn reschedule_named_works() { (4, 0) ); - run_to_block(3); + System::run_to_block::(3); assert!(logger::log().is_empty()); assert_eq!(Scheduler::do_reschedule_named([1u8; 32], DispatchTime::At(6)).unwrap(), (6, 0)); @@ -1107,13 +1107,13 @@ fn reschedule_named_works() { Error::::RescheduleNoChange ); - run_to_block(4); + System::run_to_block::(4); assert!(logger::log().is_empty()); - run_to_block(6); + System::run_to_block::(6); assert_eq!(logger::log(), vec![(root(), 42u32)]); - run_to_block(100); + System::run_to_block::(100); assert_eq!(logger::log(), vec![(root(), 42u32)]); }); } @@ -1137,16 +1137,16 @@ fn reschedule_named_periodic_works() { (4, 0) ); - run_to_block(3); + System::run_to_block::(3); assert!(logger::log().is_empty()); assert_eq!(Scheduler::do_reschedule_named([1u8; 32], DispatchTime::At(5)).unwrap(), (5, 0)); assert_eq!(Scheduler::do_reschedule_named([1u8; 32], DispatchTime::At(6)).unwrap(), (6, 0)); - run_to_block(5); + System::run_to_block::(5); assert!(logger::log().is_empty()); - run_to_block(6); + System::run_to_block::(6); assert_eq!(logger::log(), vec![(root(), 42u32)]); assert_eq!( @@ -1154,16 +1154,16 @@ fn reschedule_named_periodic_works() { (10, 0) ); - run_to_block(9); + System::run_to_block::(9); assert_eq!(logger::log(), vec![(root(), 42u32)]); - run_to_block(10); + System::run_to_block::(10); assert_eq!(logger::log(), vec![(root(), 42u32), (root(), 42u32)]); - run_to_block(13); + System::run_to_block::(13); assert_eq!(logger::log(), vec![(root(), 42u32), (root(), 42u32), (root(), 42u32)]); - run_to_block(100); + System::run_to_block::(100); assert_eq!(logger::log(), vec![(root(), 42u32), (root(), 42u32), (root(), 
42u32)]); }); } @@ -1197,11 +1197,11 @@ fn cancel_named_scheduling_works_with_normal_cancel() { .unwrap(), ) .unwrap(); - run_to_block(3); + System::run_to_block::(3); assert!(logger::log().is_empty()); assert_ok!(Scheduler::do_cancel_named(None, [1u8; 32])); assert_ok!(Scheduler::do_cancel(None, i)); - run_to_block(100); + System::run_to_block::(100); assert!(logger::log().is_empty()); }); } @@ -1251,13 +1251,13 @@ fn cancel_named_periodic_scheduling_works() { .unwrap(), ) .unwrap(); - run_to_block(3); + System::run_to_block::(3); assert!(logger::log().is_empty()); - run_to_block(4); + System::run_to_block::(4); assert_eq!(logger::log(), vec![(root(), 42u32)]); - run_to_block(6); + System::run_to_block::(6); assert_ok!(Scheduler::do_cancel_named(None, [1u8; 32])); - run_to_block(100); + System::run_to_block::(100); assert_eq!(logger::log(), vec![(root(), 42u32), (root(), 69u32)]); }); } @@ -1283,9 +1283,9 @@ fn scheduler_respects_weight_limits() { Preimage::bound(call).unwrap(), )); // 69 and 42 do not fit together - run_to_block(4); + System::run_to_block::(4); assert_eq!(logger::log(), vec![(root(), 42u32)]); - run_to_block(5); + System::run_to_block::(5); assert_eq!(logger::log(), vec![(root(), 42u32), (root(), 69u32)]); }); } @@ -1316,26 +1316,26 @@ fn retry_respects_weight_limits() { // set a retry config for 20 for 10 retries every block assert_ok!(Scheduler::set_retry(root().into(), (4, 0), 10, 1)); // 20 should fail and be retried later - run_to_block(4); + System::run_to_block::(4); assert!(Agenda::::get(5)[0].is_some()); assert!(Agenda::::get(8)[0].is_some()); assert_eq!(Retries::::iter().count(), 1); assert!(logger::log().is_empty()); // 20 still fails but is scheduled next block together with 42 - run_to_block(7); + System::run_to_block::(7); assert_eq!(Agenda::::get(8).len(), 2); assert_eq!(Retries::::iter().count(), 1); assert!(logger::log().is_empty()); // 20 and 42 do not fit together // 42 is executed as it was first in the queue // 20 is still on the 8th block's agenda - run_to_block(8); + System::run_to_block::(8); assert!(Agenda::::get(8)[0].is_none()); assert!(Agenda::::get(8)[1].is_some()); assert_eq!(Retries::::iter().count(), 1); assert_eq!(logger::log(), vec![(root(), 42u32)]); // 20 is executed and the schedule is cleared - run_to_block(9); + System::run_to_block::(9); assert_eq!(Agenda::::iter().count(), 0); assert_eq!(Retries::::iter().count(), 0); assert_eq!(logger::log(), vec![(root(), 42u32), (root(), 20u32)]); @@ -1386,7 +1386,7 @@ fn try_schedule_retry_respects_weight_limits() { // set a retry config for 20 for 10 retries every block assert_ok!(Scheduler::set_retry(root().into(), (4, 0), 10, 1)); // 20 should fail and, because of insufficient weight, it should not be scheduled again - run_to_block(4); + System::run_to_block::(4); // nothing else should be scheduled assert_eq!(Agenda::::iter().count(), 0); assert_eq!(Retries::::iter().count(), 0); @@ -1415,7 +1415,7 @@ fn scheduler_does_not_delete_permanently_overweight_call() { Preimage::bound(call).unwrap(), )); // Never executes. - run_to_block(100); + System::run_to_block::(100); assert_eq!(logger::log(), vec![]); // Assert the `PermanentlyOverweight` event. @@ -1445,7 +1445,7 @@ fn scheduler_handles_periodic_failure() { bound.clone(), )); // Executes 5 times till block 20. - run_to_block(20); + System::run_to_block::(20); assert_eq!(logger::log().len(), 5); // Block 28 will already be full. 
@@ -1460,7 +1460,7 @@ fn scheduler_handles_periodic_failure() { } // Going to block 24 will emit a `PeriodicFailed` event. - run_to_block(24); + System::run_to_block::(24); assert_eq!(logger::log().len(), 6); assert_eq!( @@ -1498,7 +1498,7 @@ fn scheduler_handles_periodic_unavailable_preimage() { assert_ok!(Preimage::note_preimage(RuntimeOrigin::signed(1), call.encode())); // Executes 1 times till block 4. - run_to_block(4); + System::run_to_block::(4); assert_eq!(logger::log().len(), 1); // As the public api doesn't support to remove a noted preimage, we need to first unnote it @@ -1508,7 +1508,7 @@ fn scheduler_handles_periodic_unavailable_preimage() { Preimage::request(&hash); // Does not ever execute again. - run_to_block(100); + System::run_to_block::(100); assert_eq!(logger::log().len(), 1); // The preimage is not requested anymore. @@ -1536,7 +1536,7 @@ fn scheduler_respects_priority_ordering() { root(), Preimage::bound(call).unwrap(), )); - run_to_block(4); + System::run_to_block::(4); assert_eq!(logger::log(), vec![(root(), 69u32), (root(), 42u32)]); }); } @@ -1571,10 +1571,10 @@ fn scheduler_respects_priority_ordering_with_soft_deadlines() { )); // 2600 does not fit with 69 or 42, but has higher priority, so will go through - run_to_block(4); + System::run_to_block::(4); assert_eq!(logger::log(), vec![(root(), 2600u32)]); // 69 and 42 fit together - run_to_block(5); + System::run_to_block::(5); assert_eq!(logger::log(), vec![(root(), 2600u32), (root(), 69u32), (root(), 42u32)]); }); } @@ -1701,14 +1701,14 @@ fn root_calls_works() { Scheduler::schedule_named(RuntimeOrigin::root(), [1u8; 32], 4, None, 127, call,) ); assert_ok!(Scheduler::schedule(RuntimeOrigin::root(), 4, None, 127, call2)); - run_to_block(3); + System::run_to_block::(3); // Scheduled calls are in the agenda. assert_eq!(Agenda::::get(4).len(), 2); assert!(logger::log().is_empty()); assert_ok!(Scheduler::cancel_named(RuntimeOrigin::root(), [1u8; 32])); assert_ok!(Scheduler::cancel(RuntimeOrigin::root(), 4, 1)); // Scheduled calls are made NONE, so should not effect state - run_to_block(100); + System::run_to_block::(100); assert!(logger::log().is_empty()); }); } @@ -1716,7 +1716,7 @@ fn root_calls_works() { #[test] fn fails_to_schedule_task_in_the_past() { new_test_ext().execute_with(|| { - run_to_block(3); + System::run_to_block::(3); let call1 = Box::new(RuntimeCall::Logger(LoggerCall::log { i: 69, @@ -1768,14 +1768,14 @@ fn should_use_origin() { call, )); assert_ok!(Scheduler::schedule(system::RawOrigin::Signed(1).into(), 4, None, 127, call2,)); - run_to_block(3); + System::run_to_block::(3); // Scheduled calls are in the agenda. assert_eq!(Agenda::::get(4).len(), 2); assert!(logger::log().is_empty()); assert_ok!(Scheduler::cancel_named(system::RawOrigin::Signed(1).into(), [1u8; 32])); assert_ok!(Scheduler::cancel(system::RawOrigin::Signed(1).into(), 4, 1)); // Scheduled calls are made NONE, so should not effect state - run_to_block(100); + System::run_to_block::(100); assert!(logger::log().is_empty()); }); } @@ -1829,7 +1829,7 @@ fn should_check_origin_for_cancel() { call, )); assert_ok!(Scheduler::schedule(system::RawOrigin::Signed(1).into(), 4, None, 127, call2,)); - run_to_block(3); + System::run_to_block::(3); // Scheduled calls are in the agenda. 
assert_eq!(Agenda::::get(4).len(), 2); assert!(logger::log().is_empty()); @@ -1840,7 +1840,7 @@ fn should_check_origin_for_cancel() { assert_noop!(Scheduler::cancel(system::RawOrigin::Signed(2).into(), 4, 1), BadOrigin); assert_noop!(Scheduler::cancel_named(system::RawOrigin::Root.into(), [1u8; 32]), BadOrigin); assert_noop!(Scheduler::cancel(system::RawOrigin::Root.into(), 4, 1), BadOrigin); - run_to_block(5); + System::run_to_block::(5); assert_eq!( logger::log(), vec![ @@ -1888,17 +1888,17 @@ fn cancel_removes_retry_entry() { // task 42 will be retried 10 times every 3 blocks assert_ok!(Scheduler::set_retry_named(root().into(), [1u8; 32], 10, 1)); assert_eq!(Retries::::iter().count(), 2); - run_to_block(3); + System::run_to_block::(3); assert!(logger::log().is_empty()); assert_eq!(Agenda::::get(4).len(), 2); // both tasks fail - run_to_block(4); + System::run_to_block::(4); assert!(Agenda::::get(4).is_empty()); // 42 and 20 are rescheduled for next block assert_eq!(Agenda::::get(5).len(), 2); assert!(logger::log().is_empty()); // 42 and 20 still fail - run_to_block(5); + System::run_to_block::(5); // 42 and 20 rescheduled for next block assert_eq!(Agenda::::get(6).len(), 2); assert_eq!(Retries::::iter().count(), 2); @@ -1909,7 +1909,7 @@ fn cancel_removes_retry_entry() { assert!(Scheduler::cancel(root().into(), 6, 0).is_ok()); // 20 is removed, 42 still fails - run_to_block(6); + System::run_to_block::(6); // 42 rescheduled for next block assert_eq!(Agenda::::get(7).len(), 1); // 20's retry entry is removed @@ -1920,7 +1920,7 @@ fn cancel_removes_retry_entry() { assert!(Scheduler::cancel(root().into(), 7, 0).is_ok()); // both tasks are canceled, everything is removed now - run_to_block(7); + System::run_to_block::(7); assert!(Agenda::::get(8).is_empty()); assert_eq!(Retries::::iter().count(), 0); }); @@ -1963,7 +1963,7 @@ fn cancel_retries_works() { // task 42 will be retried 10 times every 3 blocks assert_ok!(Scheduler::set_retry_named(root().into(), [1u8; 32], 10, 1)); assert_eq!(Retries::::iter().count(), 2); - run_to_block(3); + System::run_to_block::(3); assert!(logger::log().is_empty()); assert_eq!(Agenda::::get(4).len(), 2); // cancel the retry config for 20 @@ -1972,7 +1972,7 @@ fn cancel_retries_works() { // cancel the retry config for 42 assert_ok!(Scheduler::cancel_retry_named(root().into(), [1u8; 32])); assert_eq!(Retries::::iter().count(), 0); - run_to_block(4); + System::run_to_block::(4); // both tasks failed and there are no more retries, so they are evicted assert_eq!(Agenda::::get(4).len(), 0); assert_eq!(Retries::::iter().count(), 0); @@ -2287,7 +2287,7 @@ fn postponed_named_task_cannot_be_rescheduled() { assert!(Lookup::::contains_key(name)); // Run to a very large block. - run_to_block(10); + System::run_to_block::(10); // It was not executed. assert!(logger::log().is_empty()); @@ -2321,7 +2321,7 @@ fn postponed_named_task_cannot_be_rescheduled() { // Finally add the preimage. assert_ok!(Preimage::note_preimage(RuntimeOrigin::signed(0), call.encode())); - run_to_block(1000); + System::run_to_block::(1000); // It did not execute. assert!(logger::log().is_empty()); assert!(!Preimage::is_requested(&hash)); @@ -2357,14 +2357,14 @@ fn scheduler_v3_anon_basic_works() { ) .unwrap(); - run_to_block(3); + System::run_to_block::(3); // Did not execute till block 3. assert!(logger::log().is_empty()); // Executes in block 4. - run_to_block(4); + System::run_to_block::(4); assert_eq!(logger::log(), vec![(root(), 42u32)]); // ... but not again. 
- run_to_block(100); + System::run_to_block::(100); assert_eq!(logger::log(), vec![(root(), 42u32)]); }); } @@ -2389,7 +2389,7 @@ fn scheduler_v3_anon_cancel_works() { // Cancel the call. assert_ok!(>::cancel(address)); // It did not get executed. - run_to_block(100); + System::run_to_block::(100); assert!(logger::log().is_empty()); // Cannot cancel again. assert_err!(>::cancel(address), DispatchError::Unavailable); @@ -2413,7 +2413,7 @@ fn scheduler_v3_anon_reschedule_works() { ) .unwrap(); - run_to_block(3); + System::run_to_block::(3); // Did not execute till block 3. assert!(logger::log().is_empty()); @@ -2430,9 +2430,9 @@ fn scheduler_v3_anon_reschedule_works() { // Re-schedule to block 5. assert_ok!(>::reschedule(address, DispatchTime::At(5))); // Scheduled for block 5. - run_to_block(4); + System::run_to_block::(4); assert!(logger::log().is_empty()); - run_to_block(5); + System::run_to_block::(5); // Does execute in block 5. assert_eq!(logger::log(), vec![(root(), 42)]); // Cannot re-schedule executed task. @@ -2461,14 +2461,14 @@ fn scheduler_v3_anon_next_schedule_time_works() { ) .unwrap(); - run_to_block(3); + System::run_to_block::(3); // Did not execute till block 3. assert!(logger::log().is_empty()); // Scheduled for block 4. assert_eq!(>::next_dispatch_time(address), Ok(4)); // Block 4 executes it. - run_to_block(4); + System::run_to_block::(4); assert_eq!(logger::log(), vec![(root(), 42)]); // It has no dispatch time anymore. @@ -2498,7 +2498,7 @@ fn scheduler_v3_anon_reschedule_and_next_schedule_time_work() { ) .unwrap(); - run_to_block(3); + System::run_to_block::(3); // Did not execute till block 3. assert!(logger::log().is_empty()); @@ -2512,10 +2512,10 @@ fn scheduler_v3_anon_reschedule_and_next_schedule_time_work() { assert_eq!(>::next_dispatch_time(address), Ok(5)); // Block 4 does nothing. - run_to_block(4); + System::run_to_block::(4); assert!(logger::log().is_empty()); // Block 5 executes it. - run_to_block(5); + System::run_to_block::(5); assert_eq!(logger::log(), vec![(root(), 42)]); }); } @@ -2548,7 +2548,7 @@ fn scheduler_v3_anon_schedule_agenda_overflows() { DispatchError::Exhausted ); - run_to_block(4); + System::run_to_block::(4); // All scheduled calls are executed. assert_eq!(logger::log().len() as u32, max); }); @@ -2597,7 +2597,7 @@ fn scheduler_v3_anon_cancel_and_schedule_fills_holes() { assert_eq!(i, index); } - run_to_block(4); + System::run_to_block::(4); // Maximum number of calls are executed. assert_eq!(logger::log().len() as u32, max); }); @@ -2643,7 +2643,7 @@ fn scheduler_v3_anon_reschedule_fills_holes() { assert_eq!(new, want); } - run_to_block(4); + System::run_to_block::(4); // Maximum number of calls are executed. assert_eq!(logger::log().len() as u32, max); }); @@ -2670,14 +2670,14 @@ fn scheduler_v3_named_basic_works() { ) .unwrap(); - run_to_block(3); + System::run_to_block::(3); // Did not execute till block 3. assert!(logger::log().is_empty()); // Executes in block 4. - run_to_block(4); + System::run_to_block::(4); assert_eq!(logger::log(), vec![(root(), 42u32)]); // ... but not again. - run_to_block(100); + System::run_to_block::(100); assert_eq!(logger::log(), vec![(root(), 42u32)]); }); } @@ -2705,7 +2705,7 @@ fn scheduler_v3_named_cancel_named_works() { // Cancel the call by name. assert_ok!(>::cancel_named(name)); // It did not get executed. - run_to_block(100); + System::run_to_block::(100); assert!(logger::log().is_empty()); // Cannot cancel again. 
assert_noop!(>::cancel_named(name), DispatchError::Unavailable); @@ -2735,7 +2735,7 @@ fn scheduler_v3_named_cancel_without_name_works() { // Cancel the call by address. assert_ok!(>::cancel(address)); // It did not get executed. - run_to_block(100); + System::run_to_block::(100); assert!(logger::log().is_empty()); // Cannot cancel again. assert_err!(>::cancel(address), DispatchError::Unavailable); @@ -2762,7 +2762,7 @@ fn scheduler_v3_named_reschedule_named_works() { ) .unwrap(); - run_to_block(3); + System::run_to_block::(3); // Did not execute till block 3. assert!(logger::log().is_empty()); @@ -2784,9 +2784,9 @@ fn scheduler_v3_named_reschedule_named_works() { // Re-schedule to block 5. assert_ok!(>::reschedule_named(name, DispatchTime::At(5))); // Scheduled for block 5. - run_to_block(4); + System::run_to_block::(4); assert!(logger::log().is_empty()); - run_to_block(5); + System::run_to_block::(5); // Does execute in block 5. assert_eq!(logger::log(), vec![(root(), 42)]); // Cannot re-schedule executed task. @@ -2822,7 +2822,7 @@ fn scheduler_v3_named_next_schedule_time_works() { ) .unwrap(); - run_to_block(3); + System::run_to_block::(3); // Did not execute till block 3. assert!(logger::log().is_empty()); @@ -2831,7 +2831,7 @@ fn scheduler_v3_named_next_schedule_time_works() { // Also works by address. assert_eq!(>::next_dispatch_time(address), Ok(4)); // Block 4 executes it. - run_to_block(4); + System::run_to_block::(4); assert_eq!(logger::log(), vec![(root(), 42)]); // It has no dispatch time anymore. @@ -3025,7 +3025,7 @@ fn unavailable_call_is_detected() { assert!(Preimage::is_requested(&hash)); // Executes in block 4. - run_to_block(4); + System::run_to_block::(4); assert_eq!( System::events().last().unwrap().event, diff --git a/substrate/frame/society/src/mock.rs b/substrate/frame/society/src/mock.rs index 3c27c08a1061..8cb5dc823753 100644 --- a/substrate/frame/society/src/mock.rs +++ b/substrate/frame/society/src/mock.rs @@ -138,18 +138,6 @@ impl EnvBuilder { } } -/// Run until a particular block. -pub fn run_to_block(n: u64) { - while System::block_number() < n { - if System::block_number() > 1 { - System::on_finalize(System::block_number()); - } - System::set_block_number(System::block_number() + 1); - System::on_initialize(System::block_number()); - Society::on_initialize(System::block_number()); - } -} - /// Creates a bid struct using input parameters. pub fn bid( who: AccountId, @@ -173,12 +161,12 @@ pub fn candidacy( pub fn next_challenge() { let challenge_period: u64 = ::ChallengePeriod::get(); let now = System::block_number(); - run_to_block(now + challenge_period - now % challenge_period); + System::run_to_block::(now + challenge_period - now % challenge_period); } pub fn next_voting() { if let Period::Voting { more, .. } = Society::period() { - run_to_block(System::block_number() + more); + System::run_to_block::(System::block_number() + more); } } @@ -235,8 +223,11 @@ pub fn conclude_intake(allow_resignation: bool, judge_intake: Option) { pub fn next_intake() { let claim_period: u64 = ::ClaimPeriod::get(); match Society::period() { - Period::Voting { more, .. } => run_to_block(System::block_number() + more + claim_period), - Period::Claim { more, .. } => run_to_block(System::block_number() + more), + Period::Voting { more, .. } => System::run_to_block::( + System::block_number() + more + claim_period, + ), + Period::Claim { more, .. 
} => + System::run_to_block::(System::block_number() + more), } } diff --git a/substrate/frame/society/src/tests.rs b/substrate/frame/society/src/tests.rs index 2a13f99855b5..22832f18b6fe 100644 --- a/substrate/frame/society/src/tests.rs +++ b/substrate/frame/society/src/tests.rs @@ -272,7 +272,7 @@ fn bidding_works() { // 40, now a member, can vote for 50 assert_ok!(Society::vote(Origin::signed(40), 50, true)); conclude_intake(true, None); - run_to_block(12); + System::run_to_block::(12); // 50 is now a member assert_eq!(members(), vec![10, 30, 40, 50]); // Pot is increased by 1000, and 500 is paid out. Total payout so far is 1200. @@ -282,7 +282,7 @@ fn bidding_works() { assert_eq!(candidacies(), vec![]); assert_ok!(Society::defender_vote(Origin::signed(10), true)); // Keep defender around // Next period - run_to_block(16); + System::run_to_block::(16); // Same members assert_eq!(members(), vec![10, 30, 40, 50]); // Pot is increased by 1000 again @@ -294,7 +294,7 @@ fn bidding_works() { // Candidate 60 is voted in. assert_ok!(Society::vote(Origin::signed(50), 60, true)); conclude_intake(true, None); - run_to_block(20); + System::run_to_block::(20); // 60 joins as a member assert_eq!(members(), vec![10, 30, 40, 50, 60]); // Pay them @@ -368,7 +368,7 @@ fn rejecting_skeptic_on_approved_is_punished() { } conclude_intake(true, None); assert_eq!(Members::::get(10).unwrap().strikes, 0); - run_to_block(12); + System::run_to_block::(12); assert_eq!(members(), vec![10, 20, 30, 40]); assert_eq!(Members::::get(skeptic).unwrap().strikes, 1); }); @@ -418,7 +418,7 @@ fn slash_payout_works() { Payouts::::get(20), PayoutRecord { paid: 0, payouts: vec![(8, 500)].try_into().unwrap() } ); - run_to_block(8); + System::run_to_block::(8); // payout should be here, but 500 less assert_ok!(Society::payout(RuntimeOrigin::signed(20))); assert_eq!(Balances::free_balance(20), 550); @@ -1315,7 +1315,7 @@ fn drop_candidate_works() { assert_ok!(Society::vote(Origin::signed(10), 40, false)); assert_ok!(Society::vote(Origin::signed(20), 40, false)); assert_ok!(Society::vote(Origin::signed(30), 40, false)); - run_to_block(12); + System::run_to_block::(12); assert_ok!(Society::drop_candidate(Origin::signed(50), 40)); // 40 candidacy has gone. 
 		assert_eq!(candidates(), vec![]);
diff --git a/substrate/frame/src/lib.rs b/substrate/frame/src/lib.rs
index f79a52bc6c5b..e3e58fc01b5f 100644
--- a/substrate/frame/src/lib.rs
+++ b/substrate/frame/src/lib.rs
@@ -325,7 +325,7 @@ pub mod testing_prelude {
 		assert_storage_noop, hypothetically, storage_alias,
 	};
 
-	pub use frame_system::{self, mocking::*};
+	pub use frame_system::{self, mocking::*, RunToBlockHooks};
 
 	#[deprecated(note = "Use `frame::testing_prelude::TestState` instead.")]
 	pub use sp_io::TestExternalities;
diff --git a/substrate/frame/staking/src/mock.rs b/substrate/frame/staking/src/mock.rs
index df8cb38e8b37..769b84826b41 100644
--- a/substrate/frame/staking/src/mock.rs
+++ b/substrate/frame/staking/src/mock.rs
@@ -25,7 +25,7 @@ use frame_election_provider_support::{
 use frame_support::{
 	assert_ok, derive_impl, ord_parameter_types, parameter_types,
 	traits::{
-		ConstU64, Currency, EitherOfDiverse, FindAuthor, Get, Hooks, Imbalance, LockableCurrency,
+		ConstU64, Currency, EitherOfDiverse, FindAuthor, Get, Imbalance, LockableCurrency,
 		OnUnbalanced, OneSessionHandler, WithdrawReasons,
 	},
 	weights::constants::RocksDbWeight,
@@ -155,7 +155,7 @@ impl pallet_session::historical::Config for Test {
 }
 impl pallet_authorship::Config for Test {
 	type FindAuthor = Author11;
-	type EventHandler = Pallet<Test>;
+	type EventHandler = ();
 }
 
 impl pallet_timestamp::Config for Test {
@@ -544,13 +544,10 @@ impl ExtBuilder {
 		let mut ext = sp_io::TestExternalities::from(storage);
 
 		if self.initialize_first_session {
-			// We consider all test to start after timestamp is initialized This must be ensured by
-			// having `timestamp::on_initialize` called before `staking::on_initialize`. Also, if
-			// session length is 1, then it is already triggered.
 			ext.execute_with(|| {
-				System::set_block_number(1);
-				Session::on_initialize(1);
-				<Staking as Hooks<u64>>::on_initialize(1);
+				run_to_block(1);
+
+				// Force reset the timestamp to the initial timestamp for easy testing.
 				Timestamp::set_timestamp(INIT_TIMESTAMP);
 			});
 		}
@@ -618,33 +615,31 @@ pub(crate) fn bond_virtual_nominator(
 /// a block import/propose process where we first initialize the block, then execute some stuff (not
 /// in the function), and then finalize the block.
 pub(crate) fn run_to_block(n: BlockNumber) {
-	Staking::on_finalize(System::block_number());
-	for b in (System::block_number() + 1)..=n {
-		System::set_block_number(b);
-		Session::on_initialize(b);
-		<Staking as Hooks<u64>>::on_initialize(b);
-		Timestamp::set_timestamp(System::block_number() * BLOCK_TIME + INIT_TIMESTAMP);
-		if b != n {
-			Staking::on_finalize(System::block_number());
-		}
-	}
+	System::run_to_block_with::<AllPalletsWithSystem>(
+		n,
+		frame_system::RunToBlockHooks::default().after_initialize(|bn| {
+			Timestamp::set_timestamp(bn * BLOCK_TIME + INIT_TIMESTAMP);
+		}),
+	);
 }
 
 /// Progresses from the current block number (whatever that may be) to the `P * session_index + 1`.
-pub(crate) fn start_session(session_index: SessionIndex) {
+pub(crate) fn start_session(end_session_idx: SessionIndex) {
+	let period = Period::get();
 	let end: u64 = if Offset::get().is_zero() {
-		(session_index as u64) * Period::get()
+		(end_session_idx as u64) * period
 	} else {
-		Offset::get() + (session_index.saturating_sub(1) as u64) * Period::get()
+		Offset::get() + (end_session_idx.saturating_sub(1) as u64) * period
 	};
+
 	run_to_block(end);
+
+	let curr_session_idx = Session::current_index();
+
 	// session must have progressed properly.
 	assert_eq!(
-		Session::current_index(),
-		session_index,
-		"current session index = {}, expected = {}",
-		Session::current_index(),
-		session_index,
+		curr_session_idx, end_session_idx,
+		"current session index = {curr_session_idx}, expected = {end_session_idx}",
 	);
 }
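A quick worked example of the relationship the reworked `start_session` enforces, assuming the staking mock's usual `Period = 5`, `Offset = 0` parameters (an assumption here, not shown in the patch):

```rust
// With Offset zero, end = end_session_idx * period, so session 2 is
// reached by running to block 10:
start_session(2);
assert_eq!(Session::current_index(), 2);
assert_eq!(System::block_number(), 10);
```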
diff --git a/substrate/frame/state-trie-migration/src/lib.rs b/substrate/frame/state-trie-migration/src/lib.rs
index 61323b70b33d..1dc1a3928f2b 100644
--- a/substrate/frame/state-trie-migration/src/lib.rs
+++ b/substrate/frame/state-trie-migration/src/lib.rs
@@ -1309,16 +1309,17 @@ mod mock {
 	pub(crate) fn run_to_block(n: u32) -> (H256, Weight) {
 		let mut root = Default::default();
 		let mut weight_sum = Weight::zero();
+		log::trace!(target: LOG_TARGET, "running from {:?} to {:?}", System::block_number(), n);
-		while System::block_number() < n {
-			System::set_block_number(System::block_number() + 1);
-			System::on_initialize(System::block_number());
-			weight_sum += StateTrieMigration::on_initialize(System::block_number());
+		System::run_to_block_with::<AllPalletsWithSystem>(
+			n,
+			frame_system::RunToBlockHooks::default().after_initialize(|bn| {
+				weight_sum += StateTrieMigration::on_initialize(bn);
+				root = *System::finalize().state_root();
+			}),
+		);
 
-			root = *System::finalize().state_root();
-			System::on_finalize(System::block_number());
-		}
 		(root, weight_sum)
 	}
 }
diff --git a/substrate/frame/support/src/dispatch.rs b/substrate/frame/support/src/dispatch.rs
index 990996830030..14bc2667def1 100644
--- a/substrate/frame/support/src/dispatch.rs
+++ b/substrate/frame/support/src/dispatch.rs
@@ -315,10 +315,8 @@ impl PostDispatchInfo {
 				"Post dispatch weight is greater than pre dispatch weight. \
 				Pre dispatch weight may underestimating the actual weight. \
 				Greater post dispatch weight components are ignored.
-				Pre dispatch weight: {:?},
-				Post dispatch weight: {:?}",
-				actual_weight,
-				info_total_weight,
+				Pre dispatch weight: {info_total_weight:?},
+				Post dispatch weight: {actual_weight:?}",
 			);
 		}
 		actual_weight.min(info.total_weight())
diff --git a/substrate/frame/system/src/lib.rs b/substrate/frame/system/src/lib.rs
index 894e1898ed15..f2bb5e290c94 100644
--- a/substrate/frame/system/src/lib.rs
+++ b/substrate/frame/system/src/lib.rs
@@ -1974,6 +1974,51 @@ impl<T: Config> Pallet<T> {
 			.collect::<_>()
 	}
 
+	/// Simulate the execution of a block sequence up to a specified height, injecting the
+	/// provided hooks at each block.
+	///
+	/// `on_finalize` is always called before `on_initialize` with the current block number.
+	/// `on_initialize` is always called with the next block number.
+	///
+	/// These hooks allow custom logic to be executed at specific points in each block.
+	/// For example, you might use one of them to set a timestamp for each block.
+	#[cfg(any(feature = "std", feature = "runtime-benchmarks", test))]
+	pub fn run_to_block_with<AllPalletsWithSystem>(
+		n: BlockNumberFor<T>,
+		mut hooks: RunToBlockHooks<T>,
+	) where
+		AllPalletsWithSystem: frame_support::traits::OnInitialize<BlockNumberFor<T>>
+			+ frame_support::traits::OnFinalize<BlockNumberFor<T>>,
+	{
+		let mut bn = Self::block_number();
+
+		while bn < n {
+			// Skip block 0.
+			if !bn.is_zero() {
+				(hooks.before_finalize)(bn);
+				AllPalletsWithSystem::on_finalize(bn);
+				(hooks.after_finalize)(bn);
+			}
+
+			bn += One::one();
+
+			Self::set_block_number(bn);
+			(hooks.before_initialize)(bn);
+			AllPalletsWithSystem::on_initialize(bn);
+			(hooks.after_initialize)(bn);
+		}
+	}
+
+	/// Simulate the execution of a block sequence up to a specified height.
diff --git a/substrate/frame/system/src/lib.rs b/substrate/frame/system/src/lib.rs
index 894e1898ed15..f2bb5e290c94 100644
--- a/substrate/frame/system/src/lib.rs
+++ b/substrate/frame/system/src/lib.rs
@@ -1974,6 +1974,51 @@ impl<T: Config> Pallet<T> {
 .collect::<_>()
 }
+ /// Simulate the execution of a block sequence up to a specified height, injecting the
+ /// provided hooks at each block.
+ ///
+ /// `on_finalize` is always called with the current block number before
+ /// `on_initialize` is called with the next block number.
+ ///
+ /// These hooks allow custom logic to be executed at specific points in each block.
+ /// For example, you might use one of them to set a timestamp for each block.
+ #[cfg(any(feature = "std", feature = "runtime-benchmarks", test))]
+ pub fn run_to_block_with<AllPalletsWithSystem>(
+ n: BlockNumberFor<T>,
+ mut hooks: RunToBlockHooks<T>,
+ ) where
+ AllPalletsWithSystem: frame_support::traits::OnInitialize<BlockNumberFor<T>>
+ + frame_support::traits::OnFinalize<BlockNumberFor<T>>,
+ {
+ let mut bn = Self::block_number();
+
+ while bn < n {
+ // Skip block 0.
+ if !bn.is_zero() {
+ (hooks.before_finalize)(bn);
+ AllPalletsWithSystem::on_finalize(bn);
+ (hooks.after_finalize)(bn);
+ }
+
+ bn += One::one();
+
+ Self::set_block_number(bn);
+ (hooks.before_initialize)(bn);
+ AllPalletsWithSystem::on_initialize(bn);
+ (hooks.after_initialize)(bn);
+ }
+ }
+
+ /// Simulate the execution of a block sequence up to a specified height.
+ #[cfg(any(feature = "std", feature = "runtime-benchmarks", test))]
+ pub fn run_to_block<AllPalletsWithSystem>(n: BlockNumberFor<T>)
+ where
+ AllPalletsWithSystem: frame_support::traits::OnInitialize<BlockNumberFor<T>>
+ + frame_support::traits::OnFinalize<BlockNumberFor<T>>,
+ {
+ Self::run_to_block_with::<AllPalletsWithSystem>(n, Default::default());
+ }
+
 /// Set the block number to something in particular. Can be used as an alternative to
 /// `initialize` for tests that don't need to bother with the other environment entries.
 #[cfg(any(feature = "std", feature = "runtime-benchmarks", test))]
@@ -2347,6 +2392,72 @@ impl<T: Config> Lookup for ChainContext<T> {
 }
 }
+/// Hooks for the [`Pallet::run_to_block_with`] function.
+#[cfg(any(feature = "std", feature = "runtime-benchmarks", test))]
+pub struct RunToBlockHooks<'a, T>
+where
+ T: 'a + Config,
+{
+ before_initialize: Box<dyn 'a + FnMut(BlockNumberFor<T>)>,
+ after_initialize: Box<dyn 'a + FnMut(BlockNumberFor<T>)>,
+ before_finalize: Box<dyn 'a + FnMut(BlockNumberFor<T>)>,
+ after_finalize: Box<dyn 'a + FnMut(BlockNumberFor<T>)>,
+}
+
+#[cfg(any(feature = "std", feature = "runtime-benchmarks", test))]
+impl<'a, T> RunToBlockHooks<'a, T>
+where
+ T: 'a + Config,
+{
+ /// Set the hook function logic before the initialization of the block.
+ pub fn before_initialize<F>(mut self, f: F) -> Self
+ where
+ F: 'a + FnMut(BlockNumberFor<T>),
+ {
+ self.before_initialize = Box::new(f);
+ self
+ }
+ /// Set the hook function logic after the initialization of the block.
+ pub fn after_initialize<F>(mut self, f: F) -> Self
+ where
+ F: 'a + FnMut(BlockNumberFor<T>),
+ {
+ self.after_initialize = Box::new(f);
+ self
+ }
+ /// Set the hook function logic before the finalization of the block.
+ pub fn before_finalize<F>(mut self, f: F) -> Self
+ where
+ F: 'a + FnMut(BlockNumberFor<T>),
+ {
+ self.before_finalize = Box::new(f);
+ self
+ }
+ /// Set the hook function logic after the finalization of the block.
+ pub fn after_finalize<F>(mut self, f: F) -> Self
+ where
+ F: 'a + FnMut(BlockNumberFor<T>),
+ {
+ self.after_finalize = Box::new(f);
+ self
+ }
+}
+
+#[cfg(any(feature = "std", feature = "runtime-benchmarks", test))]
+impl<'a, T> Default for RunToBlockHooks<'a, T>
+where
+ T: Config,
+{
+ fn default() -> Self {
+ Self {
+ before_initialize: Box::new(|_| {}),
+ after_initialize: Box::new(|_| {}),
+ before_finalize: Box::new(|_| {}),
+ after_finalize: Box::new(|_| {}),
+ }
+ }
+}
+
 /// Prelude to be used alongside pallet macro, for ease of use.
 pub mod pallet_prelude {
 pub use crate::{ensure_none, ensure_root, ensure_signed, ensure_signed_or_root};
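For test authors migrating away from hand-rolled loops like the ones deleted in the staking and state-trie-migration mocks above, the call shape looks as follows. This is a minimal sketch assuming a typical `construct_runtime!` mock in which `System`, `Timestamp`, `AllPalletsWithSystem`, `new_test_ext`, `BLOCK_TIME`, and `INIT_TIMESTAMP` exist as they do in the staking mock:

```rust
#[test]
fn run_to_block_with_hooks() {
    new_test_ext().execute_with(|| {
        System::run_to_block_with::<AllPalletsWithSystem>(
            10,
            frame_system::RunToBlockHooks::default()
                // Fires with the new block number, right after every
                // pallet's `on_initialize`.
                .after_initialize(|bn| {
                    Timestamp::set_timestamp(bn * BLOCK_TIME + INIT_TIMESTAMP);
                })
                // Fires with the current block number, just before
                // `on_finalize`; the target block is initialized but
                // never finalized, so `bn` stays below 10 here.
                .before_finalize(|bn| assert!(bn < 10)),
        );
        assert_eq!(System::block_number(), 10);
    });
}
```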
diff --git a/substrate/frame/transaction-storage/src/mock.rs b/substrate/frame/transaction-storage/src/mock.rs
index 73174b73dbac..84a77043d577 100644
--- a/substrate/frame/transaction-storage/src/mock.rs
+++ b/substrate/frame/transaction-storage/src/mock.rs
@@ -21,10 +21,7 @@ use crate::{
 self as pallet_transaction_storage, TransactionStorageProof, DEFAULT_MAX_BLOCK_TRANSACTIONS,
 DEFAULT_MAX_TRANSACTION_SIZE,
 };
-use frame_support::{
- derive_impl,
- traits::{ConstU32, OnFinalize, OnInitialize},
-};
+use frame_support::{derive_impl, traits::ConstU32};
 use sp_runtime::{traits::IdentityLookup, BuildStorage};
 pub type Block = frame_system::mocking::MockBlock<Test>;
@@ -80,15 +77,13 @@ pub fn new_test_ext() -> sp_io::TestExternalities {
 t.into()
 }
-pub fn run_to_block(n: u64, f: impl Fn() -> Option<TransactionStorageProof>) {
- while System::block_number() < n {
- if let Some(proof) = f() {
- TransactionStorage::check_proof(RuntimeOrigin::none(), proof).unwrap();
- }
- TransactionStorage::on_finalize(System::block_number());
- System::on_finalize(System::block_number());
- System::set_block_number(System::block_number() + 1);
- System::on_initialize(System::block_number());
- TransactionStorage::on_initialize(System::block_number());
- }
+pub fn run_to_block(n: u64, f: impl Fn() -> Option<TransactionStorageProof> + 'static) {
+ System::run_to_block_with::<AllPalletsWithSystem>(
+ n,
+ frame_system::RunToBlockHooks::default().before_finalize(|_| {
+ if let Some(proof) = f() {
+ TransactionStorage::check_proof(RuntimeOrigin::none(), proof).unwrap();
+ }
+ }),
+ );
 }
diff --git a/substrate/primitives/storage/src/lib.rs b/substrate/primitives/storage/src/lib.rs
index 4b25f85fba68..df7570a18548 100644
--- a/substrate/primitives/storage/src/lib.rs
+++ b/substrate/primitives/storage/src/lib.rs
@@ -191,11 +191,15 @@ pub mod well_known_keys {
 /// Wasm code of the runtime.
 ///
 /// Stored as a raw byte vector. Required by substrate.
+ ///
+ /// Encodes to `0x3A636F6465`.
 pub const CODE: &[u8] = b":code";
 /// Number of wasm linear memory pages required for execution of the runtime.
 ///
 /// The type of this value is encoded `u64`.
+ ///
+ /// Encodes to `0x3A686561707061676573`.
 pub const HEAP_PAGES: &[u8] = b":heappages";
 /// Current extrinsic index (u32) is stored under this key.
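The new `Encodes to` annotations are just the ASCII bytes of each key rendered as hex, so they can be double-checked mechanically. A standalone sketch, not part of the diff:

```rust
/// Render a byte string in the uppercase hex form used by the comments above.
fn to_hex(bytes: &[u8]) -> String {
    bytes.iter().map(|b| format!("{b:02X}")).collect()
}

fn main() {
    assert_eq!(to_hex(b":code"), "3A636F6465");
    assert_eq!(to_hex(b":heappages"), "3A686561707061676573");
}
```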
diff --git a/substrate/test-utils/runtime/transaction-pool/src/lib.rs b/substrate/test-utils/runtime/transaction-pool/src/lib.rs
index 93e5855eefc6..f88694fb1071 100644
--- a/substrate/test-utils/runtime/transaction-pool/src/lib.rs
+++ b/substrate/test-utils/runtime/transaction-pool/src/lib.rs
@@ -352,9 +352,18 @@ impl ChainApi for TestApi {
 fn validate_transaction(
 &self,
 at: <Self::Block as BlockT>::Hash,
- _source: TransactionSource,
+ source: TransactionSource,
 uxt: Arc<<Self::Block as BlockT>::Extrinsic>,
 ) -> Self::ValidationFuture {
+ ready(self.validate_transaction_blocking(at, source, uxt))
+ }
+
+ fn validate_transaction_blocking(
+ &self,
+ at: <Self::Block as BlockT>::Hash,
+ _source: TransactionSource,
+ uxt: Arc<<Self::Block as BlockT>::Extrinsic>,
+ ) -> Result<TransactionValidity> {
 let uxt = (*uxt).clone();
 self.validation_requests.write().push(uxt.clone());
 let block_number;
@@ -374,16 +383,12 @@ impl ChainApi for TestApi {
 // the transaction. (This is not required for this test function, but in real
 // environment it would fail because of this).
 if !found_best {
- return ready(Ok(Err(TransactionValidityError::Invalid(
- InvalidTransaction::Custom(1),
- ))))
+ return Ok(Err(TransactionValidityError::Invalid(InvalidTransaction::Custom(1))))
 }
 },
 Ok(None) =>
- return ready(Ok(Err(TransactionValidityError::Invalid(
- InvalidTransaction::Custom(2),
- )))),
+ return Ok(Err(TransactionValidityError::Invalid(InvalidTransaction::Custom(2)))),
- Err(e) => return ready(Err(e)),
+ Err(e) => return Err(e),
 }
 let (requires, provides) = if let Ok(transfer) = TransferData::try_from(&uxt) {
@@ -423,7 +428,7 @@
 if self.enable_stale_check && transfer.nonce < chain_nonce {
 log::info!("test_api::validate_transaction: invalid_transaction(stale)....");
- return ready(Ok(Err(TransactionValidityError::Invalid(InvalidTransaction::Stale))))
+ return Ok(Err(TransactionValidityError::Invalid(InvalidTransaction::Stale)))
 }
 (requires, provides)
@@ -433,7 +438,7 @@
 if self.chain.read().invalid_hashes.contains(&self.hash_and_length(&uxt).0) {
 log::info!("test_api::validate_transaction: invalid_transaction....");
- return ready(Ok(Err(TransactionValidityError::Invalid(InvalidTransaction::Custom(0)))))
+ return Ok(Err(TransactionValidityError::Invalid(InvalidTransaction::Custom(0))))
 }
 let priority = self.chain.read().priorities.get(&self.hash_and_length(&uxt).0).cloned();
@@ -447,16 +452,7 @@
 (self.valid_modifier.read())(&mut validity);
- ready(Ok(Ok(validity)))
- }
-
- fn validate_transaction_blocking(
- &self,
- _at: <Self::Block as BlockT>::Hash,
- _source: TransactionSource,
- _uxt: Arc<<Self::Block as BlockT>::Extrinsic>,
- ) -> Result<TransactionValidity> {
- unimplemented!();
+ Ok(Ok(validity))
 }
 fn block_id_to_number(