diff --git a/.codespellrc b/.codespellrc
new file mode 100644
index 000000000..771985af1
--- /dev/null
+++ b/.codespellrc
@@ -0,0 +1,3 @@
+[codespell]
+skip = .git,target,./crates/storage/libmdbx-rs/mdbx-sys/libmdbx,Cargo.toml,Cargo.lock
+ignore-words-list = crate,ser,ratatui
diff --git a/.github/ISSUE_TEMPLATE/bug.yml b/.github/ISSUE_TEMPLATE/bug.yml
index bfb81f1b7..1142a5bf2 100644
--- a/.github/ISSUE_TEMPLATE/bug.yml
+++ b/.github/ISSUE_TEMPLATE/bug.yml
@@ -6,7 +6,7 @@ body:
    attributes:
      value: |
        Thanks for taking the time to fill out this bug report! Please provide as much detail as possible.
-
+
        If you believe you have found a vulnerability, please provide details [here](mailto:georgios@paradigm.xyz) instead.
  - type: textarea
    id: what-happened
@@ -14,7 +14,7 @@ body:
      label: Describe the bug
      description: |
        A clear and concise description of what the bug is.
-
+
        If the bug is in a crate you are using (i.e. you are not running the standard `reth` binary) please mention that as well.
    validations:
      required: true
@@ -25,7 +25,7 @@ body:
      description: Please provide any steps you think might be relevant to reproduce the bug.
      placeholder: |
        Steps to reproduce:
-
+
        1. Start '...'
        2. Then '...'
        3. Check '...'
@@ -76,6 +76,13 @@ body:
      description: This can be obtained with `reth db version`
    validations:
      required: true
+  - type: textarea
+    id: network
+    attributes:
+      label: Which chain / network are you on?
+      description: This is the argument you pass to `reth --chain`. If you are using `--dev`, type in 'dev' here. If you are not running with `--chain` or `--dev` then it is mainnet.
+    validations:
+      required: true
  - type: dropdown
    id: node-type
    attributes:
diff --git a/.github/scripts/label_pr.js b/.github/scripts/label_pr.js
index c01f4c98a..16ace2db0 100644
--- a/.github/scripts/label_pr.js
+++ b/.github/scripts/label_pr.js
@@ -8,6 +8,20 @@ function shouldIncludeLabel (label) {
  return !isStatus && !isTrackingIssue && !isPreventStale && !isDifficulty;
}

+// Get the issue number from an issue link in the forms `<repo url>/issues/<issue number>` or `#<issue number>`.
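+// Illustrative examples (hypothetical issue number and body text), assuming
+// repoUrl = 'https://github.com/paradigmxyz/reth'; both calls return '1234':
+//   getIssueLink(repoUrl, 'Closes https://github.com/paradigmxyz/reth/issues/1234')
+//   getIssueLink(repoUrl, 'fixes #1234')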
+function getIssueLink (repoUrl, body) {
+  const urlPattern = new RegExp(`(close|closes|closed|fix|fixes|fixed|resolve|resolves|resolved) ${repoUrl}/issues/(?<issue_number>\\d+)`, 'i')
+  const issuePattern = new RegExp(`(close|closes|closed|fix|fixes|fixed|resolve|resolves|resolved) \#(?<issue_number>\\d+)`, 'i')
+
+  const urlRe = body.match(urlPattern);
+  const issueRe = body.match(issuePattern);
+  if (urlRe?.groups?.issue_number) {
+    return urlRe.groups.issue_number
+  } else {
+    return issueRe?.groups?.issue_number
+  }
+}
+
 module.exports = async ({ github, context }) => {
   try {
     const prNumber = context.payload.pull_request.number;
@@ -15,11 +29,7 @@ module.exports = async ({ github, context }) => {
     const repo = context.repo;
     const repoUrl = context.payload.repository.html_url;

-    const pattern = new RegExp(`(close|closes|closed|fix|fixes|fixed|resolve|resolves|resolved) ${repoUrl}/issues/(?<issue_number>\\d+)`, 'i')
-
-    const re = prBody.match(pattern);
-    const issueNumber = re?.groups?.issue_number;
-
+    const issueNumber = getIssueLink(repoUrl, prBody);
     if (!issueNumber) {
       console.log('No issue reference found in PR description.');
       return;
diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml
index 1fb89f7c1..20ae6644b 100644
--- a/.github/workflows/docker.yml
+++ b/.github/workflows/docker.yml
@@ -11,8 +11,10 @@ on:
 env:
   REPO_NAME: ${{ github.repository_owner }}/reth
   IMAGE_NAME: ${{ github.repository_owner }}/reth
+  OP_IMAGE_NAME: ${{ github.repository_owner }}/op-reth
   CARGO_TERM_COLOR: always
   DOCKER_IMAGE_NAME: ghcr.io/${{ github.repository_owner }}/reth
+  OP_DOCKER_IMAGE_NAME: ghcr.io/${{ github.repository_owner }}/op-reth
   DOCKER_USERNAME: ${{ github.actor }}

 jobs:
@@ -36,9 +38,15 @@ jobs:
        run: |
          docker run --privileged --rm tonistiigi/binfmt --install arm64,amd64
          docker buildx create --use --name cross-builder
-      - name: Build and push image, tag as "latest"
+      - name: Build and push reth image, tag as "latest"
        if: ${{ contains(github.event.ref, 'beta') }}
        run: make PROFILE=maxperf docker-build-push-latest
-      - name: Build and push image
+      - name: Build and push reth image
        if: ${{ ! contains(github.event.ref, 'beta') }}
        run: make PROFILE=maxperf docker-build-push
+      - name: Build and push op-reth image, tag as "latest"
+        if: ${{ contains(github.event.ref, 'beta') }}
+        run: make IMAGE_NAME=$OP_IMAGE_NAME DOCKER_IMAGE_NAME=$OP_DOCKER_IMAGE_NAME PROFILE=maxperf op-docker-build-push-latest
+      - name: Build and push op-reth image
+        if: ${{ !
contains(github.event.ref, 'beta') }} + run: make IMAGE_NAME=$OP_IMAGE_NAME DOCKER_IMAGE_NAME=$OP_DOCKER_IMAGE_NAME PROFILE=maxperf op-docker-build-push diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 1f1f7a13c..4f3632875 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -27,8 +27,7 @@ jobs: - uses: Swatinem/rust-cache@v2 with: cache-on-failure: true - - run: - cargo clippy --bin "${{ matrix.binary }}" --workspace --features "${{ matrix.network }} asm-keccak jemalloc jemalloc-prof min-error-logs min-warn-logs min-info-logs min-debug-logs min-trace-logs" + - run: cargo clippy --bin "${{ matrix.binary }}" --workspace --features "${{ matrix.network }} asm-keccak jemalloc jemalloc-prof min-error-logs min-warn-logs min-info-logs min-debug-logs min-trace-logs" env: RUSTFLAGS: -D warnings @@ -95,9 +94,7 @@ jobs: env: # Keep in sync with ./book.yml:jobs.build # This should only add `-D warnings` - RUSTDOCFLAGS: - --cfg docsrs --show-type-layout --generate-link-to-definition --enable-index-page - -Zunstable-options -D warnings + RUSTDOCFLAGS: --cfg docsrs --show-type-layout --generate-link-to-definition --enable-index-page -Zunstable-options -D warnings fmt: name: fmt @@ -110,6 +107,12 @@ jobs: components: rustfmt - run: cargo fmt --all --check + codespell: + runs-on: ubuntu-latest + timeout-minutes: 30 + steps: + - uses: codespell-project/actions-codespell@v2 + grafana: runs-on: ubuntu-latest timeout-minutes: 30 @@ -124,7 +127,7 @@ jobs: name: lint success runs-on: ubuntu-latest if: always() - needs: [clippy-binaries, clippy, crate-checks, docs, fmt, grafana] + needs: [clippy-binaries, clippy, crate-checks, docs, fmt, codespell, grafana] timeout-minutes: 30 steps: - name: Decide whether the needed jobs succeeded or failed diff --git a/.github/workflows/release-dist.yml b/.github/workflows/release-dist.yml index 5989a532c..2142360e0 100644 --- a/.github/workflows/release-dist.yml +++ b/.github/workflows/release-dist.yml @@ -14,7 +14,7 @@ jobs: - name: Update Homebrew formula uses: dawidd6/action-homebrew-bump-formula@v3 with: - token: ${{ secrets.GITHUB_TOKEN }} + token: ${{ secrets.HOMEBREW }} no_fork: true tap: paradigmxyz/brew formula: reth diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 55ce0843f..91f65d2bc 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -10,6 +10,7 @@ on: env: REPO_NAME: ${{ github.repository_owner }}/reth + OP_IMAGE_NAME: ${{ github.repository_owner }}/op-reth IMAGE_NAME: ${{ github.repository_owner }}/reth CARGO_TERM_COLOR: always @@ -26,11 +27,11 @@ jobs: build: name: build release - runs-on: ${{ matrix.os }} + runs-on: ${{ matrix.configs.os }} needs: extract-version strategy: matrix: - include: + configs: - target: x86_64-unknown-linux-gnu os: ubuntu-20.04 profile: maxperf @@ -46,29 +47,34 @@ jobs: - target: x86_64-pc-windows-gnu os: ubuntu-20.04 profile: maxperf + build: + - command: build + binary: reth + - command: op-build + binary: op-reth steps: - uses: actions/checkout@v4 - uses: dtolnay/rust-toolchain@stable with: - target: ${{ matrix.target }} + target: ${{ matrix.configs.target }} - uses: taiki-e/install-action@cross - uses: Swatinem/rust-cache@v2 with: cache-on-failure: true - name: Apple M1 setup - if: matrix.target == 'aarch64-apple-darwin' + if: matrix.configs.target == 'aarch64-apple-darwin' run: | echo "SDKROOT=$(xcrun -sdk macosx --show-sdk-path)" >> $GITHUB_ENV - echo "MACOSX_DEPLOYMENT_TARGET=$(xcrun -sdk macosx --show-sdk-os-version)" 
>> $GITHUB_ENV + echo "MACOSX_DEPLOYMENT_TARGET=$(xcrun -sdk macosx --show-sdk-platform-version)" >> $GITHUB_ENV - name: Build Reth - run: make PROFILE=${{ matrix.profile }} build-${{ matrix.target }} + run: make PROFILE=${{ matrix.configs.profile }} ${{ matrix.build.command }}-${{ matrix.configs.target }} - name: Move binary run: | mkdir artifacts - [[ "${{ matrix.target }}" == *windows* ]] && ext=".exe" - mv "target/${{ matrix.target }}/${{ matrix.profile }}/reth${ext}" ./artifacts + [[ "${{ matrix.configs.target }}" == *windows* ]] && ext=".exe" + mv "target/${{ matrix.configs.target }}/${{ matrix.configs.profile }}/${{ matrix.build.binary }}${ext}" ./artifacts - name: Configure GPG and create artifacts env: @@ -78,22 +84,22 @@ jobs: export GPG_TTY=$(tty) echo -n "$GPG_SIGNING_KEY" | base64 --decode | gpg --batch --import cd artifacts - tar -czf reth-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.target }}.tar.gz reth* - echo "$GPG_PASSPHRASE" | gpg --passphrase-fd 0 --pinentry-mode loopback --batch -ab reth-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.target }}.tar.gz + tar -czf ${{ matrix.build.binary }}-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.configs.target }}.tar.gz ${{ matrix.build.binary }}* + echo "$GPG_PASSPHRASE" | gpg --passphrase-fd 0 --pinentry-mode loopback --batch -ab ${{ matrix.build.binary }}-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.configs.target }}.tar.gz mv *tar.gz* .. shell: bash - name: Upload artifact uses: actions/upload-artifact@v4 with: - name: reth-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.target }}.tar.gz - path: reth-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.target }}.tar.gz + name: ${{ matrix.build.binary }}-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.configs.target }}.tar.gz + path: ${{ matrix.build.binary }}-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.configs.target }}.tar.gz - name: Upload signature uses: actions/upload-artifact@v4 with: - name: reth-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.target }}.tar.gz.asc - path: reth-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.target }}.tar.gz.asc + name: ${{ matrix.build.binary }}-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.configs.target }}.tar.gz.asc + path: ${{ matrix.build.binary }}-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.configs.target }}.tar.gz.asc draft-release: name: draft release @@ -184,7 +190,7 @@ jobs: ENDBODY ) assets=() - for asset in ./reth-*.tar.gz*; do + for asset in ./*reth-*.tar.gz*; do assets+=("$asset/$asset") done tag_name="${{ env.VERSION }}" diff --git a/.github/workflows/unit.yml b/.github/workflows/unit.yml index 91a247fac..05ff09609 100644 --- a/.github/workflows/unit.yml +++ b/.github/workflows/unit.yml @@ -57,6 +57,7 @@ jobs: uses: actions/checkout@v4 with: repository: ethereum/tests + ref: 1c23e3c27ac53b794de0844d2d5e19cd2495b9d8 path: testing/ef-tests/ethereum-tests submodules: recursive fetch-depth: 1 diff --git a/CODEOWNERS b/CODEOWNERS index 8efa8da85..bd86e2e58 100644 --- a/CODEOWNERS +++ b/CODEOWNERS @@ -1,24 +1,27 @@ * @gakonst bin/ @onbjerg crates/blockchain-tree @rakita @rkrasiuk -crates/consensus/auto-seal @mattsse -crates/consensus/beacon @rkrasiuk @mattsse @Rjected +crates/cli/ @onbjerg @mattsse +crates/consensus @rkrasiuk @mattsse @Rjected crates/exex @onbjerg @shekhirin crates/metrics @onbjerg crates/net/ @emhane @mattsse @Rjected crates/net/downloaders/ @onbjerg @rkrasiuk +crates/node/ @mattsse @Rjected @onbjerg 
+crates/node-core/ @mattsse @Rjected @onbjerg +crates/node-ethereum/ @mattsse @Rjected crates/payload/ @mattsse @Rjected crates/prune @shekhirin @joshieDo -crates/revm/src/ @rakita -crates/revm/ @mattsse +crates/revm/ @mattsse @rakita crates/rpc/ @mattsse @Rjected -crates/rpc/rpc-types @mattsse @Rjected @Evalir -crates/rpc/rpc-types-compat @mattsse @Rjected @Evalir crates/stages/ @onbjerg @rkrasiuk @shekhirin +crates/stages-api/ @onbjerg @rkrasiuk @shekhirin crates/static-file @joshieDo @shekhirin crates/storage/ @rakita @joshieDo @shekhirin crates/tasks @mattsse crates/tracing @onbjerg crates/transaction-pool/ @mattsse crates/trie @rkrasiuk +crates/trie-parallel @rkrasiuk +crates/optimism @mattsse .github/ @onbjerg @gakonst @DaniPopes diff --git a/Cargo.lock b/Cargo.lock index dfb64fdd7..6d9252332 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2,19 +2,6 @@ # It is not intended for manual editing. version = 3 -[[package]] -name = "additional-rpc-namespace-in-cli" -version = "0.0.0" -dependencies = [ - "clap", - "eyre", - "jsonrpsee", - "reth", - "reth-node-ethereum", - "reth-transaction-pool", - "tokio", -] - [[package]] name = "addr2line" version = "0.21.0" @@ -131,37 +118,49 @@ checksum = "5c6cb57a04249c6480766f7f7cef5467412af1490f8d1e243141daddada3264f" [[package]] name = "alloy-chains" -version = "0.1.16" +version = "0.1.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "40646aa7f01e396139cf0d6c3a7475eeb8094a0f41d8199f10860c8aef09d2f1" +checksum = "fe6c2674230e94ea98767550b02853bf7024b46f784827be95acfc5f5f1a445f" dependencies = [ "alloy-rlp", "arbitrary", "num_enum", "proptest", "serde", - "strum 0.26.2", + "strum", ] [[package]] name = "alloy-consensus" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=39b8695#39b869585955d95e9c64c3e1b66f16432ae4f132" +source = "git+https://github.com/alloy-rs/alloy?rev=77c1240#77c1240533b411ed0eb5533f94396eba8d7f6ab6" dependencies = [ - "alloy-eips", + "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=77c1240)", "alloy-primitives", "alloy-rlp", - "alloy-serde", + "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=77c1240)", + "c-kzg", + "serde", +] + +[[package]] +name = "alloy-consensus" +version = "0.1.0" +source = "git+https://github.com/alloy-rs/alloy#3ccadcf62d571f402ba9149a3b0d684333e4b014" +dependencies = [ + "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy)", + "alloy-primitives", + "alloy-rlp", + "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy)", "c-kzg", "serde", - "sha2 0.10.8", ] [[package]] name = "alloy-dyn-abi" -version = "0.7.0" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "872f239c15befa27cc4f0d3d82a70b3365c2d0202562bf906eb93b299fa31882" +checksum = "545885d9b0b2c30fd344ae291439b4bfe59e48dd62fbc862f8503d98088967dc" dependencies = [ "alloy-json-abi", "alloy-primitives", @@ -172,42 +171,70 @@ dependencies = [ "itoa", "serde", "serde_json", - "winnow 0.6.6", + "winnow 0.6.7", ] [[package]] name = "alloy-eips" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=39b8695#39b869585955d95e9c64c3e1b66f16432ae4f132" +source = "git+https://github.com/alloy-rs/alloy?rev=77c1240#77c1240533b411ed0eb5533f94396eba8d7f6ab6" dependencies = [ "alloy-primitives", "alloy-rlp", - "alloy-serde", + "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=77c1240)", "arbitrary", "c-kzg", + "derive_more", "ethereum_ssz", "ethereum_ssz_derive", "once_cell", "proptest", "proptest-derive", "serde", + 
"sha2 0.10.8", +] + +[[package]] +name = "alloy-eips" +version = "0.1.0" +source = "git+https://github.com/alloy-rs/alloy#3ccadcf62d571f402ba9149a3b0d684333e4b014" +dependencies = [ + "alloy-primitives", + "alloy-rlp", + "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy)", + "c-kzg", + "once_cell", + "serde", + "sha2 0.10.8", ] [[package]] name = "alloy-genesis" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=39b8695#39b869585955d95e9c64c3e1b66f16432ae4f132" +source = "git+https://github.com/alloy-rs/alloy?rev=77c1240#77c1240533b411ed0eb5533f94396eba8d7f6ab6" dependencies = [ "alloy-primitives", - "alloy-serde", + "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=77c1240)", "serde", + "serde_json", +] + +[[package]] +name = "alloy-genesis" +version = "0.1.0" +source = "git+https://github.com/alloy-rs/alloy#3ccadcf62d571f402ba9149a3b0d684333e4b014" +dependencies = [ + "alloy-primitives", + "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy)", + "serde", + "serde_json", ] [[package]] name = "alloy-json-abi" -version = "0.7.0" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "83a35ddfd27576474322a5869e4c123e5f3e7b2177297c18e4e82ea501cb125b" +checksum = "786689872ec4e7d354810ab0dffd48bb40b838c047522eb031cbd47d15634849" dependencies = [ "alloy-primitives", "alloy-sol-type-parser", @@ -218,7 +245,7 @@ dependencies = [ [[package]] name = "alloy-json-rpc" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=39b8695#39b869585955d95e9c64c3e1b66f16432ae4f132" +source = "git+https://github.com/alloy-rs/alloy?rev=77c1240#77c1240533b411ed0eb5533f94396eba8d7f6ab6" dependencies = [ "alloy-primitives", "serde", @@ -230,14 +257,15 @@ dependencies = [ [[package]] name = "alloy-network" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=39b8695#39b869585955d95e9c64c3e1b66f16432ae4f132" +source = "git+https://github.com/alloy-rs/alloy?rev=77c1240#77c1240533b411ed0eb5533f94396eba8d7f6ab6" dependencies = [ - "alloy-consensus", - "alloy-eips", + "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=77c1240)", + "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=77c1240)", "alloy-json-rpc", "alloy-primitives", - "alloy-rpc-types", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=77c1240)", "alloy-signer", + "alloy-sol-types", "async-trait", "futures-utils-wasm", "thiserror", @@ -246,9 +274,9 @@ dependencies = [ [[package]] name = "alloy-node-bindings" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=39b8695#39b869585955d95e9c64c3e1b66f16432ae4f132" +source = "git+https://github.com/alloy-rs/alloy?rev=77c1240#77c1240533b411ed0eb5533f94396eba8d7f6ab6" dependencies = [ - "alloy-genesis", + "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=77c1240)", "alloy-primitives", "k256", "serde_json", @@ -260,9 +288,9 @@ dependencies = [ [[package]] name = "alloy-primitives" -version = "0.7.0" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99bbad0a6b588ef4aec1b5ddbbfdacd9ef04e00b979617765b03174318ee1f3a" +checksum = "525448f6afc1b70dd0f9d0a8145631bf2f5e434678ab23ab18409ca264cae6b3" dependencies = [ "alloy-rlp", "arbitrary", @@ -288,14 +316,14 @@ dependencies = [ [[package]] name = "alloy-provider" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=39b8695#39b869585955d95e9c64c3e1b66f16432ae4f132" +source = 
"git+https://github.com/alloy-rs/alloy?rev=77c1240#77c1240533b411ed0eb5533f94396eba8d7f6ab6" dependencies = [ - "alloy-eips", + "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=77c1240)", "alloy-json-rpc", "alloy-network", "alloy-primitives", "alloy-rpc-client", - "alloy-rpc-types", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=77c1240)", "alloy-rpc-types-trace", "alloy-transport", "alloy-transport-http", @@ -338,7 +366,7 @@ dependencies = [ [[package]] name = "alloy-rpc-client" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=39b8695#39b869585955d95e9c64c3e1b66f16432ae4f132" +source = "git+https://github.com/alloy-rs/alloy?rev=77c1240#77c1240533b411ed0eb5533f94396eba8d7f6ab6" dependencies = [ "alloy-json-rpc", "alloy-transport", @@ -358,14 +386,14 @@ dependencies = [ [[package]] name = "alloy-rpc-types" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=39b8695#39b869585955d95e9c64c3e1b66f16432ae4f132" +source = "git+https://github.com/alloy-rs/alloy?rev=77c1240#77c1240533b411ed0eb5533f94396eba8d7f6ab6" dependencies = [ - "alloy-consensus", - "alloy-eips", - "alloy-genesis", + "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=77c1240)", + "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=77c1240)", + "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=77c1240)", "alloy-primitives", "alloy-rlp", - "alloy-serde", + "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=77c1240)", "alloy-sol-types", "arbitrary", "itertools 0.12.1", @@ -377,30 +405,62 @@ dependencies = [ "thiserror", ] +[[package]] +name = "alloy-rpc-types" +version = "0.1.0" +source = "git+https://github.com/alloy-rs/alloy#3ccadcf62d571f402ba9149a3b0d684333e4b014" +dependencies = [ + "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy)", + "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy)", + "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy)", + "alloy-primitives", + "alloy-rlp", + "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy)", + "alloy-sol-types", + "itertools 0.12.1", + "serde", + "serde_json", + "thiserror", +] + [[package]] name = "alloy-rpc-types-anvil" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=39b8695#39b869585955d95e9c64c3e1b66f16432ae4f132" +source = "git+https://github.com/alloy-rs/alloy?rev=77c1240#77c1240533b411ed0eb5533f94396eba8d7f6ab6" dependencies = [ "alloy-primitives", - "alloy-serde", + "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=77c1240)", "serde", ] +[[package]] +name = "alloy-rpc-types-beacon" +version = "0.1.0" +source = "git+https://github.com/alloy-rs/alloy?rev=77c1240#77c1240533b411ed0eb5533f94396eba8d7f6ab6" +dependencies = [ + "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=77c1240)", + "alloy-primitives", + "alloy-rpc-types-engine", + "serde", + "serde_with", +] + [[package]] name = "alloy-rpc-types-engine" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=39b8695#39b869585955d95e9c64c3e1b66f16432ae4f132" +source = "git+https://github.com/alloy-rs/alloy?rev=77c1240#77c1240533b411ed0eb5533f94396eba8d7f6ab6" dependencies = [ - "alloy-consensus", - "alloy-eips", + "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=77c1240)", + "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=77c1240)", "alloy-primitives", "alloy-rlp", - "alloy-rpc-types", - "alloy-serde", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=77c1240)", 
+ "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=77c1240)", "ethereum_ssz", "ethereum_ssz_derive", "jsonrpsee-types", + "jsonwebtoken 9.3.0", + "rand 0.8.5", "serde", "thiserror", ] @@ -408,11 +468,21 @@ dependencies = [ [[package]] name = "alloy-rpc-types-trace" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=39b8695#39b869585955d95e9c64c3e1b66f16432ae4f132" +source = "git+https://github.com/alloy-rs/alloy?rev=77c1240#77c1240533b411ed0eb5533f94396eba8d7f6ab6" +dependencies = [ + "alloy-primitives", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=77c1240)", + "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=77c1240)", + "serde", + "serde_json", +] + +[[package]] +name = "alloy-serde" +version = "0.1.0" +source = "git+https://github.com/alloy-rs/alloy?rev=77c1240#77c1240533b411ed0eb5533f94396eba8d7f6ab6" dependencies = [ "alloy-primitives", - "alloy-rpc-types", - "alloy-serde", "serde", "serde_json", ] @@ -420,7 +490,7 @@ dependencies = [ [[package]] name = "alloy-serde" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=39b8695#39b869585955d95e9c64c3e1b66f16432ae4f132" +source = "git+https://github.com/alloy-rs/alloy#3ccadcf62d571f402ba9149a3b0d684333e4b014" dependencies = [ "alloy-primitives", "serde", @@ -430,7 +500,7 @@ dependencies = [ [[package]] name = "alloy-signer" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=39b8695#39b869585955d95e9c64c3e1b66f16432ae4f132" +source = "git+https://github.com/alloy-rs/alloy?rev=77c1240#77c1240533b411ed0eb5533f94396eba8d7f6ab6" dependencies = [ "alloy-primitives", "async-trait", @@ -443,9 +513,9 @@ dependencies = [ [[package]] name = "alloy-signer-wallet" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=39b8695#39b869585955d95e9c64c3e1b66f16432ae4f132" +source = "git+https://github.com/alloy-rs/alloy?rev=77c1240#77c1240533b411ed0eb5533f94396eba8d7f6ab6" dependencies = [ - "alloy-consensus", + "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=77c1240)", "alloy-network", "alloy-primitives", "alloy-signer", @@ -459,9 +529,9 @@ dependencies = [ [[package]] name = "alloy-sol-macro" -version = "0.7.0" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "452d929748ac948a10481fff4123affead32c553cf362841c5103dd508bdfc16" +checksum = "89c80a2cb97e7aa48611cbb63950336f9824a174cdf670527cc6465078a26ea1" dependencies = [ "alloy-json-abi", "alloy-sol-macro-input", @@ -478,9 +548,9 @@ dependencies = [ [[package]] name = "alloy-sol-macro-input" -version = "0.7.0" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df64e094f6d2099339f9e82b5b38440b159757b6920878f28316243f8166c8d1" +checksum = "c58894b58ac50979eeac6249661991ac40b9d541830d9a725f7714cc9ef08c23" dependencies = [ "alloy-json-abi", "const-hex", @@ -495,18 +565,18 @@ dependencies = [ [[package]] name = "alloy-sol-type-parser" -version = "0.7.0" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "715f4d09a330cc181fc7c361b5c5c2766408fa59a0bac60349dcb7baabd404cc" +checksum = "7da8e71ea68e780cc203919e03f69f59e7afe92d2696fb1dcb6662f61e4031b6" dependencies = [ - "winnow 0.6.6", + "winnow 0.6.7", ] [[package]] name = "alloy-sol-types" -version = "0.7.0" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43bc2d6dfc2a19fd56644494479510f98b1ee929e04cf0d4aa45e98baa3e545b" +checksum = 
"399287f68d1081ed8b1f4903c49687658b95b142207d7cb4ae2f4813915343ef" dependencies = [ "alloy-json-abi", "alloy-primitives", @@ -518,7 +588,7 @@ dependencies = [ [[package]] name = "alloy-transport" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=39b8695#39b869585955d95e9c64c3e1b66f16432ae4f132" +source = "git+https://github.com/alloy-rs/alloy?rev=77c1240#77c1240533b411ed0eb5533f94396eba8d7f6ab6" dependencies = [ "alloy-json-rpc", "base64 0.22.0", @@ -536,13 +606,14 @@ dependencies = [ [[package]] name = "alloy-transport-http" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=39b8695#39b869585955d95e9c64c3e1b66f16432ae4f132" +source = "git+https://github.com/alloy-rs/alloy?rev=77c1240#77c1240533b411ed0eb5533f94396eba8d7f6ab6" dependencies = [ "alloy-json-rpc", "alloy-transport", "reqwest 0.12.4", "serde_json", "tower", + "tracing", "url", ] @@ -819,15 +890,28 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "81953c529336010edd6d8e358f886d9581267795c61b19475b71314bffa46d35" dependencies = [ "concurrent-queue", - "event-listener", + "event-listener 2.5.3", "futures-core", ] +[[package]] +name = "async-channel" +version = "2.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "136d4d23bcc79e27423727b36823d86233aad06dfea531837b038394d11e9928" +dependencies = [ + "concurrent-queue", + "event-listener 5.3.0", + "event-listener-strategy 0.5.2", + "futures-core", + "pin-project-lite", +] + [[package]] name = "async-compression" -version = "0.4.8" +version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07dbbf24db18d609b1462965249abdf49129ccad073ec257da372adc83259c60" +checksum = "4e9eabd7a98fe442131a17c316bd9349c43695e49e730c3c8e12cfb5f4da2693" dependencies = [ "brotli", "flate2", @@ -839,14 +923,25 @@ dependencies = [ "zstd-safe", ] +[[package]] +name = "async-lock" +version = "3.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d034b430882f8381900d3fe6f0aaa3ad94f2cb4ac519b429692a1bc2dda4ae7b" +dependencies = [ + "event-listener 4.0.3", + "event-listener-strategy 0.4.0", + "pin-project-lite", +] + [[package]] name = "async-sse" version = "5.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2e6fa871e4334a622afd6bb2f611635e8083a6f5e2936c0f90f37c7ef9856298" dependencies = [ - "async-channel", - "futures-lite", + "async-channel 1.9.0", + "futures-lite 1.13.0", "http-types", "log", "memchr", @@ -875,6 +970,12 @@ dependencies = [ "syn 2.0.60", ] +[[package]] +name = "async-task" +version = "4.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b75356056920673b02621b35afd0f7dda9306d03c79a30f5c56c44cf256e3de" + [[package]] name = "async-trait" version = "0.1.80" @@ -886,6 +987,12 @@ dependencies = [ "syn 2.0.60", ] +[[package]] +name = "atomic-waker" +version = "1.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1505bd5d3d116872e7271a6d4e16d81d0c8570876c8de68093a09ac269d8aac0" + [[package]] name = "attohttpc" version = "0.24.1" @@ -930,7 +1037,7 @@ version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d67782c3f868daa71d3533538e98a8e13713231969def7536e8039606fc46bf0" dependencies = [ - "fastrand 2.0.2", + "fastrand 2.1.0", "futures-core", "pin-project", "tokio", @@ -938,9 +1045,9 @@ dependencies = [ [[package]] name = "backtrace" -version = "0.3.69" +version = "0.3.71" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "2089b7e3f35b9dd2d0ed921ead4f6d318c27680d4a5bd167b3ee120edb105837" +checksum = "26b05800d2e817c8b3b4b54abd461726265fa9789ae34330622f2db9ee696f9d" dependencies = [ "addr2line", "cc", @@ -991,6 +1098,7 @@ checksum = "8c3c1a368f70d6cf7302d78f8f7093da241fb8e8807c05cc9e51a125895a6d5b" name = "beacon-api-sse" version = "0.0.0" dependencies = [ + "alloy-rpc-types-beacon", "clap", "futures-util", "mev-share-sse", @@ -1133,6 +1241,20 @@ dependencies = [ "generic-array", ] +[[package]] +name = "blocking" +version = "1.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "495f7104e962b7356f0aeb34247aca1fe7d2e783b346582db7f2904cb5717e88" +dependencies = [ + "async-channel 2.2.1", + "async-lock", + "async-task", + "futures-io", + "futures-lite 2.3.0", + "piper", +] + [[package]] name = "blst" version = "0.3.11" @@ -1280,9 +1402,9 @@ dependencies = [ [[package]] name = "brotli" -version = "4.0.0" +version = "5.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "125740193d7fee5cc63ab9e16c2fdc4e07c74ba755cc53b327d6ea029e9fc569" +checksum = "19483b140a7ac7174d34b5a581b406c64f84da5409d3e09cf4fff604f9270e67" dependencies = [ "alloc-no-stdlib", "alloc-stdlib", @@ -1291,9 +1413,9 @@ dependencies = [ [[package]] name = "brotli-decompressor" -version = "3.0.0" +version = "4.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "65622a320492e09b5e0ac436b14c54ff68199bac392d0e89a6832c4518eea525" +checksum = "e6221fe77a248b9117d431ad93761222e1cf8ff282d9d1d5d9f53d6299a1cf76" dependencies = [ "alloc-no-stdlib", "alloc-stdlib", @@ -1425,14 +1547,24 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" +[[package]] +name = "castaway" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a17ed5635fc8536268e5d4de1e22e81ac34419e5f052d4d51f4e01dcc263fcc" +dependencies = [ + "rustversion", +] + [[package]] name = "cc" -version = "1.0.83" +version = "1.0.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1174fb0b6ec23863f8b971027804a42614e347eafb0a95bf0b12cdae21fc4d0" +checksum = "d32a725bc159af97c3e629873bb9f88fb8cf8a4867175f76dc987815ea07c83b" dependencies = [ "jobserver", "libc", + "once_cell", ] [[package]] @@ -1562,14 +1694,6 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "98cc8fbded0c607b7ba9dd60cd98df59af97e84d24e49c8557331cfc26d301ce" -[[package]] -name = "cli-extension-event-hooks" -version = "0.0.0" -dependencies = [ - "reth", - "reth-node-ethereum", -] - [[package]] name = "coins-bip32" version = "0.8.7" @@ -1635,11 +1759,24 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b34115915337defe99b2aff5c2ce6771e5fbc4079f4b506301f5cf394c8452f7" dependencies = [ "crossterm", - "strum 0.26.2", - "strum_macros 0.26.2", + "strum", + "strum_macros", "unicode-width", ] +[[package]] +name = "compact_str" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f86b9c4c00838774a6d902ef931eff7470720c51d90c2e32cfe15dc304737b3f" +dependencies = [ + "castaway", + "cfg-if", + "itoa", + "ryu", + "static_assertions", +] + [[package]] name = "concat-kdf" version = "0.1.0" @@ -1651,9 +1788,9 @@ dependencies = [ [[package]] name = "concurrent-queue" -version = "2.4.0" +version = 
"2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d16048cd947b08fa32c24458a22f5dc5e835264f689f4f5653210c69fd107363" +checksum = "4ca0197aee26d1ae37445ee532fefce43251d24cc7c166799f4d46817f1d3973" dependencies = [ "crossbeam-utils", ] @@ -1871,7 +2008,7 @@ dependencies = [ "crossterm_winapi", "libc", "mio", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "signal-hook", "signal-hook-mio", "winapi", @@ -2008,47 +2145,47 @@ dependencies = [ ] [[package]] -name = "custom-evm" +name = "custom-engine-types" version = "0.0.0" dependencies = [ "eyre", "reth", + "reth-basic-payload-builder", + "reth-ethereum-payload-builder", "reth-node-api", "reth-node-core", "reth-node-ethereum", + "reth-payload-builder", "reth-primitives", + "reth-rpc-types", "reth-tracing", + "serde", + "thiserror", "tokio", ] [[package]] -name = "custom-inspector" +name = "custom-evm" version = "0.0.0" dependencies = [ - "clap", - "futures-util", + "eyre", "reth", + "reth-node-api", + "reth-node-core", "reth-node-ethereum", + "reth-primitives", + "reth-tracing", + "tokio", ] [[package]] -name = "custom-node" +name = "custom-inspector" version = "0.0.0" dependencies = [ - "eyre", + "clap", + "futures-util", "reth", - "reth-basic-payload-builder", - "reth-ethereum-payload-builder", - "reth-node-api", - "reth-node-core", "reth-node-ethereum", - "reth-payload-builder", - "reth-primitives", - "reth-rpc-types", - "reth-tracing", - "serde", - "thiserror", - "tokio", ] [[package]] @@ -2193,7 +2330,7 @@ dependencies = [ "hashbrown 0.14.3", "lock_api", "once_cell", - "parking_lot_core 0.9.9", + "parking_lot_core 0.9.10", ] [[package]] @@ -2222,6 +2359,17 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "db-access" +version = "0.0.0" +dependencies = [ + "eyre", + "reth-db", + "reth-primitives", + "reth-provider", + "reth-rpc-types", +] + [[package]] name = "debug-helper" version = "0.3.13" @@ -2401,11 +2549,13 @@ dependencies = [ [[package]] name = "discv5" -version = "0.4.1" -source = "git+https://github.com/sigp/discv5?rev=04ac004#04ac0042a345a9edf93b090007e5d31c008261ed" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cafb8ed8d460b7d1c8d4c970270d45ecb5e283179a3945143196624c55cda6ac" dependencies = [ "aes 0.7.5", "aes-gcm", + "alloy-rlp", "arrayvec", "delay_map", "enr", @@ -2420,7 +2570,6 @@ dependencies = [ "more-asserts", "parking_lot 0.11.2", "rand 0.8.5", - "rlp", "smallvec", "socket2 0.4.10", "tokio", @@ -2529,13 +2678,13 @@ dependencies = [ [[package]] name = "ef-tests" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "alloy-rlp", "rayon", "reth-db", + "reth-evm-ethereum", "reth-interfaces", - "reth-node-ethereum", "reth-primitives", "reth-provider", "reth-revm", @@ -2595,10 +2744,11 @@ checksum = "c34f04666d835ff5d62e058c3995147c06f42fe86ff053337632bca83e42702d" [[package]] name = "enr" -version = "0.10.0" +version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a3d8dc56e02f954cac8eb489772c552c473346fc34f67412bb6244fd647f7e4" +checksum = "4ab656b89cdd15051d92d0931888103508de14ef9e51177c86d478dfa551ce0f" dependencies = [ + "alloy-rlp", "base64 0.21.7", "bytes", "ed25519-dalek", @@ -2606,8 +2756,7 @@ dependencies = [ "k256", "log", "rand 0.8.5", - "rlp", - "secp256k1 0.27.0", + "secp256k1", "serde", "sha3", "zeroize", @@ -2734,25 +2883,121 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"0206175f82b8d6bf6652ff7d71a1e27fd2e4efde587fd368662814d6ec1d9ce0" [[package]] -name = "examples" +name = "event-listener" +version = "4.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "67b215c49b2b248c855fb73579eb1f4f26c38ffdc12973e20e07b91d78d5646e" +dependencies = [ + "concurrent-queue", + "parking", + "pin-project-lite", +] + +[[package]] +name = "event-listener" +version = "5.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d9944b8ca13534cdfb2800775f8dd4902ff3fc75a50101466decadfdf322a24" +dependencies = [ + "concurrent-queue", + "parking", + "pin-project-lite", +] + +[[package]] +name = "event-listener-strategy" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "958e4d70b6d5e81971bebec42271ec641e7ff4e170a6fa605f2b8a8b65cb97d3" +dependencies = [ + "event-listener 4.0.3", + "pin-project-lite", +] + +[[package]] +name = "event-listener-strategy" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0f214dc438f977e6d4e3500aaa277f5ad94ca83fbbd9b1a15713ce2344ccc5a1" +dependencies = [ + "event-listener 5.3.0", + "pin-project-lite", +] + +[[package]] +name = "exex-in-memory-state" +version = "0.0.0" +dependencies = [ + "eyre", + "reth", + "reth-exex", + "reth-node-api", + "reth-node-ethereum", + "reth-tracing", +] + +[[package]] +name = "exex-minimal" version = "0.0.0" dependencies = [ - "async-trait", "eyre", "futures", - "reth-beacon-consensus", - "reth-blockchain-tree", - "reth-db", - "reth-network", - "reth-network-api", + "reth", + "reth-exex", + "reth-node-api", + "reth-node-core", + "reth-node-ethereum", + "reth-primitives", + "reth-tracing", + "tokio", +] + +[[package]] +name = "exex-op-bridge" +version = "0.0.0" +dependencies = [ + "alloy-sol-types", + "eyre", + "futures", + "itertools 0.12.1", + "reth", + "reth-exex", + "reth-node-api", + "reth-node-core", + "reth-node-ethereum", + "reth-primitives", + "reth-provider", + "reth-tracing", + "rusqlite", + "tokio", +] + +[[package]] +name = "exex-rollup" +version = "0.0.0" +dependencies = [ + "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=77c1240)", + "alloy-rlp", + "alloy-sol-types", + "eyre", + "foundry-blob-explorers", + "futures", + "once_cell", + "reth", + "reth-cli-runner", + "reth-exex", + "reth-interfaces", + "reth-node-api", + "reth-node-core", + "reth-node-ethereum", "reth-primitives", "reth-provider", "reth-revm", - "reth-rpc-builder", - "reth-rpc-types", - "reth-rpc-types-compat", - "reth-tasks", - "reth-transaction-pool", + "reth-tracing", + "reth-trie", + "rusqlite", + "secp256k1", + "serde_json", "tokio", ] @@ -2795,9 +3040,9 @@ dependencies = [ [[package]] name = "fastrand" -version = "2.0.2" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "658bd65b1cf4c852a3cc96f18a8ce7b5640f6b703f905c7d74532294c2a63984" +checksum = "9fc0510504f03c51ada170672ac806f1f105a88aa97a5281117e1ddc3368e51a" [[package]] name = "fastrlp" @@ -2832,9 +3077,9 @@ dependencies = [ [[package]] name = "fiat-crypto" -version = "0.2.7" +version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c007b1ae3abe1cb6f85a16305acd418b7ca6343b953633fee2b76d8f108b830f" +checksum = "38793c55593b33412e3ae40c2c9781ffaa6f438f6f8c10f24e71846fbd7ae01e" [[package]] name = "findshlibs" @@ -2863,9 +3108,9 @@ dependencies = [ [[package]] name = "flate2" -version = "1.0.28" +version = "1.0.29" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "46303f565772937ffe1d394a4fac6f411c6013172fadde9dcdb1e147a086940e" +checksum = "4556222738635b7a3417ae6130d8f52201e45a0c4d1a907f0826383adb5f85e7" dependencies = [ "crc32fast", "miniz_oxide", @@ -2886,6 +3131,21 @@ dependencies = [ "percent-encoding", ] +[[package]] +name = "foundry-blob-explorers" +version = "0.1.0" +source = "git+https://github.com/foundry-rs/block-explorers#adcb750e8d8e57f7decafca433118bf7836ffd55" +dependencies = [ + "alloy-chains", + "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy)", + "alloy-primitives", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy)", + "alloy-serde 0.1.0 (git+https://github.com/alloy-rs/alloy)", + "chrono", + "reqwest 0.12.4", + "serde", +] + [[package]] name = "fragile" version = "2.0.0" @@ -2962,6 +3222,16 @@ dependencies = [ "waker-fn", ] +[[package]] +name = "futures-lite" +version = "2.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "52527eb5074e35e9339c6b4e8d12600c7128b68fb25dcb9fa9dec18f7c25f3a5" +dependencies = [ + "futures-core", + "pin-project-lite", +] + [[package]] name = "futures-macro" version = "0.3.30" @@ -3065,8 +3335,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "94b22e06ecb0110981051723910cbf0b5f5e09a2062dd7663334ee79a9d1286c" dependencies = [ "cfg-if", + "js-sys", "libc", "wasi 0.11.0+wasi-snapshot-preview1", + "wasm-bindgen", ] [[package]] @@ -3398,9 +3670,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6e9b187a72d63adbfba487f48095306ac823049cb504ee195541e91c7775f5ad" dependencies = [ "anyhow", - "async-channel", + "async-channel 1.9.0", "base64 0.13.1", - "futures-lite", + "futures-lite 1.13.0", "infer", "pin-project-lite", "rand 0.7.3", @@ -3498,12 +3770,29 @@ dependencies = [ "http 0.2.12", "hyper 0.14.28", "log", - "rustls 0.21.11", + "rustls 0.21.12", "rustls-native-certs 0.6.3", "tokio", "tokio-rustls 0.24.1", ] +[[package]] +name = "hyper-rustls" +version = "0.26.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a0bea761b46ae2b24eb4aef630d8d1c398157b6fc29e6350ecf090a0b70c952c" +dependencies = [ + "futures-util", + "http 1.1.0", + "hyper 1.3.1", + "hyper-util", + "rustls 0.22.4", + "rustls-pki-types", + "tokio", + "tokio-rustls 0.25.0", + "tower-service", +] + [[package]] name = "hyper-system-resolver" version = "0.5.0" @@ -3904,6 +4193,33 @@ dependencies = [ "cfg-if", ] +[[package]] +name = "interprocess" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "81f2533f3be42fffe3b5e63b71aeca416c1c3bc33e4e27be018521e76b1f38fb" +dependencies = [ + "blocking", + "cfg-if", + "futures-core", + "futures-io", + "intmap", + "libc", + "once_cell", + "rustc_version 0.4.0", + "spinning", + "thiserror", + "to_method", + "tokio", + "winapi", +] + +[[package]] +name = "intmap" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ae52f28f45ac2bc96edb7714de995cffc174a395fb0abf5bff453587c980d7b9" + [[package]] name = "intrusive-collections" version = "0.9.6" @@ -3978,9 +4294,9 @@ checksum = "49f1f14873335454500d59611f1cf4a4b0f786f9ac11f4312a78e4cf2566695b" [[package]] name = "jobserver" -version = "0.1.30" +version = "0.1.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "685a7d121ee3f65ae4fddd72b25a04bb36b6af81bc0828f7d5434c0fe60fa3a2" +checksum = 
"d2b099aaa34a9751c5bf0878add70444e1ed2dd73f347be99003d4577277de6e" dependencies = [ "libc", ] @@ -4033,7 +4349,7 @@ dependencies = [ "tokio-util", "tracing", "url", - "webpki-roots 0.26.1", + "webpki-roots", ] [[package]] @@ -4049,7 +4365,7 @@ dependencies = [ "futures-util", "hyper 0.14.28", "jsonrpsee-types", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "pin-project", "rand 0.8.5", "rustc-hash", @@ -4070,7 +4386,7 @@ checksum = "ac13bc1e44cd00448a5ff485824a128629c945f02077804cb659c07a0ba41395" dependencies = [ "async-trait", "hyper 0.14.28", - "hyper-rustls", + "hyper-rustls 0.24.2", "jsonrpsee-core", "jsonrpsee-types", "serde", @@ -4163,13 +4479,28 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6971da4d9c3aa03c3d8f3ff0f4155b534aad021292003895a469716b2a230378" dependencies = [ "base64 0.21.7", - "pem", + "pem 1.1.1", "ring 0.16.20", "serde", "serde_json", "simple_asn1", ] +[[package]] +name = "jsonwebtoken" +version = "9.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b9ae10193d25051e74945f1ea2d0b42e03cc3b890f7e4cc5faa44997d808193f" +dependencies = [ + "base64 0.21.7", + "js-sys", + "pem 3.0.4", + "ring 0.17.8", + "serde", + "serde_json", + "simple_asn1", +] + [[package]] name = "k256" version = "0.13.3" @@ -4322,7 +4653,7 @@ dependencies = [ "multihash", "multistream-select", "once_cell", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "pin-project", "quick-protobuf", "rand 0.8.5", @@ -4377,9 +4708,9 @@ dependencies = [ [[package]] name = "libproc" -version = "0.14.6" +version = "0.14.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8eb6497078a4c9c2aca63df56d8dce6eb4381d53a960f781a3a748f7ea97436d" +checksum = "ae9ea4b75e1a81675429dafe43441df1caea70081e82246a8cccf514884a88bb" dependencies = [ "bindgen", "errno", @@ -4484,9 +4815,9 @@ checksum = "f9d642685b028806386b2b6e75685faadd3eb65a85fff7df711ce18446a422da" [[package]] name = "lock_api" -version = "0.4.11" +version = "0.4.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c168f8615b12bc01f9c17e2eb0cc07dcae1940121185446edc3744920e8ef45" +checksum = "07af8b9cdd281b7915f413fa73f29ebd5d55d0d3f0155584dade1ff18cea1b17" dependencies = [ "autocfg", "scopeguard", @@ -4542,8 +4873,9 @@ dependencies = [ "reth-ecies", "reth-eth-wire", "reth-network", + "reth-network-types", "reth-primitives", - "secp256k1 0.27.0", + "secp256k1", "tokio", ] @@ -4711,22 +5043,6 @@ dependencies = [ "unicase", ] -[[package]] -name = "minimal" -version = "0.0.0" -dependencies = [ - "eyre", - "futures", - "reth", - "reth-exex", - "reth-node-api", - "reth-node-core", - "reth-node-ethereum", - "reth-primitives", - "reth-provider", - "tokio", -] - [[package]] name = "minimal-lexical" version = "0.2.1" @@ -4862,6 +5178,28 @@ dependencies = [ "unsigned-varint 0.7.2", ] +[[package]] +name = "network" +version = "0.0.0" +dependencies = [ + "eyre", + "futures", + "reth-network", + "reth-provider", + "tokio", +] + +[[package]] +name = "network-txpool" +version = "0.0.0" +dependencies = [ + "eyre", + "reth-network", + "reth-provider", + "reth-transaction-pool", + "tokio", +] + [[package]] name = "nibble_vec" version = "0.1.0" @@ -4882,6 +5220,27 @@ dependencies = [ "libc", ] +[[package]] +name = "node-custom-rpc" +version = "0.0.0" +dependencies = [ + "clap", + "eyre", + "jsonrpsee", + "reth", + "reth-node-ethereum", + "reth-transaction-pool", + "tokio", +] + +[[package]] +name = "node-event-hooks" +version = "0.0.0" +dependencies = [ + "reth", 
+ "reth-node-ethereum", +] + [[package]] name = "nom" version = "7.1.3" @@ -5070,26 +5429,6 @@ version = "11.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0ab1bc2a289d34bd04a330323ac98a1b4bc82c9d9fcb1e66b63caa84da26b575" -[[package]] -name = "op-bridge" -version = "0.0.0" -dependencies = [ - "alloy-sol-types", - "eyre", - "futures", - "itertools 0.12.1", - "reth", - "reth-exex", - "reth-node-api", - "reth-node-core", - "reth-node-ethereum", - "reth-primitives", - "reth-provider", - "reth-tracing", - "rusqlite", - "tokio", -] - [[package]] name = "opaque-debug" version = "0.3.1" @@ -5157,21 +5496,7 @@ dependencies = [ "proc-macro-crate 2.0.0", "proc-macro2", "quote", - "syn 1.0.109", -] - -[[package]] -name = "parity-tokio-ipc" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9981e32fb75e004cc148f5fb70342f393830e0a4aa62e3cc93b50976218d42b6" -dependencies = [ - "futures", - "libc", - "log", - "rand 0.7.3", - "tokio", - "winapi", + "syn 1.0.109", ] [[package]] @@ -5193,12 +5518,12 @@ dependencies = [ [[package]] name = "parking_lot" -version = "0.12.1" +version = "0.12.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f" +checksum = "7e4af0ca4f6caed20e900d564c242b8e5d4903fdacf31d3daf527b66fe6f42fb" dependencies = [ "lock_api", - "parking_lot_core 0.9.9", + "parking_lot_core 0.9.10", ] [[package]] @@ -5217,15 +5542,15 @@ dependencies = [ [[package]] name = "parking_lot_core" -version = "0.9.9" +version = "0.9.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c42a9226546d68acdd9c0a280d17ce19bfe27a46bf68784e4066115788d008e" +checksum = "1e401f977ab385c9e4e3ab30627d6f26d00e2c73eef317493c4ec6d468726cf8" dependencies = [ "cfg-if", "libc", - "redox_syscall 0.4.1", + "redox_syscall 0.5.1", "smallvec", - "windows-targets 0.48.5", + "windows-targets 0.52.5", ] [[package]] @@ -5253,6 +5578,16 @@ dependencies = [ "base64 0.13.1", ] +[[package]] +name = "pem" +version = "3.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e459365e590736a54c3fa561947c84837534b8e9af6fc5bf781307e82658fae" +dependencies = [ + "base64 0.22.0", + "serde", +] + [[package]] name = "percent-encoding" version = "2.3.1" @@ -5357,6 +5692,17 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" +[[package]] +name = "piper" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "668d31b1c4eba19242f2088b2bf3316b82ca31082a8335764db4e083db7485d4" +dependencies = [ + "atomic-waker", + "fastrand 2.1.0", + "futures-io", +] + [[package]] name = "pkcs8" version = "0.10.2" @@ -5431,7 +5777,7 @@ dependencies = [ "reth-primitives", "reth-provider", "reth-tracing", - "secp256k1 0.27.0", + "secp256k1", "serde_json", "tokio", "tokio-stream", @@ -5476,7 +5822,7 @@ dependencies = [ "log", "nix", "once_cell", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "smallvec", "symbolic-demangle", "tempfile", @@ -5814,19 +6160,20 @@ dependencies = [ [[package]] name = "ratatui" -version = "0.25.0" +version = "0.26.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a5659e52e4ba6e07b2dad9f1158f578ef84a73762625ddb51536019f34d180eb" +checksum = "a564a852040e82671dc50a37d88f3aa83bbc690dfc6844cfe7a2591620206a80" dependencies = [ "bitflags 2.5.0", 
"cassowary", + "compact_str", "crossterm", "indoc", "itertools 0.12.1", "lru", "paste", "stability", - "strum 0.25.0", + "strum", "unicode-segmentation", "unicode-width", ] @@ -5871,11 +6218,11 @@ dependencies = [ [[package]] name = "redox_syscall" -version = "0.4.1" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4722d768eff46b75989dd134e5c353f0d6296e5aaa3132e776cbdb56be7731aa" +checksum = "469052894dcb553421e483e4209ee581a45100d31b4018de03e5a7ad86374a7e" dependencies = [ - "bitflags 1.3.2", + "bitflags 2.5.0", ] [[package]] @@ -5958,7 +6305,6 @@ dependencies = [ "http 0.2.12", "http-body 0.4.6", "hyper 0.14.28", - "hyper-rustls", "ipnet", "js-sys", "log", @@ -5966,15 +6312,12 @@ dependencies = [ "once_cell", "percent-encoding", "pin-project-lite", - "rustls 0.21.11", - "rustls-pemfile 1.0.4", "serde", "serde_json", "serde_urlencoded", "sync_wrapper", "system-configuration", "tokio", - "tokio-rustls 0.24.1", "tokio-util", "tower-service", "url", @@ -5982,7 +6325,6 @@ dependencies = [ "wasm-bindgen-futures", "wasm-streams", "web-sys", - "webpki-roots 0.25.4", "winreg 0.50.0", ] @@ -6000,6 +6342,7 @@ dependencies = [ "http-body 1.0.0", "http-body-util", "hyper 1.3.1", + "hyper-rustls 0.26.0", "hyper-util", "ipnet", "js-sys", @@ -6008,16 +6351,22 @@ dependencies = [ "once_cell", "percent-encoding", "pin-project-lite", + "rustls 0.22.4", + "rustls-native-certs 0.7.0", + "rustls-pemfile 2.1.2", + "rustls-pki-types", "serde", "serde_json", "serde_urlencoded", "sync_wrapper", "tokio", + "tokio-rustls 0.25.0", "tower-service", "url", "wasm-bindgen", "wasm-bindgen-futures", "web-sys", + "webpki-roots", "winreg 0.52.0", ] @@ -6033,7 +6382,7 @@ dependencies = [ [[package]] name = "reth" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "ahash", "alloy-rlp", @@ -6058,17 +6407,18 @@ dependencies = [ "rand 0.8.5", "ratatui", "rayon", - "reth-auto-seal-consensus", "reth-basic-payload-builder", "reth-beacon-consensus", "reth-blockchain-tree", "reth-cli-runner", "reth-config", + "reth-consensus", "reth-consensus-common", "reth-db", "reth-discv4", "reth-downloaders", "reth-ethereum-payload-builder", + "reth-evm", "reth-exex", "reth-interfaces", "reth-network", @@ -6084,12 +6434,10 @@ dependencies = [ "reth-payload-validator", "reth-primitives", "reth-provider", - "reth-prune", "reth-revm", "reth-rpc", "reth-rpc-api", "reth-rpc-builder", - "reth-rpc-engine-api", "reth-rpc-types", "reth-rpc-types-compat", "reth-stages", @@ -6110,16 +6458,19 @@ dependencies = [ [[package]] name = "reth-auto-seal-consensus" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "futures-util", "reth-beacon-consensus", + "reth-consensus", "reth-engine-primitives", "reth-evm", "reth-interfaces", + "reth-network-types", "reth-primitives", "reth-provider", "reth-revm", + "reth-rpc-types", "reth-stages-api", "reth-transaction-pool", "tokio", @@ -6129,7 +6480,7 @@ dependencies = [ [[package]] name = "reth-basic-payload-builder" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "alloy-rlp", "futures-core", @@ -6151,18 +6502,20 @@ dependencies = [ [[package]] name = "reth-beacon-consensus" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "assert_matches", "futures", "metrics", - "reth-beacon-consensus-core", "reth-blockchain-tree", "reth-config", + "reth-consensus", "reth-db", "reth-downloaders", "reth-engine-primitives", + "reth-ethereum-consensus", "reth-ethereum-engine-primitives", + "reth-evm", 
"reth-evm-ethereum", "reth-interfaces", "reth-metrics", @@ -6172,12 +6525,14 @@ dependencies = [ "reth-provider", "reth-prune", "reth-revm", + "reth-rpc", "reth-rpc-types", "reth-rpc-types-compat", "reth-stages", "reth-stages-api", "reth-static-file", "reth-tasks", + "reth-testing-utils", "reth-tokio-util", "reth-tracing", "schnellru", @@ -6187,29 +6542,22 @@ dependencies = [ "tracing", ] -[[package]] -name = "reth-beacon-consensus-core" -version = "0.2.0-beta.6" -dependencies = [ - "reth-consensus-common", - "reth-interfaces", - "reth-primitives", -] - [[package]] name = "reth-blockchain-tree" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "aquamarine", "assert_matches", "linked_hash_set", "lru", "metrics", - "parking_lot 0.12.1", + "parking_lot 0.12.2", + "reth-consensus", "reth-db", + "reth-evm", + "reth-evm-ethereum", "reth-interfaces", "reth-metrics", - "reth-node-ethereum", "reth-primitives", "reth-provider", "reth-revm", @@ -6222,9 +6570,8 @@ dependencies = [ [[package]] name = "reth-cli-runner" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ - "futures", "reth-tasks", "tokio", "tracing", @@ -6232,9 +6579,10 @@ dependencies = [ [[package]] name = "reth-codecs" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ - "alloy-eips", + "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=77c1240)", + "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=77c1240)", "alloy-primitives", "arbitrary", "bytes", @@ -6249,7 +6597,7 @@ dependencies = [ [[package]] name = "reth-codecs-derive" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "convert_case 0.6.0", "proc-macro2", @@ -6260,7 +6608,7 @@ dependencies = [ [[package]] name = "reth-config" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "confy", "humantime-serde", @@ -6268,17 +6616,27 @@ dependencies = [ "reth-net-nat", "reth-network", "reth-primitives", - "secp256k1 0.27.0", + "secp256k1", "serde", "tempfile", "toml", ] +[[package]] +name = "reth-consensus" +version = "0.2.0-beta.7" +dependencies = [ + "auto_impl", + "reth-primitives", + "thiserror", +] + [[package]] name = "reth-consensus-common" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "mockall", + "reth-consensus", "reth-interfaces", "reth-primitives", "reth-provider", @@ -6286,7 +6644,7 @@ dependencies = [ [[package]] name = "reth-db" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "arbitrary", "assert_matches", @@ -6315,7 +6673,7 @@ dependencies = [ "rustc-hash", "serde", "serde_json", - "strum 0.26.2", + "strum", "tempfile", "test-fuzz", "thiserror", @@ -6323,20 +6681,21 @@ dependencies = [ [[package]] name = "reth-discv4" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "alloy-rlp", + "assert_matches", "discv5", "enr", "generic-array", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "rand 0.8.5", "reth-net-common", "reth-net-nat", + "reth-network-types", "reth-primitives", "reth-tracing", - "rlp", - "secp256k1 0.27.0", + "secp256k1", "serde", "thiserror", "tokio", @@ -6346,7 +6705,7 @@ dependencies = [ [[package]] name = "reth-discv5" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "alloy-rlp", "derive_more", @@ -6359,10 +6718,10 @@ dependencies = [ "multiaddr", "rand 0.8.5", "reth-metrics", + "reth-network-types", "reth-primitives", "reth-tracing", - "rlp", - "secp256k1 0.27.0", + "secp256k1", "thiserror", "tokio", "tracing", @@ -6370,18 +6729,20 @@ dependencies = [ [[package]] 
name = "reth-dns-discovery" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "alloy-rlp", "data-encoding", "enr", "linked_hash_set", - "parking_lot 0.12.1", + "parking_lot 0.12.2", + "rand 0.8.5", "reth-net-common", + "reth-network-types", "reth-primitives", "reth-tracing", "schnellru", - "secp256k1 0.27.0", + "secp256k1", "serde", "serde_with", "thiserror", @@ -6393,7 +6754,7 @@ dependencies = [ [[package]] name = "reth-downloaders" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "alloy-rlp", "assert_matches", @@ -6405,9 +6766,11 @@ dependencies = [ "rand 0.8.5", "rayon", "reth-config", + "reth-consensus", "reth-db", "reth-interfaces", "reth-metrics", + "reth-network-types", "reth-primitives", "reth-provider", "reth-tasks", @@ -6422,11 +6785,11 @@ dependencies = [ [[package]] name = "reth-e2e-test-utils" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ - "alloy-consensus", + "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=77c1240)", "alloy-network", - "alloy-rpc-types", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=77c1240)", "alloy-signer", "alloy-signer-wallet", "eyre", @@ -6435,21 +6798,24 @@ dependencies = [ "rand 0.8.5", "reth", "reth-db", + "reth-node-builder", "reth-node-core", "reth-node-ethereum", "reth-payload-builder", "reth-primitives", + "reth-provider", "reth-rpc", "reth-tracing", - "secp256k1 0.27.0", + "secp256k1", "serde_json", "tokio", "tokio-stream", + "tracing", ] [[package]] name = "reth-ecies" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "aes 0.8.4", "alloy-rlp", @@ -6466,8 +6832,9 @@ dependencies = [ "pin-project", "rand 0.8.5", "reth-net-common", + "reth-network-types", "reth-primitives", - "secp256k1 0.27.0", + "secp256k1", "sha2 0.10.8", "sha3", "thiserror", @@ -6480,7 +6847,7 @@ dependencies = [ [[package]] name = "reth-engine-primitives" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "reth-primitives", "reth-rpc-types", @@ -6490,7 +6857,7 @@ dependencies = [ [[package]] name = "reth-eth-wire" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "alloy-rlp", "arbitrary", @@ -6509,9 +6876,10 @@ dependencies = [ "reth-eth-wire-types", "reth-metrics", "reth-net-common", + "reth-network-types", "reth-primitives", "reth-tracing", - "secp256k1 0.27.0", + "secp256k1", "serde", "snap", "test-fuzz", @@ -6524,7 +6892,7 @@ dependencies = [ [[package]] name = "reth-eth-wire-types" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "alloy-rlp", "arbitrary", @@ -6538,16 +6906,25 @@ dependencies = [ "reth-net-common", "reth-primitives", "reth-tracing", - "secp256k1 0.27.0", + "secp256k1", "serde", "test-fuzz", "thiserror", "tokio-util", ] +[[package]] +name = "reth-ethereum-consensus" +version = "0.2.0-beta.7" +dependencies = [ + "reth-consensus", + "reth-consensus-common", + "reth-primitives", +] + [[package]] name = "reth-ethereum-engine-primitives" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "alloy-rlp", "reth-engine-primitives", @@ -6562,7 +6939,7 @@ dependencies = [ [[package]] name = "reth-ethereum-forks" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "alloy-chains", "alloy-primitives", @@ -6577,7 +6954,7 @@ dependencies = [ [[package]] name = "reth-ethereum-payload-builder" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "reth-basic-payload-builder", "reth-payload-builder", @@ -6591,7 +6968,7 @@ dependencies = [ [[package]] 
name = "reth-etl" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "rayon", "reth-db", @@ -6601,8 +6978,10 @@ dependencies = [ [[package]] name = "reth-evm" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ + "futures-util", + "parking_lot 0.12.2", "reth-interfaces", "reth-primitives", "revm", @@ -6611,27 +6990,43 @@ dependencies = [ [[package]] name = "reth-evm-ethereum" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" +dependencies = [ + "reth-evm", + "reth-interfaces", + "reth-primitives", + "reth-revm", + "revm-primitives", + "tracing", +] + +[[package]] +name = "reth-evm-optimism" +version = "0.2.0-beta.7" dependencies = [ "reth-evm", "reth-interfaces", "reth-primitives", "reth-provider", "reth-revm", + "revm", "revm-primitives", + "thiserror", "tracing", ] [[package]] name = "reth-exex" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "eyre", "metrics", "reth-config", "reth-metrics", + "reth-network", "reth-node-api", "reth-node-core", + "reth-payload-builder", "reth-primitives", "reth-provider", "reth-tasks", @@ -6642,18 +7037,19 @@ dependencies = [ [[package]] name = "reth-interfaces" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "auto_impl", "clap", "futures", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "rand 0.8.5", + "reth-consensus", "reth-eth-wire-types", "reth-network-api", + "reth-network-types", "reth-primitives", - "reth-rpc-types", - "secp256k1 0.27.0", + "secp256k1", "thiserror", "tokio", "tracing", @@ -6661,14 +7057,17 @@ dependencies = [ [[package]] name = "reth-ipc" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "async-trait", "bytes", "futures", + "futures-util", + "interprocess", "jsonrpsee", - "parity-tokio-ipc", "pin-project", + "rand 0.8.5", + "reth-tracing", "serde_json", "thiserror", "tokio", @@ -6680,7 +7079,7 @@ dependencies = [ [[package]] name = "reth-libmdbx" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "bitflags 2.5.0", "byteorder", @@ -6690,7 +7089,7 @@ dependencies = [ "indexmap 2.2.6", "libc", "libffi", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "pprof", "rand 0.8.5", "rand_xorshift", @@ -6702,7 +7101,7 @@ dependencies = [ [[package]] name = "reth-mdbx-sys" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "bindgen", "cc", @@ -6711,7 +7110,7 @@ dependencies = [ [[package]] name = "reth-metrics" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "futures", "metrics", @@ -6722,7 +7121,7 @@ dependencies = [ [[package]] name = "reth-metrics-derive" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "metrics", "once_cell", @@ -6736,16 +7135,16 @@ dependencies = [ [[package]] name = "reth-net-common" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "pin-project", - "reth-primitives", + "reth-network-types", "tokio", ] [[package]] name = "reth-net-nat" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "igd-next", "pin-project-lite", @@ -6759,7 +7158,7 @@ dependencies = [ [[package]] name = "reth-network" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "alloy-node-bindings", "alloy-provider", @@ -6777,10 +7176,11 @@ dependencies = [ "itertools 0.12.1", "linked_hash_set", "metrics", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "pin-project", "pprof", "rand 0.8.5", + "reth-consensus", "reth-discv4", "reth-discv5", "reth-dns-discovery", @@ -6791,6 +7191,7 @@ dependencies = [ "reth-net-common", "reth-network", 
"reth-network-api", + "reth-network-types", "reth-primitives", "reth-provider", "reth-rpc-types", @@ -6799,7 +7200,7 @@ dependencies = [ "reth-tracing", "reth-transaction-pool", "schnellru", - "secp256k1 0.27.0", + "secp256k1", "serde", "serde_json", "serial_test", @@ -6814,11 +7215,12 @@ dependencies = [ [[package]] name = "reth-network-api" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "enr", "reth-discv4", "reth-eth-wire", + "reth-network-types", "reth-primitives", "reth-rpc-types", "serde", @@ -6826,9 +7228,24 @@ dependencies = [ "tokio", ] +[[package]] +name = "reth-network-types" +version = "0.2.0-beta.7" +dependencies = [ + "alloy-primitives", + "alloy-rlp", + "enr", + "rand 0.8.5", + "secp256k1", + "serde_json", + "serde_with", + "thiserror", + "url", +] + [[package]] name = "reth-nippy-jar" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "anyhow", "bincode", @@ -6849,7 +7266,7 @@ dependencies = [ [[package]] name = "reth-node-api" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "reth-db", "reth-engine-primitives", @@ -6863,7 +7280,7 @@ dependencies = [ [[package]] name = "reth-node-builder" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "aquamarine", "confy", @@ -6875,8 +7292,10 @@ dependencies = [ "reth-beacon-consensus", "reth-blockchain-tree", "reth-config", + "reth-consensus", "reth-db", "reth-downloaders", + "reth-evm", "reth-exex", "reth-interfaces", "reth-network", @@ -6887,7 +7306,6 @@ dependencies = [ "reth-primitives", "reth-provider", "reth-prune", - "reth-revm", "reth-rpc", "reth-rpc-engine-api", "reth-stages", @@ -6895,12 +7313,14 @@ dependencies = [ "reth-tasks", "reth-tracing", "reth-transaction-pool", + "tempfile", "tokio", + "tokio-stream", ] [[package]] name = "reth-node-core" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "assert_matches", "clap", @@ -6918,15 +7338,19 @@ dependencies = [ "metrics-process", "metrics-util", "once_cell", + "pin-project", "procfs", "proptest", "rand 0.8.5", "reth-beacon-consensus", + "reth-codecs", "reth-config", "reth-consensus-common", "reth-db", "reth-discv4", + "reth-discv5", "reth-engine-primitives", + "reth-etl", "reth-evm", "reth-interfaces", "reth-metrics", @@ -6944,7 +7368,8 @@ dependencies = [ "reth-tasks", "reth-tracing", "reth-transaction-pool", - "secp256k1 0.27.0", + "reth-trie", + "secp256k1", "serde", "serde_json", "shellexpand", @@ -6952,13 +7377,14 @@ dependencies = [ "thiserror", "tikv-jemalloc-ctl", "tokio", + "tokio-util", "tracing", "vergen", ] [[package]] name = "reth-node-ethereum" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "eyre", "futures", @@ -6986,19 +7412,19 @@ dependencies = [ [[package]] name = "reth-node-events" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "futures", "humantime", "pin-project", "reth-beacon-consensus", "reth-db", - "reth-interfaces", "reth-network", "reth-network-api", "reth-primitives", "reth-provider", "reth-prune", + "reth-rpc-types", "reth-stages", "reth-static-file", "tokio", @@ -7007,23 +7433,23 @@ dependencies = [ [[package]] name = "reth-node-optimism" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "alloy-primitives", "async-trait", "clap", "eyre", - "http 0.2.12", - "http-body 0.4.6", "hyper 0.14.28", "jsonrpsee", - "parking_lot 0.12.1", - "reqwest 0.11.27", + "parking_lot 0.12.2", + "reqwest 0.12.4", "reth", "reth-basic-payload-builder", + "reth-beacon-consensus", "reth-db", "reth-e2e-test-utils", 
"reth-evm", + "reth-evm-optimism", "reth-interfaces", "reth-network", "reth-node-api", @@ -7047,14 +7473,24 @@ dependencies = [ "tracing", ] +[[package]] +name = "reth-optimism-consensus" +version = "0.2.0-beta.7" +dependencies = [ + "reth-consensus", + "reth-consensus-common", + "reth-primitives", +] + [[package]] name = "reth-optimism-payload-builder" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "alloy-rlp", "reth-basic-payload-builder", "reth-engine-primitives", "reth-evm", + "reth-evm-optimism", "reth-payload-builder", "reth-primitives", "reth-provider", @@ -7070,7 +7506,7 @@ dependencies = [ [[package]] name = "reth-payload-builder" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "futures-util", "metrics", @@ -7092,7 +7528,7 @@ dependencies = [ [[package]] name = "reth-payload-validator" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "reth-primitives", "reth-rpc-types", @@ -7101,25 +7537,23 @@ dependencies = [ [[package]] name = "reth-primitives" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "alloy-chains", - "alloy-eips", - "alloy-genesis", + "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=77c1240)", + "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=77c1240)", "alloy-primitives", "alloy-rlp", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=77c1240)", "alloy-trie", - "anyhow", "arbitrary", "assert_matches", "byteorder", "bytes", "c-kzg", - "cfg-if", "clap", "criterion", "derive_more", - "enr", "hash-db", "itertools 0.12.1", "modular-bitfield", @@ -7133,16 +7567,14 @@ dependencies = [ "rayon", "reth-codecs", "reth-ethereum-forks", - "reth-rpc-types", + "reth-network-types", "revm", "revm-primitives", "roaring", - "secp256k1 0.27.0", + "secp256k1", "serde", "serde_json", - "serde_with", - "sha2 0.10.8", - "strum 0.26.2", + "strum", "sucds", "tempfile", "test-fuzz", @@ -7154,7 +7586,7 @@ dependencies = [ [[package]] name = "reth-provider" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "alloy-rlp", "assert_matches", @@ -7162,7 +7594,7 @@ dependencies = [ "dashmap", "itertools 0.12.1", "metrics", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "pin-project", "rand 0.8.5", "rayon", @@ -7173,9 +7605,10 @@ dependencies = [ "reth-metrics", "reth-nippy-jar", "reth-primitives", + "reth-rpc-types", "reth-trie", "revm", - "strum 0.26.2", + "strum", "tempfile", "tokio", "tokio-stream", @@ -7184,7 +7617,7 @@ dependencies = [ [[package]] name = "reth-prune" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "assert_matches", "derive_more", @@ -7208,22 +7641,20 @@ dependencies = [ [[package]] name = "reth-revm" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "reth-consensus-common", - "reth-evm", "reth-interfaces", "reth-primitives", "reth-provider", "reth-trie", "revm", - "revm-inspectors", "tracing", ] [[package]] name = "reth-rpc" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "alloy-dyn-abi", "alloy-primitives", @@ -7238,17 +7669,19 @@ dependencies = [ "http-body 0.4.6", "hyper 0.14.28", "jsonrpsee", - "jsonwebtoken", + "jsonwebtoken 8.3.0", "metrics", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "pin-project", "rand 0.8.5", "reth-consensus-common", "reth-evm", "reth-evm-ethereum", + "reth-evm-optimism", "reth-interfaces", "reth-metrics", "reth-network-api", + "reth-network-types", "reth-primitives", "reth-provider", "reth-revm", @@ -7262,7 +7695,7 @@ dependencies = [ 
"revm-inspectors", "revm-primitives", "schnellru", - "secp256k1 0.27.0", + "secp256k1", "serde", "serde_json", "tempfile", @@ -7276,10 +7709,11 @@ dependencies = [ [[package]] name = "reth-rpc-api" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "jsonrpsee", "reth-engine-primitives", + "reth-network-types", "reth-primitives", "reth-rpc-types", "serde", @@ -7288,7 +7722,7 @@ dependencies = [ [[package]] name = "reth-rpc-api-testing-util" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "futures", "jsonrpsee", @@ -7302,7 +7736,7 @@ dependencies = [ [[package]] name = "reth-rpc-builder" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "hyper 0.14.28", "jsonrpsee", @@ -7310,12 +7744,13 @@ dependencies = [ "pin-project", "reth-beacon-consensus", "reth-engine-primitives", + "reth-ethereum-engine-primitives", "reth-evm", + "reth-evm-ethereum", "reth-interfaces", "reth-ipc", "reth-metrics", "reth-network-api", - "reth-node-ethereum", "reth-payload-builder", "reth-primitives", "reth-provider", @@ -7329,7 +7764,7 @@ dependencies = [ "reth-transaction-pool", "serde", "serde_json", - "strum 0.26.2", + "strum", "thiserror", "tokio", "tower", @@ -7339,7 +7774,7 @@ dependencies = [ [[package]] name = "reth-rpc-engine-api" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "alloy-rlp", "assert_matches", @@ -7349,10 +7784,9 @@ dependencies = [ "metrics", "reth-beacon-consensus", "reth-engine-primitives", + "reth-ethereum-engine-primitives", "reth-interfaces", "reth-metrics", - "reth-node-ethereum", - "reth-node-optimism", "reth-payload-builder", "reth-primitives", "reth-provider", @@ -7368,39 +7802,35 @@ dependencies = [ [[package]] name = "reth-rpc-types" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ - "alloy-genesis", "alloy-primitives", - "alloy-rlp", - "alloy-rpc-types", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=77c1240)", "alloy-rpc-types-anvil", + "alloy-rpc-types-beacon", "alloy-rpc-types-engine", "alloy-rpc-types-trace", "arbitrary", "bytes", - "enr", "ethereum_ssz", "ethereum_ssz_derive", "jsonrpsee-types", "proptest", "proptest-derive", "rand 0.8.5", - "secp256k1 0.27.0", "serde", "serde_json", "serde_with", "similar-asserts", "thiserror", - "url", ] [[package]] name = "reth-rpc-types-compat" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "alloy-rlp", - "alloy-rpc-types", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=77c1240)", "reth-primitives", "reth-rpc-types", "serde_json", @@ -7408,7 +7838,7 @@ dependencies = [ [[package]] name = "reth-stages" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "alloy-rlp", "assert_matches", @@ -7422,12 +7852,15 @@ dependencies = [ "rayon", "reth-codecs", "reth-config", + "reth-consensus", "reth-db", "reth-downloaders", "reth-etl", + "reth-evm", "reth-evm-ethereum", "reth-exex", "reth-interfaces", + "reth-network-types", "reth-primitives", "reth-provider", "reth-revm", @@ -7443,18 +7876,20 @@ dependencies = [ [[package]] name = "reth-stages-api" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "aquamarine", "assert_matches", "auto_impl", "futures-util", "metrics", + "reth-consensus", "reth-db", "reth-interfaces", "reth-metrics", "reth-primitives", "reth-provider", + "reth-prune", "reth-static-file", "reth-tokio-util", "thiserror", @@ -7465,11 +7900,10 @@ dependencies = [ [[package]] name = "reth-static-file" -version = "0.2.0-beta.6" +version = 
"0.2.0-beta.7" dependencies = [ "assert_matches", - "clap", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "rayon", "reth-db", "reth-interfaces", @@ -7485,7 +7919,7 @@ dependencies = [ [[package]] name = "reth-tasks" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "dyn-clone", "futures-util", @@ -7499,9 +7933,18 @@ dependencies = [ "tracing-futures", ] +[[package]] +name = "reth-testing-utils" +version = "0.2.0-beta.7" +dependencies = [ + "alloy-genesis 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=77c1240)", + "reth-primitives", + "secp256k1", +] + [[package]] name = "reth-tokio-util" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "tokio", "tokio-stream", @@ -7509,7 +7952,7 @@ dependencies = [ [[package]] name = "reth-tracing" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "clap", "eyre", @@ -7523,7 +7966,7 @@ dependencies = [ [[package]] name = "reth-transaction-pool" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "alloy-rlp", "aquamarine", @@ -7531,23 +7974,24 @@ dependencies = [ "auto_impl", "bitflags 2.5.0", "criterion", - "fnv", "futures-util", "itertools 0.12.1", "metrics", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "paste", "pprof", "proptest", "rand 0.8.5", "reth-eth-wire", "reth-metrics", + "reth-network-types", "reth-primitives", "reth-provider", "reth-revm", "reth-tasks", "reth-tracing", "revm", + "rustc-hash", "schnellru", "serde", "serde_json", @@ -7561,7 +8005,7 @@ dependencies = [ [[package]] name = "reth-trie" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "alloy-rlp", "auto_impl", @@ -7587,7 +8031,7 @@ dependencies = [ [[package]] name = "reth-trie-parallel" -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" dependencies = [ "alloy-rlp", "criterion", @@ -7627,10 +8071,10 @@ dependencies = [ [[package]] name = "revm-inspectors" version = "0.1.0" -source = "git+https://github.com/paradigmxyz/evm-inspectors?rev=dc614ee#dc614eec85ee4d4af938865b121fad58ec7dad5f" +source = "git+https://github.com/paradigmxyz/evm-inspectors?rev=7168ac5#7168ac55682fb420da7a82ed94bfb0c30a034113" dependencies = [ "alloy-primitives", - "alloy-rpc-types", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=77c1240)", "alloy-rpc-types-trace", "alloy-sol-types", "anstyle", @@ -7664,7 +8108,7 @@ dependencies = [ "once_cell", "revm-primitives", "ripemd", - "secp256k1 0.28.2", + "secp256k1", "sha2 0.10.8", "substrate-bn", ] @@ -7889,9 +8333,9 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.32" +version = "0.38.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "65e04861e65f21776e67888bfbea442b3642beaa0138fdb1dd7a84a52dffdb89" +checksum = "70dc5ec042f7a43c4a73241207cecc9873a06d45debb38b329f8541d85c2730f" dependencies = [ "bitflags 2.5.0", "errno", @@ -7902,9 +8346,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.21.11" +version = "0.21.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fecbfb7b1444f477b345853b1fce097a2c6fb637b2bfb87e6bc5db0f043fae4" +checksum = "3f56a14d1f48b391359b22f731fd4bd7e43c97f3c50eee276f3aa09c94784d3e" dependencies = [ "log", "ring 0.17.8", @@ -7921,7 +8365,7 @@ dependencies = [ "log", "ring 0.17.8", "rustls-pki-types", - "rustls-webpki 0.102.2", + "rustls-webpki 0.102.3", "subtle", "zeroize", ] @@ -7972,9 +8416,9 @@ dependencies = [ [[package]] name = "rustls-pki-types" -version = "1.4.1" +version = "1.5.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "ecd36cc4259e3e4514335c4a138c6b43171a8d61d8f5c9348f9fc7529416f247" +checksum = "beb461507cee2c2ff151784c52762cf4d9ff6a61f3e80968600ed24fa837fa54" [[package]] name = "rustls-webpki" @@ -7988,9 +8432,9 @@ dependencies = [ [[package]] name = "rustls-webpki" -version = "0.102.2" +version = "0.102.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "faaa0a62740bedb9b2ef5afa303da42764c012f743917351dc9a237ea1663610" +checksum = "f3bce581c0dd41bce533ce695a1437fa16a7ab5ac3ccfa99fe1a620a7885eabf" dependencies = [ "ring 0.17.8", "rustls-pki-types", @@ -8047,6 +8491,15 @@ dependencies = [ "winapi-util", ] +[[package]] +name = "scc" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec96560eea317a9cc4e0bb1f6a2c93c09a19b8c4fc5cb3fcc0ec1c094cd783e2" +dependencies = [ + "sdd", +] + [[package]] name = "schannel" version = "0.1.23" @@ -8083,6 +8536,12 @@ dependencies = [ "untrusted 0.9.0", ] +[[package]] +name = "sdd" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b84345e4c9bd703274a082fb80caaa99b7612be48dfaa1dd9266577ec412309d" + [[package]] name = "sec1" version = "0.7.3" @@ -8097,17 +8556,6 @@ dependencies = [ "zeroize", ] -[[package]] -name = "secp256k1" -version = "0.27.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "25996b82292a7a57ed3508f052cfff8640d38d32018784acd714758b43da9c8f" -dependencies = [ - "rand 0.8.5", - "secp256k1-sys 0.8.1", - "serde", -] - [[package]] name = "secp256k1" version = "0.28.2" @@ -8115,16 +8563,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d24b59d129cdadea20aea4fb2352fa053712e5d713eee47d700cd4b2bc002f10" dependencies = [ "rand 0.8.5", - "secp256k1-sys 0.9.2", -] - -[[package]] -name = "secp256k1-sys" -version = "0.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70a129b9e9efbfb223753b9163c4ab3b13cff7fd9c7f010fbac25ab4099fa07e" -dependencies = [ - "cc", + "secp256k1-sys", + "serde", ] [[package]] @@ -8194,9 +8634,9 @@ checksum = "f638d531eccd6e23b980caf34876660d38e265409d8e99b397ab71eb3612fad0" [[package]] name = "serde" -version = "1.0.198" +version = "1.0.199" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9846a40c979031340571da2545a4e5b7c4163bdae79b301d5f86d03979451fcc" +checksum = "0c9f6e76df036c77cd94996771fb40db98187f096dd0b9af39c6c6e452ba966a" dependencies = [ "serde_derive", ] @@ -8212,9 +8652,9 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.198" +version = "1.0.199" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e88edab869b01783ba905e7d0153f9fc1a6505a96e4ad3018011eedb838566d9" +checksum = "11bd257a6541e141e42ca6d24ae26f7714887b47e89aa739099104c7e4d3b7fc" dependencies = [ "proc-macro2", "quote", @@ -8267,11 +8707,11 @@ dependencies = [ [[package]] name = "serde_with" -version = "3.7.0" +version = "3.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee80b0e361bbf88fd2f6e242ccd19cfda072cb0faa6ae694ecee08199938569a" +checksum = "2c85f8e96d1d6857f13768fcbd895fcb06225510022a2774ed8b5150581847b0" dependencies = [ - "base64 0.21.7", + "base64 0.22.0", "chrono", "hex", "indexmap 1.9.3", @@ -8285,9 +8725,9 @@ dependencies = [ [[package]] name = "serde_with_macros" -version = "3.7.0" +version = "3.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "6561dc161a9224638a31d876ccdfefbc1df91d3f3a8342eddb35f055d48c7655" +checksum = "c8b3a576c4eb2924262d5951a3b737ccaf16c931e39a2810c36f9a7e25575557" dependencies = [ "darling 0.20.8", "proc-macro2", @@ -8297,23 +8737,23 @@ dependencies = [ [[package]] name = "serial_test" -version = "3.0.0" +version = "3.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "953ad9342b3aaca7cb43c45c097dd008d4907070394bd0751a0aa8817e5a018d" +checksum = "4b4b487fe2acf240a021cf57c6b2b4903b1e78ca0ecd862a71b71d2a51fed77d" dependencies = [ - "dashmap", "futures", - "lazy_static", "log", - "parking_lot 0.12.1", + "once_cell", + "parking_lot 0.12.2", + "scc", "serial_test_derive", ] [[package]] name = "serial_test_derive" -version = "3.0.0" +version = "3.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b93fb4adc70021ac1b47f7d45e8cc4169baaa7ea58483bc5b721d19a26202212" +checksum = "82fe9db325bcef1fbcde82e078a5cc4efdf787e96b3b9cf45b50b529f2083d67" dependencies = [ "proc-macro2", "quote", @@ -8435,9 +8875,9 @@ dependencies = [ [[package]] name = "signal-hook-registry" -version = "1.4.1" +version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8229b473baa5980ac72ef434c4415e70c4b5e71b423043adb4ba059f89c99a1" +checksum = "a9e9e0b4211b72e7b8b6e85c807d36c212bdb33ea8587f7569562a84df5465b1" dependencies = [ "libc", ] @@ -8569,6 +9009,15 @@ version = "0.9.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67" +[[package]] +name = "spinning" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2d4f0e86297cad2658d92a707320d87bf4e6ae1050287f51d19b67ef3f153a7b" +dependencies = [ + "lock_api", +] + [[package]] name = "spki" version = "0.7.3" @@ -8587,12 +9036,12 @@ checksum = "3b9b39299b249ad65f3b7e96443bad61c02ca5cd3589f46cb6d610a0fd6c0d6a" [[package]] name = "stability" -version = "0.1.1" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ebd1b177894da2a2d9120208c3386066af06a488255caabc5de8ddca22dbc3ce" +checksum = "2ff9eaf853dec4c8802325d8b6d3dffa86cc707fd7a1a4cdbf416e13b061787a" dependencies = [ "quote", - "syn 1.0.109", + "syn 2.0.60", ] [[package]] @@ -8631,35 +9080,13 @@ version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f" -[[package]] -name = "strum" -version = "0.25.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "290d54ea6f91c969195bdbcd7442c8c2a2ba87da8bf60a7ee86a235d4bc1e125" -dependencies = [ - "strum_macros 0.25.3", -] - [[package]] name = "strum" version = "0.26.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5d8cec3501a5194c432b2b7976db6b7d10ec95c253208b45f83f7136aa985e29" dependencies = [ - "strum_macros 0.26.2", -] - -[[package]] -name = "strum_macros" -version = "0.25.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23dc1fa9ac9c169a78ba62f0b841814b7abae11bdd047b9c58f893439e309ea0" -dependencies = [ - "heck 0.4.1", - "proc-macro2", - "quote", - "rustversion", - "syn 2.0.60", + "strum_macros", ] [[package]] @@ -8751,9 +9178,9 @@ dependencies = [ [[package]] name = "syn-solidity" -version = "0.7.0" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"4497156948bd342b52038035a6fa514a89626e37af9d2c52a5e8d8ebcc7ee479" +checksum = "5aa0cefd02f532035d83cfec82647c6eb53140b0485220760e669f4bad489e36" dependencies = [ "paste", "proc-macro2", @@ -8812,7 +9239,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "85b77fafb263dd9d05cbeac119526425676db3784113aa9295c88498cbf8bff1" dependencies = [ "cfg-if", - "fastrand 2.0.2", + "fastrand 2.1.0", "rustix", "windows-sys 0.52.0", ] @@ -9037,6 +9464,12 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" +[[package]] +name = "to_method" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c7c4ceeeca15c8384bbc3e011dbd8fccb7f068a440b752b7d9b32ceb0ca0e2e8" + [[package]] name = "tokio" version = "1.37.0" @@ -9048,7 +9481,7 @@ dependencies = [ "libc", "mio", "num_cpus", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "pin-project-lite", "signal-hook-registry", "socket2 0.5.6", @@ -9073,7 +9506,7 @@ version = "0.24.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c28327cf380ac148141087fbfb9de9d7bd4e84ab5d2c28fbc911d753de8a7081" dependencies = [ - "rustls 0.21.11", + "rustls 0.21.12", "tokio", ] @@ -9169,7 +9602,7 @@ dependencies = [ "serde", "serde_spanned", "toml_datetime", - "winnow 0.6.6", + "winnow 0.6.7", ] [[package]] @@ -9235,16 +9668,6 @@ version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b6bc1c9ce2b5135ac7f93c72918fc37feb872bdc6a5533a8b85eb4b86bfdae52" -[[package]] -name = "trace-transaction-cli" -version = "0.0.0" -dependencies = [ - "clap", - "futures-util", - "reth", - "reth-node-ethereum", -] - [[package]] name = "tracing" version = "0.1.40" @@ -9458,7 +9881,7 @@ dependencies = [ "ipconfig", "lru-cache", "once_cell", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "rand 0.8.5", "resolv-conf", "smallvec", @@ -9489,6 +9912,16 @@ dependencies = [ "toml", ] +[[package]] +name = "txpool-tracing" +version = "0.0.0" +dependencies = [ + "clap", + "futures-util", + "reth", + "reth-node-ethereum", +] + [[package]] name = "typenum" version = "1.17.0" @@ -9558,9 +9991,9 @@ checksum = "d4c87d22b6e3f4a18d4d40ef354e97c90fcb14dd91d7dc0aa9d8a1172ebf7202" [[package]] name = "unicode-width" -version = "0.1.11" +version = "0.1.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e51733f11c9c4f72aa0c160008246859e340b00807569a0da0e7a1079b27ba85" +checksum = "68f5e5f3158ecfd4b8ff6fe086db7c8467a2dfdac97fe420f2b7c4aa97af66d6" [[package]] name = "universal-hash" @@ -9808,12 +10241,6 @@ dependencies = [ "wasm-bindgen", ] -[[package]] -name = "webpki-roots" -version = "0.25.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f20c57d8d7db6d3b86154206ae5d8fba62dd39573114de97c2cb0578251f8e1" - [[package]] name = "webpki-roots" version = "0.26.1" @@ -9859,11 +10286,11 @@ checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" [[package]] name = "winapi-util" -version = "0.1.6" +version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f29e6f9198ba0d26b4c9f07dbe6f9ed633e1f3d5b8b414090084349e46a52596" +checksum = "4d4cc384e1e73b93bafa6fb4f1df8c41695c8a91cf9c4c64358067d15a7b6c6b" dependencies = [ - "winapi", + "windows-sys 0.52.0", ] [[package]] @@ -10041,9 +10468,9 @@ dependencies = [ [[package]] name = "winnow" -version = "0.6.6" +version = "0.6.7" source 
= "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0c976aaaa0e1f90dbb21e9587cdaf1d9679a1cde8875c0d6bd83ab96a208352" +checksum = "14b9415ee827af173ebb3f15f9083df5a122eb93572ec28741fb153356ea2578" dependencies = [ "memchr", ] diff --git a/Cargo.toml b/Cargo.toml index 8cf53ef55..fe219f51b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -6,8 +6,9 @@ members = [ "crates/config/", "crates/consensus/auto-seal/", "crates/consensus/beacon/", - "crates/consensus/beacon-core/", + "crates/ethereum/consensus/", "crates/consensus/common/", + "crates/consensus/consensus/", "crates/ethereum-forks/", "crates/e2e-test-utils/", "crates/etl/", @@ -28,6 +29,7 @@ members = [ "crates/net/nat/", "crates/net/network/", "crates/net/network-api/", + "crates/net/types/", "crates/payload/basic/", "crates/payload/builder/", "crates/payload/ethereum/", @@ -47,9 +49,11 @@ members = [ "crates/rpc/rpc-types-compat/", "crates/engine-primitives/", "crates/ethereum/engine-primitives/", - "crates/node-ethereum/", - "crates/node-builder/", + "crates/ethereum/node", + "crates/node/builder/", + "crates/optimism/consensus", "crates/optimism/node/", + "crates/optimism/evm/", "crates/node-core/", "crates/node/api/", "crates/stages/", @@ -68,23 +72,25 @@ members = [ "crates/transaction-pool/", "crates/trie/", "crates/trie-parallel/", - "examples/", - "examples/additional-rpc-namespace-in-cli/", + "examples/node-custom-rpc/", "examples/beacon-api-sse/", - "examples/cli-extension-event-hooks/", + "examples/node-event-hooks/", "examples/custom-evm/", - "examples/custom-node/", + "examples/custom-engine-types/", "examples/custom-node-components/", "examples/custom-dev-node/", "examples/custom-payload-builder/", "examples/manual-p2p/", + "examples/network/", + "examples/network-txpool/", "examples/rpc-db/", - "examples/trace-transaction-cli/", + "examples/txpool-tracing/", "examples/polygon-p2p/", "examples/custom-inspector/", - "examples/exex/minimal/", - "examples/exex/op-bridge/", + "examples/exex/*", + "examples/db-access", "testing/ef-tests/", + "testing/testing-utils", ] default-members = ["bin/reth"] @@ -159,7 +165,7 @@ unnecessary_struct_initialization = "allow" use_self = "allow" [workspace.package] -version = "0.2.0-beta.6" +version = "0.2.0-beta.7" edition = "2021" rust-version = "1.76" license = "MIT OR Apache-2.0" @@ -202,11 +208,12 @@ reth = { path = "bin/reth" } reth-auto-seal-consensus = { path = "crates/consensus/auto-seal" } reth-basic-payload-builder = { path = "crates/payload/basic" } reth-beacon-consensus = { path = "crates/consensus/beacon" } -reth-beacon-consensus-core = { path = "crates/consensus/beacon-core" } +reth-ethereum-consensus = { path = "crates/ethereum/consensus" } reth-blockchain-tree = { path = "crates/blockchain-tree" } reth-cli-runner = { path = "crates/cli/runner" } reth-codecs = { path = "crates/storage/codecs" } reth-config = { path = "crates/config" } +reth-consensus = { path = "crates/consensus/consensus" } reth-consensus-common = { path = "crates/consensus/common" } reth-db = { path = "crates/storage/db" } reth-discv4 = { path = "crates/net/discv4" } @@ -215,9 +222,10 @@ reth-dns-discovery = { path = "crates/net/dns" } reth-e2e-test-utils = { path = "crates/e2e-test-utils" } reth-engine-primitives = { path = "crates/engine-primitives" } reth-ethereum-engine-primitives = { path = "crates/ethereum/engine-primitives" } -reth-node-builder = { path = "crates/node-builder" } -reth-node-ethereum = { path = "crates/node-ethereum" } +reth-node-builder = { path = "crates/node/builder" } 
+reth-node-ethereum = { path = "crates/ethereum/node" } reth-node-optimism = { path = "crates/optimism/node" } +reth-evm-optimism = { path = "crates/optimism/evm" } reth-node-core = { path = "crates/node-core" } reth-node-api = { path = "crates/node/api" } reth-downloaders = { path = "crates/net/downloaders" } @@ -241,6 +249,7 @@ reth-net-common = { path = "crates/net/common" } reth-net-nat = { path = "crates/net/nat" } reth-network = { path = "crates/net/network" } reth-network-api = { path = "crates/net/network-api" } +reth-network-types = { path = "crates/net/types" } reth-nippy-jar = { path = "crates/storage/nippy-jar" } reth-payload-builder = { path = "crates/payload/builder" } reth-payload-validator = { path = "crates/payload/validator" } @@ -264,39 +273,37 @@ reth-tracing = { path = "crates/tracing" } reth-transaction-pool = { path = "crates/transaction-pool" } reth-trie = { path = "crates/trie" } reth-trie-parallel = { path = "crates/trie-parallel" } +reth-optimism-consensus = { path = "crates/optimism/consensus" } reth-node-events = { path = "crates/node/events" } +reth-testing-utils = { path = "testing/testing-utils" } # revm -revm = { version = "8.0.0", features = [ - "std", - "secp256k1", -], default-features = false } -revm-primitives = { version = "3.1.0", features = [ - "std", -], default-features = false } -revm-inspectors = { git = "https://github.com/paradigmxyz/evm-inspectors", rev = "dc614ee" } +revm = { version = "8.0.0", features = ["std", "secp256k1"], default-features = false } +revm-primitives = { version = "3.1.0", features = ["std"], default-features = false } +revm-inspectors = { git = "https://github.com/paradigmxyz/evm-inspectors", rev = "7168ac5" } # eth alloy-chains = "0.1.15" -alloy-primitives = "0.7.0" -alloy-dyn-abi = "0.7.0" -alloy-sol-types = "0.7.0" +alloy-primitives = "0.7.2" +alloy-dyn-abi = "0.7.2" +alloy-sol-types = "0.7.2" alloy-rlp = "0.3.4" alloy-trie = "0.3.1" -alloy-rpc-types = { git = "https://github.com/alloy-rs/alloy", rev = "39b8695" } -alloy-rpc-types-anvil = { git = "https://github.com/alloy-rs/alloy", rev = "39b8695" } -alloy-rpc-types-trace = { git = "https://github.com/alloy-rs/alloy", rev = "39b8695" } -alloy-rpc-types-engine = { git = "https://github.com/alloy-rs/alloy", rev = "39b8695" } -alloy-genesis = { git = "https://github.com/alloy-rs/alloy", rev = "39b8695" } -alloy-node-bindings = { git = "https://github.com/alloy-rs/alloy", rev = "39b8695" } -alloy-provider = { git = "https://github.com/alloy-rs/alloy", rev = "39b8695", default-features = false, features = [ +alloy-rpc-types = { git = "https://github.com/alloy-rs/alloy", rev = "77c1240" } +alloy-rpc-types-anvil = { git = "https://github.com/alloy-rs/alloy", rev = "77c1240" } +alloy-rpc-types-trace = { git = "https://github.com/alloy-rs/alloy", rev = "77c1240" } +alloy-rpc-types-engine = { git = "https://github.com/alloy-rs/alloy", rev = "77c1240" } +alloy-rpc-types-beacon = { git = "https://github.com/alloy-rs/alloy", rev = "77c1240" } +alloy-genesis = { git = "https://github.com/alloy-rs/alloy", rev = "77c1240" } +alloy-node-bindings = { git = "https://github.com/alloy-rs/alloy", rev = "77c1240" } +alloy-provider = { git = "https://github.com/alloy-rs/alloy", rev = "77c1240", default-features = false, features = [ "reqwest", ] } -alloy-eips = { git = "https://github.com/alloy-rs/alloy", default-features = false, rev = "39b8695" } -alloy-signer = { git = "https://github.com/alloy-rs/alloy", rev = "39b8695" } -alloy-signer-wallet = { git = 
"https://github.com/alloy-rs/alloy", rev = "39b8695" } -alloy-network = { git = "https://github.com/alloy-rs/alloy", rev = "39b8695" } -alloy-consensus = { git = "https://github.com/alloy-rs/alloy", rev = "39b8695" } +alloy-eips = { git = "https://github.com/alloy-rs/alloy", default-features = false, rev = "77c1240" } +alloy-signer = { git = "https://github.com/alloy-rs/alloy", rev = "77c1240" } +alloy-signer-wallet = { git = "https://github.com/alloy-rs/alloy", rev = "77c1240" } +alloy-network = { git = "https://github.com/alloy-rs/alloy", rev = "77c1240" } +alloy-consensus = { git = "https://github.com/alloy-rs/alloy", rev = "77c1240" } # misc auto_impl = "1" @@ -304,7 +311,9 @@ aquamarine = "0.5" bytes = "1.5" bitflags = "2.4" clap = "4" +dashmap = "5.5" derive_more = "0.99.17" +fdlimit = "0.3.0" eyre = "0.6" tracing = "0.1.0" tracing-appender = "0.2" @@ -315,6 +324,7 @@ serde_with = "3.3.0" humantime = "2.1" humantime-serde = "1.1" rand = "0.8.5" +rustc-hash = "1.1.0" schnellru = "0.2" strum = "0.26" rayon = "1.7" @@ -323,7 +333,6 @@ parking_lot = "0.12" # Needed for `metrics-macro` to resolve the crate using `::metrics` notation metrics = "0.21.1" modular-bitfield = "0.11.2" -hex-literal = "0.4" once_cell = "1.17" syn = "2.0" nybbles = "0.2.1" @@ -331,6 +340,7 @@ smallvec = "1" dyn-clone = "1.0.17" sha2 = { version = "0.10", default-features = false } paste = "1.0" +url = "2.3" # proc-macros proc-macro2 = "1.0" @@ -348,13 +358,14 @@ futures = "0.3.26" pin-project = "1.0.12" futures-util = "0.3.25" hyper = "0.14.25" +reqwest = { version = "0.12", default-features = false } tower = "0.4" tower-http = "0.4" http = "0.2.8" http-body = "0.4.5" # p2p -discv5 = { git = "https://github.com/sigp/discv5", rev = "04ac004" } +discv5 = "0.6.0" igd-next = "0.14.3" # rpc @@ -363,11 +374,12 @@ jsonrpsee-core = "0.22" jsonrpsee-types = "0.22" # crypto -secp256k1 = { version = "0.27.0", default-features = false, features = [ +secp256k1 = { version = "0.28", default-features = false, features = [ "global-context", "recovery", ] } -enr = { version = "=0.10.0", default-features = false, features = ["k256"] } +# TODO: Remove `k256` feature: https://github.com/sigp/enr/pull/74 +enr = { version = "0.12.0", default-features = false, features = ["k256", "rust-secp256k1"] } # for eip-4844 c-kzg = "1.0.0" diff --git a/DockerfileOp.cross b/DockerfileOp.cross new file mode 100644 index 000000000..47606a828 --- /dev/null +++ b/DockerfileOp.cross @@ -0,0 +1,15 @@ +# This image is meant to enable cross-architecture builds. +# It assumes the reth binary has already been compiled for `$TARGETPLATFORM` and is +# locatable in `./dist/bin/$TARGETARCH` +FROM --platform=$TARGETPLATFORM ubuntu:22.04 + +LABEL org.opencontainers.image.source=https://github.com/paradigmxyz/reth +LABEL org.opencontainers.image.licenses="MIT OR Apache-2.0" + +# Filled by docker buildx +ARG TARGETARCH + +COPY ./dist/bin/$TARGETARCH/op-reth /usr/local/bin/op-reth + +EXPOSE 30303 30303/udp 9001 8545 8546 +ENTRYPOINT ["/usr/local/bin/op-reth"] diff --git a/Makefile b/Makefile index c8adf4ff9..ada2149b8 100644 --- a/Makefile +++ b/Makefile @@ -95,6 +95,7 @@ op-build-aarch64-unknown-linux-gnu: export JEMALLOC_SYS_WITH_LG_PAGE=16 # No jemalloc on Windows build-x86_64-pc-windows-gnu: FEATURES := $(filter-out jemalloc jemalloc-prof,$(FEATURES)) +op-build-x86_64-pc-windows-gnu: FEATURES := $(filter-out jemalloc jemalloc-prof,$(FEATURES)) # Note: The additional rustc compiler flags are for intrinsics needed by MDBX. 
# See: https://github.com/cross-rs/cross/wiki/FAQ#undefined-reference-with-build-std @@ -116,6 +117,10 @@ build-x86_64-apple-darwin: $(MAKE) build-native-x86_64-apple-darwin build-aarch64-apple-darwin: $(MAKE) build-native-aarch64-apple-darwin +op-build-x86_64-apple-darwin: + $(MAKE) op-build-native-x86_64-apple-darwin +op-build-aarch64-apple-darwin: + $(MAKE) op-build-native-aarch64-apple-darwin # Create a `.tar.gz` containing a binary for a specific target. define tarball_release_binary @@ -227,6 +232,50 @@ define docker_build_push --push endef +##@ Optimism docker + +# Note: This requires a buildx builder with emulation support. For example: +# +# `docker run --privileged --rm tonistiigi/binfmt --install amd64,arm64` +# `docker buildx create --use --driver docker-container --name cross-builder` +.PHONY: op-docker-build-push +op-docker-build-push: ## Build and push a cross-arch Docker image tagged with the latest git tag. + $(call op_docker_build_push,$(GIT_TAG),$(GIT_TAG)) + +# Note: This requires a buildx builder with emulation support. For example: +# +# `docker run --privileged --rm tonistiigi/binfmt --install amd64,arm64` +# `docker buildx create --use --driver docker-container --name cross-builder` +.PHONY: op-docker-build-push-latest +op-docker-build-push-latest: ## Build and push a cross-arch Docker image tagged with the latest git tag and `latest`. + $(call op_docker_build_push,$(GIT_TAG),latest) + +# Note: This requires a buildx builder with emulation support. For example: +# +# `docker run --privileged --rm tonistiigi/binfmt --install amd64,arm64` +# `docker buildx create --use --name cross-builder` +.PHONY: op-docker-build-push-nightly +op-docker-build-push-nightly: ## Build and push cross-arch Docker image tagged with the latest git tag with a `-nightly` suffix, and `latest-nightly`. + $(call op_docker_build_push,$(GIT_TAG)-nightly,latest-nightly) + +# Create a cross-arch Docker image with the given tags and push it +define op_docker_build_push + $(MAKE) op-build-x86_64-unknown-linux-gnu + mkdir -p $(BIN_DIR)/amd64 + cp $(BUILD_PATH)/x86_64-unknown-linux-gnu/$(PROFILE)/op-reth $(BIN_DIR)/amd64/op-reth + + $(MAKE) op-build-aarch64-unknown-linux-gnu + mkdir -p $(BIN_DIR)/arm64 + cp $(BUILD_PATH)/aarch64-unknown-linux-gnu/$(PROFILE)/op-reth $(BIN_DIR)/arm64/op-reth + + docker buildx build --file ./DockerfileOp.cross . \ + --platform linux/amd64,linux/arm64 \ + --tag $(DOCKER_IMAGE_NAME):$(1) \ + --tag $(DOCKER_IMAGE_NAME):$(2) \ + --provenance=false \ + --push +endef + ##@ Other .PHONY: clean @@ -263,6 +312,10 @@ update-book-cli: ## Update book cli documentation. maxperf: ## Builds `reth` with the most aggressive optimisations. RUSTFLAGS="-C target-cpu=native" cargo build --profile maxperf --features jemalloc,asm-keccak +.PHONY: maxperf-op +maxperf-op: ## Builds `op-reth` with the most aggressive optimisations. + RUSTFLAGS="-C target-cpu=native" cargo build --profile maxperf --features jemalloc,asm-keccak,optimism --bin op-reth + .PHONY: maxperf-no-asm maxperf-no-asm: ## Builds `reth` with the most aggressive optimisations, minus the "asm-keccak" feature. RUSTFLAGS="-C target-cpu=native" cargo build --profile maxperf --features jemalloc @@ -303,11 +356,21 @@ lint-other-targets: --all-features \ -- -D warnings +lint-codespell: ensure-codespell + codespell + +ensure-codespell: + @if ! command -v codespell &> /dev/null; then \ + echo "codespell not found. 
Please install it by running the command 'pip install codespell' or refer to the following link for more information: https://github.com/codespell-project/codespell"; \ + exit 1; \ + fi + lint: make fmt && \ make lint-reth && \ make lint-op-reth && \ - make lint-other-targets + make lint-other-targets && \ + make lint-codespell fix-lint-reth: cargo +nightly clippy \ @@ -404,8 +467,11 @@ test: make test-doc && \ make test-other-targets +cfg-check: + cargo +nightly -Zcheck-cfg c + pr: - make fmt && \ + make cfg-check && \ make lint && \ make docs && \ make test diff --git a/README.md b/README.md index 3f5e434ee..47d833712 100644 --- a/README.md +++ b/README.md @@ -45,7 +45,7 @@ We actively recommend professional node operators to switch to Reth in production While we are aware of parties running Reth staking nodes in production, we do *not* encourage usage in production staking environments by non-professionals until our audits are done, and the 1.0 version of Reth is released, but we are available to support without warranty or liability. More historical context below: -* We are releasing 1.0 "production-ready" stable Reth once our Reth & Revm audits are done. ETA ~April 2024. +* We are releasing 1.0 "production-ready" stable Reth once our Reth & Revm audits are done. ETA ~May 2024. * Reth is currently undergoing an audit with [Sigma Prime](https://sigmaprime.io/), the developers of [Lighthouse](https://github.com/sigp/lighthouse), the Rust Consensus Layer implementation. * Revm (the EVM used in Reth) is undergoing an audit with [Guido Vranken](https://twitter.com/guidovranken) (#1 [Ethereum Bug Bounty](https://ethereum.org/en/bug-bounty)). * We are releasing [beta](https://github.com/paradigmxyz/reth/releases/tag/v0.2.0-beta.1) on Monday March 4th 2024, our first breaking change to the database model, providing faster query speed, smaller database footprint, and allowing "history" to be mounted on separate drives.
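Taken together, the new op-reth Makefile targets above compose as follows. This is an illustrative shell session, not taken from the repository: the image name is a placeholder, and the builder setup lines are copied from the Makefile comments.

```sh
# One-time buildx setup with emulation support (verbatim from the Makefile notes).
docker run --privileged --rm tonistiigi/binfmt --install amd64,arm64
docker buildx create --use --driver docker-container --name cross-builder

# Optimized local build of the op-reth binary.
make maxperf-op

# Cross-arch image build and push, tagged with the latest git tag and `latest`.
# The image name is a placeholder; override DOCKER_IMAGE_NAME the same way for
# the plain and nightly push targets.
make DOCKER_IMAGE_NAME=ghcr.io/<owner>/op-reth PROFILE=maxperf op-docker-build-push-latest
```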
diff --git a/bin/reth/Cargo.toml b/bin/reth/Cargo.toml index ea1ee87f0..c1ed8981a 100644 --- a/bin/reth/Cargo.toml +++ b/bin/reth/Cargo.toml @@ -19,16 +19,15 @@ reth-primitives = { workspace = true, features = ["arbitrary", "clap"] } reth-db = { workspace = true, features = ["mdbx"] } reth-exex.workspace = true reth-provider = { workspace = true } +reth-evm.workspace = true reth-revm.workspace = true reth-stages.workspace = true reth-interfaces = { workspace = true, features = ["clap"] } reth-transaction-pool.workspace = true reth-beacon-consensus.workspace = true -reth-auto-seal-consensus.workspace = true reth-cli-runner.workspace = true reth-consensus-common.workspace = true reth-blockchain-tree.workspace = true -reth-rpc-engine-api.workspace = true reth-rpc-builder.workspace = true reth-rpc.workspace = true reth-rpc-types.workspace = true @@ -44,8 +43,7 @@ reth-payload-builder.workspace = true reth-payload-validator.workspace = true reth-basic-payload-builder.workspace = true reth-discv4.workspace = true -reth-prune.workspace = true -reth-static-file = { workspace = true, features = ["clap"] } +reth-static-file = { workspace = true } reth-trie = { workspace = true, features = ["metrics"] } reth-nippy-jar.workspace = true reth-node-api.workspace = true @@ -56,6 +54,7 @@ reth-node-optimism = { workspace = true, optional = true, features = [ reth-node-core.workspace = true reth-node-builder.workspace = true reth-node-events.workspace = true +reth-consensus.workspace = true # crypto alloy-rlp.workspace = true @@ -64,7 +63,7 @@ alloy-rlp.workspace = true tracing.workspace = true # io -fdlimit = "0.3.0" +fdlimit.workspace = true serde.workspace = true serde_json.workspace = true confy.workspace = true @@ -80,7 +79,9 @@ rand.workspace = true # tui comfy-table = "7.0" crossterm = "0.27.0" -ratatui = "0.25.0" +ratatui = { version = "0.26", default-features = false, features = [ + "crossterm", +] } human_bytes = "0.4.1" # async @@ -131,13 +132,10 @@ min-trace-logs = ["tracing/release_max_level_trace"] optimism = [ "reth-primitives/optimism", - "reth-revm/optimism", "reth-interfaces/optimism", "reth-rpc/optimism", - "reth-rpc-engine-api/optimism", "reth-provider/optimism", "reth-beacon-consensus/optimism", - "reth-auto-seal-consensus/optimism", "reth-blockchain-tree/optimism", "dep:reth-node-optimism", "reth-node-core/optimism", diff --git a/bin/reth/src/cli/mod.rs b/bin/reth/src/cli/mod.rs index d511d7182..deece5b62 100644 --- a/bin/reth/src/cli/mod.rs +++ b/bin/reth/src/cli/mod.rs @@ -1,20 +1,23 @@ //! 
CLI definition and entrypoint to executable +#[cfg(feature = "optimism")] +use crate::commands::import_op; use crate::{ args::{ utils::{chain_help, genesis_value_parser, SUPPORTED_CHAINS}, LogArgs, }, commands::{ - config_cmd, db, debug_cmd, dump_genesis, import, init_cmd, node, node::NoArgs, p2p, - recover, stage, test_vectors, + config_cmd, db, debug_cmd, dump_genesis, import, import_receipts, init_cmd, init_state, + node::{self, NoArgs}, + p2p, recover, stage, test_vectors, }, version::{LONG_VERSION, SHORT_VERSION}, }; use clap::{value_parser, Parser, Subcommand}; use reth_cli_runner::CliRunner; use reth_db::DatabaseEnv; -use reth_node_builder::{InitState, WithLaunchContext}; +use reth_node_builder::{NodeBuilder, WithLaunchContext}; use reth_primitives::ChainSpec; use reth_tracing::FileWorkerGuard; use std::{ffi::OsString, fmt, future::Future, sync::Arc}; @@ -130,7 +133,7 @@ impl<Ext: clap::Args + fmt::Debug> Cli<Ext> { /// ```` pub fn run<L, Fut>(mut self, launcher: L) -> eyre::Result<()> where - L: FnOnce(WithLaunchContext<Arc<DatabaseEnv>, InitState>, Ext) -> Fut, + L: FnOnce(WithLaunchContext<NodeBuilder<Arc<DatabaseEnv>>>, Ext) -> Fut, Fut: Future<Output = eyre::Result<()>>, { // add network name to logs dir @@ -145,7 +148,13 @@ impl<Ext: clap::Args + fmt::Debug> Cli<Ext> { runner.run_command_until_exit(|ctx| command.execute(ctx, launcher)) } Commands::Init(command) => runner.run_blocking_until_ctrl_c(command.execute()), + Commands::InitState(command) => runner.run_blocking_until_ctrl_c(command.execute()), Commands::Import(command) => runner.run_blocking_until_ctrl_c(command.execute()), + Commands::ImportReceipts(command) => { + runner.run_blocking_until_ctrl_c(command.execute()) + } + #[cfg(feature = "optimism")] + Commands::ImportOp(command) => runner.run_blocking_until_ctrl_c(command.execute()), Commands::DumpGenesis(command) => runner.run_blocking_until_ctrl_c(command.execute()), Commands::Db(command) => runner.run_blocking_until_ctrl_c(command.execute()), Commands::Stage(command) => runner.run_command_until_exit(|ctx| command.execute(ctx)), @@ -176,9 +185,19 @@ pub enum Commands<Ext: clap::Args + fmt::Debug = NoArgs> { /// Initialize the database from a genesis file. #[command(name = "init")] Init(init_cmd::InitCommand), + /// Initialize the database from a state dump file. + #[command(name = "init-state")] + InitState(init_state::InitStateCommand), /// This syncs RLP encoded blocks from a file. #[command(name = "import")] Import(import::ImportCommand), + /// This imports RLP encoded receipts from a file. + #[command(name = "import-receipts")] + ImportReceipts(import_receipts::ImportReceiptsCommand), + /// This syncs RLP encoded OP blocks below Bedrock from a file, without executing. + #[cfg(feature = "optimism")] + #[command(name = "import-op")] + ImportOp(import_op::ImportOpCommand), /// Dumps genesis block JSON configuration to stdout. 
DumpGenesis(dump_genesis::DumpGenesisCommand), /// Database debugging utilities diff --git a/bin/reth/src/commands/db/clear.rs b/bin/reth/src/commands/db/clear.rs index a7c32cac1..f985be8ab 100644 --- a/bin/reth/src/commands/db/clear.rs +++ b/bin/reth/src/commands/db/clear.rs @@ -7,7 +7,7 @@ use reth_db::{ TableViewer, Tables, }; use reth_primitives::{static_file::find_fixed_range, StaticFileSegment}; -use reth_provider::ProviderFactory; +use reth_provider::{ProviderFactory, StaticFileProviderFactory}; /// The arguments for the `reth db clear` command #[derive(Parser, Debug)] diff --git a/bin/reth/src/commands/db/get.rs b/bin/reth/src/commands/db/get.rs index 958ced09f..80e3ae393 100644 --- a/bin/reth/src/commands/db/get.rs +++ b/bin/reth/src/commands/db/get.rs @@ -7,6 +7,7 @@ use reth_db::{ tables, RawKey, RawTable, Receipts, TableViewer, Transactions, }; use reth_primitives::{BlockHash, Header, StaticFileSegment}; +use reth_provider::StaticFileProviderFactory; use tracing::error; /// The arguments for the `reth db get` command diff --git a/bin/reth/src/commands/db/mod.rs b/bin/reth/src/commands/db/mod.rs index f28f8375f..6eedabcc7 100644 --- a/bin/reth/src/commands/db/mod.rs +++ b/bin/reth/src/commands/db/mod.rs @@ -108,9 +108,9 @@ impl Command { pub async fn execute(self) -> eyre::Result<()> { // add network name to data dir let data_dir = self.datadir.unwrap_or_chain_default(self.chain.chain); - let db_path = data_dir.db_path(); + let db_path = data_dir.db(); let db_args = self.db.database_args(); - let static_files_path = data_dir.static_files_path(); + let static_files_path = data_dir.static_files(); match self.command { // TODO: We'll need to add this on the DB trait. @@ -159,7 +159,7 @@ impl Command { let provider_factory = ProviderFactory::new(db, self.chain.clone(), static_files_path.clone())?; - let mut tool = DbTool::new(provider_factory, self.chain.clone())?; + let tool = DbTool::new(provider_factory, self.chain.clone())?; tool.drop(db_path, static_files_path)?; } Subcommands::Clear(command) => { diff --git a/bin/reth/src/commands/db/static_files/mod.rs b/bin/reth/src/commands/db/static_files/mod.rs index 9391db76c..8f5930e10 100644 --- a/bin/reth/src/commands/db/static_files/mod.rs +++ b/bin/reth/src/commands/db/static_files/mod.rs @@ -96,11 +96,10 @@ impl Command { }); let db = open_db_read_only( - data_dir.db_path().as_path(), + data_dir.db().as_path(), db_args.with_max_read_transaction_duration(Some(MaxReadTransactionDuration::Unbounded)), )?; - let provider_factory = - Arc::new(ProviderFactory::new(db, chain, data_dir.static_files_path())?); + let provider_factory = Arc::new(ProviderFactory::new(db, chain, data_dir.static_files())?); { if !self.only_bench { diff --git a/bin/reth/src/commands/db/stats.rs b/bin/reth/src/commands/db/stats.rs index 5ffc136dd..b47e7980b 100644 --- a/bin/reth/src/commands/db/stats.rs +++ b/bin/reth/src/commands/db/stats.rs @@ -25,11 +25,11 @@ use tracing::info; pub struct Command { /// Show only the total size for static files. #[arg(long, default_value_t = false)] - only_total_size: bool, + detailed_sizes: bool, - /// Show only the summary per static file segment. + /// Show detailed information per static file segment. #[arg(long, default_value_t = false)] - summary: bool, + detailed_segments: bool, /// Show a checksum of each table in the database. 
/// @@ -152,7 +152,7 @@ impl Command { let mut table = ComfyTable::new(); table.load_preset(comfy_table::presets::ASCII_MARKDOWN); - if !self.only_total_size { + if self.detailed_sizes { table.set_header([ "Segment", "Block Range", @@ -174,8 +174,8 @@ impl Command { ]); } - let static_files = iter_static_files(data_dir.static_files_path())?; - let static_file_provider = StaticFileProvider::new(data_dir.static_files_path())?; + let static_files = iter_static_files(data_dir.static_files())?; + let static_file_provider = StaticFileProvider::new(data_dir.static_files())?; let mut total_data_size = 0; let mut total_index_size = 0; @@ -216,18 +216,7 @@ impl Command { .map(|metadata| metadata.len()) .unwrap_or_default(); - if self.summary { - if segment_columns > 0 { - assert_eq!(segment_columns, columns); - } else { - segment_columns = columns; - } - segment_rows += rows; - segment_data_size += data_size; - segment_index_size += index_size; - segment_offsets_size += offsets_size; - segment_config_size += config_size; - } else { + if self.detailed_segments { let mut row = Row::new(); row.add_cell(Cell::new(segment)) .add_cell(Cell::new(format!("{block_range}"))) @@ -235,7 +224,7 @@ impl Command { tx_range.map_or("N/A".to_string(), |tx_range| format!("{tx_range}")), )) .add_cell(Cell::new(format!("{columns} x {rows}"))); - if !self.only_total_size { + if self.detailed_sizes { row.add_cell(Cell::new(human_bytes(data_size as f64))) .add_cell(Cell::new(human_bytes(index_size as f64))) .add_cell(Cell::new(human_bytes(offsets_size as f64))) @@ -245,6 +234,17 @@ impl Command { (data_size + index_size + offsets_size + config_size) as f64, ))); table.add_row(row); + } else { + if segment_columns > 0 { + assert_eq!(segment_columns, columns); + } else { + segment_columns = columns; + } + segment_rows += rows; + segment_data_size += data_size; + segment_index_size += index_size; + segment_offsets_size += offsets_size; + segment_config_size += config_size; } total_data_size += data_size; @@ -253,7 +253,7 @@ impl Command { total_config_size += config_size; } - if self.summary { + if !self.detailed_segments { let first_ranges = ranges.first().expect("not empty list of ranges"); let last_ranges = ranges.last().expect("not empty list of ranges"); @@ -271,7 +271,7 @@ impl Command { tx_range.map_or("N/A".to_string(), |tx_range| format!("{tx_range}")), )) .add_cell(Cell::new(format!("{segment_columns} x {segment_rows}"))); - if !self.only_total_size { + if self.detailed_sizes { row.add_cell(Cell::new(human_bytes(segment_data_size as f64))) .add_cell(Cell::new(human_bytes(segment_index_size as f64))) .add_cell(Cell::new(human_bytes(segment_offsets_size as f64))) @@ -299,7 +299,7 @@ impl Command { .add_cell(Cell::new("")) .add_cell(Cell::new("")) .add_cell(Cell::new("")); - if !self.only_total_size { + if self.detailed_sizes { row.add_cell(Cell::new(human_bytes(total_data_size as f64))) .add_cell(Cell::new(human_bytes(total_index_size as f64))) .add_cell(Cell::new(human_bytes(total_offsets_size as f64))) diff --git a/bin/reth/src/commands/debug_cmd/build_block.rs b/bin/reth/src/commands/debug_cmd/build_block.rs index 566198ec8..72cc9e1fa 100644 --- a/bin/reth/src/commands/debug_cmd/build_block.rs +++ b/bin/reth/src/commands/debug_cmd/build_block.rs @@ -6,6 +6,7 @@ use crate::{ DatabaseArgs, }, dirs::{DataDirPath, MaybePlatformPath}, + macros::block_executor, }; use alloy_rlp::Decodable; use clap::Parser; @@ -13,16 +14,16 @@ use eyre::Context; use reth_basic_payload_builder::{ BuildArguments, BuildOutcome, Cancelled, 
PayloadBuilder, PayloadConfig, }; -use reth_beacon_consensus::BeaconConsensus; +use reth_beacon_consensus::EthBeaconConsensus; use reth_blockchain_tree::{ BlockchainTree, BlockchainTreeConfig, ShareableBlockchainTree, TreeExternals, }; use reth_cli_runner::CliContext; +use reth_consensus::Consensus; use reth_db::{init_db, DatabaseEnv}; -use reth_interfaces::{consensus::Consensus, RethResult}; +use reth_evm::execute::{BlockExecutionOutput, BlockExecutorProvider, Executor}; +use reth_interfaces::RethResult; use reth_node_api::PayloadBuilderAttributes; -#[cfg(not(feature = "optimism"))] -use reth_node_ethereum::EthEvmConfig; use reth_payload_builder::database::CachedReads; use reth_primitives::{ constants::eip4844::{LoadKzgSettingsError, MAINNET_KZG_TRUSTED_SETUP}, @@ -30,13 +31,14 @@ use reth_primitives::{ revm_primitives::KzgSettings, stage::StageId, Address, BlobTransaction, BlobTransactionSidecar, Bytes, ChainSpec, PooledTransactionsElement, - SealedBlock, SealedBlockWithSenders, Transaction, TransactionSigned, TxEip4844, B256, U256, + Receipts, SealedBlock, SealedBlockWithSenders, Transaction, TransactionSigned, TxEip4844, B256, + U256, }; use reth_provider::{ - providers::BlockchainProvider, BlockHashReader, BlockReader, BlockWriter, ExecutorFactory, - ProviderFactory, StageCheckpointReader, StateProviderFactory, + providers::BlockchainProvider, BlockHashReader, BlockReader, BlockWriter, + BundleStateWithReceipts, ProviderFactory, StageCheckpointReader, StateProviderFactory, }; -use reth_revm::EvmProcessorFactory; +use reth_revm::database::StateProviderDatabase; #[cfg(feature = "optimism")] use reth_rpc_types::engine::OptimismPayloadAttributes; use reth_rpc_types::engine::{BlobsBundleV1, PayloadAttributes}; @@ -113,7 +115,7 @@ impl Command { let factory = ProviderFactory::new( db, self.chain.clone(), - self.datadir.unwrap_or_chain_default(self.chain.chain).static_files_path(), + self.datadir.unwrap_or_chain_default(self.chain.chain).static_files(), )?; let provider = factory.provider()?; @@ -147,7 +149,7 @@ impl Command { pub async fn execute(self, ctx: CliContext) -> eyre::Result<()> { // add network name to data dir let data_dir = self.datadir.unwrap_or_chain_default(self.chain.chain); - let db_path = data_dir.db_path(); + let db_path = data_dir.db(); fs::create_dir_all(&db_path)?; // initialize the database @@ -155,25 +157,19 @@ impl Command { let provider_factory = ProviderFactory::new( Arc::clone(&db), Arc::clone(&self.chain), - data_dir.static_files_path(), + data_dir.static_files(), )?; - let consensus: Arc = Arc::new(BeaconConsensus::new(Arc::clone(&self.chain))); + let consensus: Arc = + Arc::new(EthBeaconConsensus::new(Arc::clone(&self.chain))); - #[cfg(feature = "optimism")] - let evm_config = reth_node_optimism::OptimismEvmConfig::default(); - - #[cfg(not(feature = "optimism"))] - let evm_config = EthEvmConfig::default(); + let executor = block_executor!(self.chain.clone()); // configure blockchain tree - let tree_externals = TreeExternals::new( - provider_factory.clone(), - Arc::clone(&consensus), - EvmProcessorFactory::new(self.chain.clone(), evm_config), - ); + let tree_externals = + TreeExternals::new(provider_factory.clone(), Arc::clone(&consensus), executor); let tree = BlockchainTree::new(tree_externals, BlockchainTreeConfig::default(), None)?; - let blockchain_tree = ShareableBlockchainTree::new(tree); + let blockchain_tree = Arc::new(ShareableBlockchainTree::new(tree)); // fetch the best block from the database let best_block = @@ -218,7 +214,7 @@ impl Command { 
))?; let sidecar: BlobTransactionSidecar = - blobs_bundle.pop_sidecar(blob_versioned_hashes.len()).into(); + blobs_bundle.pop_sidecar(blob_versioned_hashes.len()); // first construct the tx, calculating the length of the tx with sidecar before // insertion @@ -308,11 +304,16 @@ impl Command { let block_with_senders = SealedBlockWithSenders::new(block.clone(), senders).unwrap(); - let executor_factory = EvmProcessorFactory::new(self.chain.clone(), evm_config); - let mut executor = executor_factory.with_state(blockchain_db.latest()?); - executor - .execute_and_verify_receipt(&block_with_senders.clone().unseal(), U256::MAX)?; - let state = executor.take_output_state(); + let db = StateProviderDatabase::new(blockchain_db.latest()?); + let executor = block_executor!(self.chain.clone()).executor(db); + + let BlockExecutionOutput { state, receipts, .. } = + executor.execute((&block_with_senders.clone().unseal(), U256::MAX).into())?; + let state = BundleStateWithReceipts::new( + state, + Receipts::from_block_receipt(receipts), + block.number, + ); debug!(target: "reth::cli", ?state, "Executed block"); let hashed_state = state.hash_state_slow(); diff --git a/bin/reth/src/commands/debug_cmd/execution.rs b/bin/reth/src/commands/debug_cmd/execution.rs index 10f485a73..50e93dfbc 100644 --- a/bin/reth/src/commands/debug_cmd/execution.rs +++ b/bin/reth/src/commands/debug_cmd/execution.rs @@ -7,31 +7,32 @@ use crate::{ DatabaseArgs, NetworkArgs, }, dirs::{DataDirPath, MaybePlatformPath}, + macros::block_executor, utils::get_single_header, }; use clap::Parser; use futures::{stream::select as stream_select, StreamExt}; -use reth_beacon_consensus::BeaconConsensus; +use reth_beacon_consensus::EthBeaconConsensus; use reth_cli_runner::CliContext; use reth_config::{config::EtlConfig, Config}; +use reth_consensus::Consensus; use reth_db::{database::Database, init_db, DatabaseEnv}; use reth_downloaders::{ bodies::bodies::BodiesDownloaderBuilder, headers::reverse_headers::ReverseHeadersDownloaderBuilder, }; use reth_exex::ExExManagerHandle; -use reth_interfaces::{ - consensus::Consensus, - p2p::{bodies::client::BodiesClient, headers::client::HeadersClient}, -}; +use reth_interfaces::p2p::{bodies::client::BodiesClient, headers::client::HeadersClient}; use reth_network::{NetworkEvents, NetworkHandle}; use reth_network_api::NetworkInfo; use reth_node_core::init::init_genesis; -use reth_node_ethereum::EthEvmConfig; use reth_primitives::{ fs, stage::StageId, BlockHashOrNumber, BlockNumber, ChainSpec, PruneModes, B256, }; -use reth_provider::{BlockExecutionWriter, HeaderSyncMode, ProviderFactory, StageCheckpointReader}; +use reth_provider::{ + BlockExecutionWriter, HeaderSyncMode, ProviderFactory, StageCheckpointReader, + StaticFileProviderFactory, +}; use reth_stages::{ sets::DefaultStages, stages::{ExecutionStage, ExecutionStageThresholds, SenderRecoveryStage}, @@ -110,8 +111,7 @@ impl Command { let stage_conf = &config.stages; let (tip_tx, tip_rx) = watch::channel(B256::ZERO); - let factory = - reth_revm::EvmProcessorFactory::new(self.chain.clone(), EthEvmConfig::default()); + let executor = block_executor!(self.chain.clone()); let header_mode = HeaderSyncMode::Tip(tip_rx); let pipeline = Pipeline::builder() @@ -123,14 +123,14 @@ impl Command { Arc::clone(&consensus), header_downloader, body_downloader, - factory.clone(), + executor.clone(), stage_conf.etl.clone(), ) .set(SenderRecoveryStage { commit_threshold: stage_conf.sender_recovery.commit_threshold, }) .set(ExecutionStage::new( - factory, + executor, 
ExecutionStageThresholds { max_blocks: None, max_changes: None, @@ -172,7 +172,7 @@ impl Command { .build(ProviderFactory::new( db, self.chain.clone(), - self.datadir.unwrap_or_chain_default(self.chain.chain).static_files_path(), + self.datadir.unwrap_or_chain_default(self.chain.chain).static_files(), )?) .start_network() .await?; @@ -205,33 +205,34 @@ impl Command { let mut config = Config::default(); let data_dir = self.datadir.unwrap_or_chain_default(self.chain.chain); - let db_path = data_dir.db_path(); + let db_path = data_dir.db(); // Make sure ETL doesn't default to /tmp/, but to whatever datadir is set to if config.stages.etl.dir.is_none() { - config.stages.etl.dir = Some(EtlConfig::from_datadir(&data_dir.data_dir_path())); + config.stages.etl.dir = Some(EtlConfig::from_datadir(data_dir.data_dir())); } fs::create_dir_all(&db_path)?; let db = Arc::new(init_db(db_path, self.db.database_args())?); let provider_factory = - ProviderFactory::new(db.clone(), self.chain.clone(), data_dir.static_files_path())?; + ProviderFactory::new(db.clone(), self.chain.clone(), data_dir.static_files())?; debug!(target: "reth::cli", chain=%self.chain.chain, genesis=?self.chain.genesis_hash(), "Initializing genesis"); init_genesis(provider_factory.clone())?; - let consensus: Arc = Arc::new(BeaconConsensus::new(Arc::clone(&self.chain))); + let consensus: Arc = + Arc::new(EthBeaconConsensus::new(Arc::clone(&self.chain))); // Configure and build network let network_secret_path = - self.network.p2p_secret_key.clone().unwrap_or_else(|| data_dir.p2p_secret_path()); + self.network.p2p_secret_key.clone().unwrap_or_else(|| data_dir.p2p_secret()); let network = self .build_network( &config, ctx.task_executor.clone(), db.clone(), network_secret_path, - data_dir.known_peers_path(), + data_dir.known_peers(), ) .await?; diff --git a/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs b/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs index 3632f4cff..f51426015 100644 --- a/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs +++ b/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs @@ -7,6 +7,7 @@ use crate::{ DatabaseArgs, NetworkArgs, }, dirs::{DataDirPath, MaybePlatformPath}, + macros::block_executor, utils::{get_single_body, get_single_header}, }; use backon::{ConstantBuilder, Retryable}; @@ -14,15 +15,17 @@ use clap::Parser; use reth_cli_runner::CliContext; use reth_config::Config; use reth_db::{init_db, DatabaseEnv}; +use reth_evm::execute::{BlockExecutionOutput, BlockExecutorProvider, Executor}; use reth_interfaces::executor::BlockValidationError; use reth_network::NetworkHandle; use reth_network_api::NetworkInfo; -use reth_node_ethereum::EthEvmConfig; -use reth_primitives::{fs, stage::StageId, BlockHashOrNumber, ChainSpec}; +use reth_primitives::{fs, stage::StageId, BlockHashOrNumber, ChainSpec, Receipts}; use reth_provider::{ - AccountExtReader, ExecutorFactory, HashingWriter, HeaderProvider, LatestStateProviderRef, - OriginalValuesKnown, ProviderFactory, StageCheckpointReader, StorageReader, + AccountExtReader, BundleStateWithReceipts, HashingWriter, HeaderProvider, + LatestStateProviderRef, OriginalValuesKnown, ProviderFactory, StageCheckpointReader, + StateWriter, StaticFileProviderFactory, StorageReader, }; +use reth_revm::database::StateProviderDatabase; use reth_tasks::TaskExecutor; use reth_trie::{updates::TrieKey, StateRoot}; use std::{net::SocketAddr, path::PathBuf, sync::Arc}; @@ -93,7 +96,7 @@ impl Command { .build(ProviderFactory::new( db, self.chain.clone(), - 
self.datadir.unwrap_or_chain_default(self.chain.chain).static_files_path(), + self.datadir.unwrap_or_chain_default(self.chain.chain).static_files(), )?) .start_network() .await?; @@ -108,12 +111,12 @@ impl Command { // add network name to data dir let data_dir = self.datadir.unwrap_or_chain_default(self.chain.chain); - let db_path = data_dir.db_path(); + let db_path = data_dir.db(); fs::create_dir_all(&db_path)?; // initialize the database let db = Arc::new(init_db(db_path, self.db.database_args())?); - let factory = ProviderFactory::new(&db, self.chain.clone(), data_dir.static_files_path())?; + let factory = ProviderFactory::new(&db, self.chain.clone(), data_dir.static_files())?; let provider = factory.provider()?; // Look up merkle checkpoint @@ -125,14 +128,14 @@ impl Command { // Configure and build network let network_secret_path = - self.network.p2p_secret_key.clone().unwrap_or_else(|| data_dir.p2p_secret_path()); + self.network.p2p_secret_key.clone().unwrap_or_else(|| data_dir.p2p_secret()); let network = self .build_network( &config, ctx.task_executor.clone(), db.clone(), network_secret_path, - data_dir.known_peers_path(), + data_dir.known_peers(), ) .await?; @@ -161,24 +164,31 @@ impl Command { ) .await?; - let executor_factory = - reth_revm::EvmProcessorFactory::new(self.chain.clone(), EthEvmConfig::default()); - let mut executor = executor_factory.with_state(LatestStateProviderRef::new( + let db = StateProviderDatabase::new(LatestStateProviderRef::new( provider.tx_ref(), factory.static_file_provider(), )); + let executor = block_executor!(self.chain.clone()).executor(db); + let merkle_block_td = provider.header_td_by_number(merkle_block_number)?.unwrap_or_default(); - executor.execute_and_verify_receipt( - &block - .clone() - .unseal() - .with_recovered_senders() - .ok_or(BlockValidationError::SenderRecoveryError)?, - merkle_block_td + block.difficulty, + let BlockExecutionOutput { state, receipts, .. 
} = executor.execute( + ( + &block + .clone() + .unseal() + .with_recovered_senders() + .ok_or(BlockValidationError::SenderRecoveryError)?, + merkle_block_td + block.difficulty, + ) + .into(), )?; - let block_state = executor.take_output_state(); + let block_state = BundleStateWithReceipts::new( + state, + Receipts::from_block_receipt(receipts), + block.number, + ); // Unpacked `BundleState::state_root_slow` function let (in_memory_state_root, in_memory_updates) = diff --git a/bin/reth/src/commands/debug_cmd/merkle.rs b/bin/reth/src/commands/debug_cmd/merkle.rs index ed8783e96..f452e2e52 100644 --- a/bin/reth/src/commands/debug_cmd/merkle.rs +++ b/bin/reth/src/commands/debug_cmd/merkle.rs @@ -7,37 +7,35 @@ use crate::{ DatabaseArgs, NetworkArgs, }, dirs::{DataDirPath, MaybePlatformPath}, + macros::block_executor, utils::get_single_header, }; use backon::{ConstantBuilder, Retryable}; use clap::Parser; -use reth_beacon_consensus::BeaconConsensus; +use reth_beacon_consensus::EthBeaconConsensus; use reth_cli_runner::CliContext; use reth_config::Config; +use reth_consensus::Consensus; use reth_db::{cursor::DbCursorRO, init_db, tables, transaction::DbTx, DatabaseEnv}; -use reth_exex::ExExManagerHandle; -use reth_interfaces::{consensus::Consensus, p2p::full_block::FullBlockClient}; +use reth_evm::execute::{BatchBlockExecutionOutput, BatchExecutor, BlockExecutorProvider}; +use reth_interfaces::p2p::full_block::FullBlockClient; use reth_network::NetworkHandle; use reth_network_api::NetworkInfo; -use reth_node_ethereum::EthEvmConfig; -use reth_primitives::{ - fs, - stage::{StageCheckpoint, StageId}, - BlockHashOrNumber, ChainSpec, PruneModes, +use reth_primitives::{fs, stage::StageCheckpoint, BlockHashOrNumber, ChainSpec, PruneModes}; +use reth_provider::{ + BlockNumReader, BlockWriter, BundleStateWithReceipts, HeaderProvider, LatestStateProviderRef, + OriginalValuesKnown, ProviderError, ProviderFactory, StateWriter, }; -use reth_provider::{BlockWriter, ProviderFactory, StageCheckpointReader}; +use reth_revm::database::StateProviderDatabase; use reth_stages::{ - stages::{ - AccountHashingStage, ExecutionStage, ExecutionStageThresholds, MerkleStage, - StorageHashingStage, MERKLE_STAGE_DEFAULT_CLEAN_THRESHOLD, - }, + stages::{AccountHashingStage, MerkleStage, StorageHashingStage}, ExecInput, Stage, }; use reth_tasks::TaskExecutor; use std::{net::SocketAddr, path::PathBuf, sync::Arc}; -use tracing::{debug, info, warn}; +use tracing::*; -/// `reth merkle-debug` command +/// `reth debug merkle` command #[derive(Debug, Parser)] pub struct Command { /// The path to the data dir for all reth files and subdirectories. @@ -103,7 +101,7 @@ impl Command { .build(ProviderFactory::new( db, self.chain.clone(), - self.datadir.unwrap_or_chain_default(self.chain.chain).static_files_path(), + self.datadir.unwrap_or_chain_default(self.chain.chain).static_files(), )?) 
.start_network() .await?; @@ -118,27 +116,29 @@ impl Command { // add network name to data dir let data_dir = self.datadir.unwrap_or_chain_default(self.chain.chain); - let db_path = data_dir.db_path(); + let db_path = data_dir.db(); fs::create_dir_all(&db_path)?; // initialize the database let db = Arc::new(init_db(db_path, self.db.database_args())?); - let factory = ProviderFactory::new(&db, self.chain.clone(), data_dir.static_files_path())?; + let factory = ProviderFactory::new(&db, self.chain.clone(), data_dir.static_files())?; let provider_rw = factory.provider_rw()?; // Configure and build network let network_secret_path = - self.network.p2p_secret_key.clone().unwrap_or_else(|| data_dir.p2p_secret_path()); + self.network.p2p_secret_key.clone().unwrap_or_else(|| data_dir.p2p_secret()); let network = self .build_network( &config, ctx.task_executor.clone(), db.clone(), network_secret_path, - data_dir.known_peers_path(), + data_dir.known_peers(), ) .await?; + let executor_provider = block_executor!(self.chain.clone()); + // Initialize the fetch client info!(target: "reth::cli", target_block_number=self.to, "Downloading tip of block range"); let fetch_client = network.fetch_client().await?; @@ -156,228 +156,185 @@ impl Command { info!(target: "reth::cli", target_block_number=self.to, "Finished downloading tip of block range"); // build the full block client - let consensus: Arc = Arc::new(BeaconConsensus::new(Arc::clone(&self.chain))); + let consensus: Arc = + Arc::new(EthBeaconConsensus::new(Arc::clone(&self.chain))); let block_range_client = FullBlockClient::new(fetch_client, consensus); - // get the execution checkpoint - let execution_checkpoint_block = - provider_rw.get_stage_checkpoint(StageId::Execution)?.unwrap_or_default().block_number; - assert!(execution_checkpoint_block < self.to, "Nothing to run"); + // get best block number + let best_block_number = provider_rw.best_block_number()?; + assert!(best_block_number < self.to, "Nothing to run"); // get the block range from the network - info!(target: "reth::cli", target_block_number=?self.to, "Downloading range of blocks"); - let block_range = block_range_client - .get_full_block_range(to_header.hash_slow(), self.to - execution_checkpoint_block) + let block_range = best_block_number + 1..=self.to; + info!(target: "reth::cli", ?block_range, "Downloading range of blocks"); + let blocks = block_range_client + .get_full_block_range(to_header.hash_slow(), self.to - best_block_number) .await; - // recover senders - let blocks_with_senders = - block_range.into_iter().map(|block| block.try_seal_with_senders()); - - // insert the blocks - for senders_res in blocks_with_senders { - let sealed_block = match senders_res { - Ok(senders) => senders, - Err(err) => { - warn!(target: "reth::cli", "Error sealing block with senders: {err:?}. Skipping..."); - continue - } - }; - provider_rw.insert_block(sealed_block, None)?; - } - - // Check if any of hashing or merkle stages aren't on the same block number as - // Execution stage or have any intermediate progress. - let should_reset_stages = - [StageId::AccountHashing, StageId::StorageHashing, StageId::MerkleExecute] - .into_iter() - .map(|stage_id| provider_rw.get_stage_checkpoint(stage_id)) - .collect::, _>>()? 
- .into_iter() - .map(Option::unwrap_or_default) - .any(|checkpoint| { - checkpoint.block_number != execution_checkpoint_block || - checkpoint.stage_checkpoint.is_some() - }); - - let factory = - reth_revm::EvmProcessorFactory::new(self.chain.clone(), EthEvmConfig::default()); - let mut execution_stage = ExecutionStage::new( - factory, - ExecutionStageThresholds { - max_blocks: Some(1), - max_changes: None, - max_cumulative_gas: None, - max_duration: None, - }, - MERKLE_STAGE_DEFAULT_CLEAN_THRESHOLD, - PruneModes::all(), - ExExManagerHandle::empty(), - ); + let mut td = provider_rw + .header_td_by_number(best_block_number)? + .ok_or(ProviderError::TotalDifficultyNotFound(best_block_number))?; let mut account_hashing_stage = AccountHashingStage::default(); let mut storage_hashing_stage = StorageHashingStage::default(); let mut merkle_stage = MerkleStage::default_execution(); - for block in execution_checkpoint_block + 1..=self.to { - tracing::trace!(target: "reth::cli", block, "Executing block"); - let progress = - if (!should_reset_stages || block > execution_checkpoint_block + 1) && block > 0 { - Some(block - 1) - } else { - None - }; - - execution_stage.execute( - &provider_rw, - ExecInput { - target: Some(block), - checkpoint: block.checked_sub(1).map(StageCheckpoint::new), - }, + for block in blocks.into_iter().rev() { + let block_number = block.number; + let sealed_block = block + .try_seal_with_senders() + .map_err(|block| eyre::eyre!("Error sealing block with senders: {block:?}"))?; + trace!(target: "reth::cli", block_number, "Executing block"); + + provider_rw.insert_block(sealed_block.clone(), None)?; + + td += sealed_block.difficulty; + let mut executor = executor_provider.batch_executor( + StateProviderDatabase::new(LatestStateProviderRef::new( + provider_rw.tx_ref(), + provider_rw.static_file_provider().clone(), + )), + PruneModes::none(), + ); + executor.execute_one((&sealed_block.clone().unseal(), td).into())?; + let BatchBlockExecutionOutput { bundle, receipts, first_block } = executor.finalize(); + BundleStateWithReceipts::new(bundle, receipts, first_block).write_to_storage( + provider_rw.tx_ref(), + None, + OriginalValuesKnown::Yes, )?; + let checkpoint = Some(StageCheckpoint::new(block_number - 1)); + let mut account_hashing_done = false; while !account_hashing_done { - let output = account_hashing_stage.execute( - &provider_rw, - ExecInput { - target: Some(block), - checkpoint: progress.map(StageCheckpoint::new), - }, - )?; + let output = account_hashing_stage + .execute(&provider_rw, ExecInput { target: Some(block_number), checkpoint })?; account_hashing_done = output.done; } let mut storage_hashing_done = false; while !storage_hashing_done { - let output = storage_hashing_stage.execute( - &provider_rw, - ExecInput { - target: Some(block), - checkpoint: progress.map(StageCheckpoint::new), - }, - )?; + let output = storage_hashing_stage + .execute(&provider_rw, ExecInput { target: Some(block_number), checkpoint })?; storage_hashing_done = output.done; } - let incremental_result = merkle_stage.execute( - &provider_rw, - ExecInput { target: Some(block), checkpoint: progress.map(StageCheckpoint::new) }, - ); + let incremental_result = merkle_stage + .execute(&provider_rw, ExecInput { target: Some(block_number), checkpoint }); - if incremental_result.is_err() { - tracing::warn!(target: "reth::cli", block, "Incremental calculation failed, retrying from scratch"); - let incremental_account_trie = provider_rw - .tx_ref() - .cursor_read::()? - .walk_range(..)? 
- .collect::, _>>()?; - let incremental_storage_trie = provider_rw - .tx_ref() - .cursor_dup_read::()? - .walk_range(..)? - .collect::, _>>()?; - - let clean_input = ExecInput { target: Some(block), checkpoint: None }; - loop { - let clean_result = merkle_stage.execute(&provider_rw, clean_input); - assert!(clean_result.is_ok(), "Clean state root calculation failed"); - if clean_result.unwrap().done { - break - } + if incremental_result.is_ok() { + debug!(target: "reth::cli", block_number, "Successfully computed incremental root"); + continue + } + + warn!(target: "reth::cli", block_number, "Incremental calculation failed, retrying from scratch"); + let incremental_account_trie = provider_rw + .tx_ref() + .cursor_read::()? + .walk_range(..)? + .collect::, _>>()?; + let incremental_storage_trie = provider_rw + .tx_ref() + .cursor_dup_read::()? + .walk_range(..)? + .collect::, _>>()?; + + let clean_input = ExecInput { target: Some(sealed_block.number), checkpoint: None }; + loop { + let clean_result = merkle_stage.execute(&provider_rw, clean_input); + assert!(clean_result.is_ok(), "Clean state root calculation failed"); + if clean_result.unwrap().done { + break } + } - let clean_account_trie = provider_rw - .tx_ref() - .cursor_read::()? - .walk_range(..)? - .collect::, _>>()?; - let clean_storage_trie = provider_rw - .tx_ref() - .cursor_dup_read::()? - .walk_range(..)? - .collect::, _>>()?; - - tracing::info!(target: "reth::cli", block, "Comparing incremental trie vs clean trie"); - - // Account trie - let mut incremental_account_mismatched = Vec::new(); - let mut clean_account_mismatched = Vec::new(); - let mut incremental_account_trie_iter = - incremental_account_trie.into_iter().peekable(); - let mut clean_account_trie_iter = clean_account_trie.into_iter().peekable(); - while incremental_account_trie_iter.peek().is_some() || - clean_account_trie_iter.peek().is_some() - { - match (incremental_account_trie_iter.next(), clean_account_trie_iter.next()) { - (Some(incremental), Some(clean)) => { - similar_asserts::assert_eq!( - incremental.0, - clean.0, - "Nibbles don't match" - ); - if incremental.1 != clean.1 && - clean.0 .0.len() > self.skip_node_depth.unwrap_or_default() - { - incremental_account_mismatched.push(incremental); - clean_account_mismatched.push(clean); - } - } - (Some(incremental), None) => { - tracing::warn!(target: "reth::cli", next = ?incremental, "Incremental account trie has more entries"); - } - (None, Some(clean)) => { - tracing::warn!(target: "reth::cli", next = ?clean, "Clean account trie has more entries"); - } - (None, None) => { - tracing::info!(target: "reth::cli", "Exhausted all account trie entries"); + let clean_account_trie = provider_rw + .tx_ref() + .cursor_read::()? + .walk_range(..)? + .collect::, _>>()?; + let clean_storage_trie = provider_rw + .tx_ref() + .cursor_dup_read::()? + .walk_range(..)? 
+ .collect::<Result<Vec<_>, _>>()?; + + info!(target: "reth::cli", block_number, "Comparing incremental trie vs clean trie"); + + // Account trie + let mut incremental_account_mismatched = Vec::new(); + let mut clean_account_mismatched = Vec::new(); + let mut incremental_account_trie_iter = incremental_account_trie.into_iter().peekable(); + let mut clean_account_trie_iter = clean_account_trie.into_iter().peekable(); + while incremental_account_trie_iter.peek().is_some() || + clean_account_trie_iter.peek().is_some() + { + match (incremental_account_trie_iter.next(), clean_account_trie_iter.next()) { + (Some(incremental), Some(clean)) => { + similar_asserts::assert_eq!(incremental.0, clean.0, "Nibbles don't match"); + if incremental.1 != clean.1 && + clean.0 .0.len() > self.skip_node_depth.unwrap_or_default() + { + incremental_account_mismatched.push(incremental); + clean_account_mismatched.push(clean); } } + (Some(incremental), None) => { + warn!(target: "reth::cli", next = ?incremental, "Incremental account trie has more entries"); + } + (None, Some(clean)) => { + warn!(target: "reth::cli", next = ?clean, "Clean account trie has more entries"); + } + (None, None) => { + info!(target: "reth::cli", "Exhausted all account trie entries"); + } } + } - // Stoarge trie - let mut first_mismatched_storage = None; - let mut incremental_storage_trie_iter = - incremental_storage_trie.into_iter().peekable(); - let mut clean_storage_trie_iter = clean_storage_trie.into_iter().peekable(); - while incremental_storage_trie_iter.peek().is_some() || - clean_storage_trie_iter.peek().is_some() - { - match (incremental_storage_trie_iter.next(), clean_storage_trie_iter.next()) { - (Some(incremental), Some(clean)) => { - if incremental != clean && - clean.1.nibbles.len() > self.skip_node_depth.unwrap_or_default() - { - first_mismatched_storage = Some((incremental, clean)); - break - } - } - (Some(incremental), None) => { - tracing::warn!(target: "reth::cli", next = ?incremental, "Incremental storage trie has more entries"); - } - (None, Some(clean)) => { - tracing::warn!(target: "reth::cli", next = ?clean, "Clean storage trie has more entries") - } - (None, None) => { - tracing::info!(target: "reth::cli", "Exhausted all storage trie entries.") + // Storage trie + let mut first_mismatched_storage = None; + let mut incremental_storage_trie_iter = incremental_storage_trie.into_iter().peekable(); + let mut clean_storage_trie_iter = clean_storage_trie.into_iter().peekable(); + while incremental_storage_trie_iter.peek().is_some() || + clean_storage_trie_iter.peek().is_some() + { + match (incremental_storage_trie_iter.next(), clean_storage_trie_iter.next()) { + (Some(incremental), Some(clean)) => { + if incremental != clean && + clean.1.nibbles.len() > self.skip_node_depth.unwrap_or_default() + { + first_mismatched_storage = Some((incremental, clean)); + break } } + (Some(incremental), None) => { + warn!(target: "reth::cli", next = ?incremental, "Incremental storage trie has more entries"); + } + (None, Some(clean)) => { + warn!(target: "reth::cli", next = ?clean, "Clean storage trie has more entries") + } + (None, None) => { + info!(target: "reth::cli", "Exhausted all storage trie entries.") + } } - - similar_asserts::assert_eq!( - ( - incremental_account_mismatched, - first_mismatched_storage.as_ref().map(|(incremental, _)| incremental) - ), - ( - clean_account_mismatched, - first_mismatched_storage.as_ref().map(|(_, clean)| clean) - ), - "Mismatched trie nodes" - ); } + + similar_asserts::assert_eq!( + (
incremental_account_mismatched, + first_mismatched_storage.as_ref().map(|(incremental, _)| incremental) + ), + ( + clean_account_mismatched, + first_mismatched_storage.as_ref().map(|(_, clean)| clean) + ), + "Mismatched trie nodes" + ); } + info!(target: "reth::cli", ?block_range, "Successfully validated incremental roots"); + Ok(()) } } diff --git a/bin/reth/src/commands/debug_cmd/replay_engine.rs b/bin/reth/src/commands/debug_cmd/replay_engine.rs index 0ef866396..b86e707a8 100644 --- a/bin/reth/src/commands/debug_cmd/replay_engine.rs +++ b/bin/reth/src/commands/debug_cmd/replay_engine.rs @@ -5,33 +5,34 @@ use crate::{ DatabaseArgs, NetworkArgs, }, dirs::{DataDirPath, MaybePlatformPath}, + macros::block_executor, }; use clap::Parser; use eyre::Context; use reth_basic_payload_builder::{BasicPayloadJobGenerator, BasicPayloadJobGeneratorConfig}; -use reth_beacon_consensus::{hooks::EngineHooks, BeaconConsensus, BeaconConsensusEngine}; +use reth_beacon_consensus::{hooks::EngineHooks, BeaconConsensusEngine, EthBeaconConsensus}; use reth_blockchain_tree::{ BlockchainTree, BlockchainTreeConfig, ShareableBlockchainTree, TreeExternals, }; use reth_cli_runner::CliContext; use reth_config::Config; +use reth_consensus::Consensus; use reth_db::{init_db, DatabaseEnv}; -use reth_interfaces::consensus::Consensus; use reth_network::NetworkHandle; use reth_network_api::NetworkInfo; -use reth_node_core::engine_api_store::{EngineApiStore, StoredEngineApiMessage}; -#[cfg(not(feature = "optimism"))] -use reth_node_ethereum::{EthEngineTypes, EthEvmConfig}; +use reth_node_core::engine::engine_store::{EngineMessageStore, StoredEngineApiMessage}; use reth_payload_builder::{PayloadBuilderHandle, PayloadBuilderService}; use reth_primitives::{fs, ChainSpec, PruneModes}; -use reth_provider::{providers::BlockchainProvider, CanonStateSubscriptions, ProviderFactory}; -use reth_revm::EvmProcessorFactory; +use reth_provider::{ + providers::BlockchainProvider, CanonStateSubscriptions, ProviderFactory, + StaticFileProviderFactory, +}; use reth_stages::Pipeline; use reth_static_file::StaticFileProducer; use reth_tasks::TaskExecutor; use reth_transaction_pool::noop::NoopTransactionPool; use std::{net::SocketAddr, path::PathBuf, sync::Arc, time::Duration}; -use tokio::sync::{mpsc, oneshot}; +use tokio::sync::oneshot; use tracing::*; /// `reth debug replay-engine` command @@ -98,7 +99,7 @@ impl Command { .build(ProviderFactory::new( db, self.chain.clone(), - self.datadir.unwrap_or_chain_default(self.chain.chain).static_files_path(), + self.datadir.unwrap_or_chain_default(self.chain.chain).static_files(), )?) 
.start_network() .await?; @@ -113,45 +114,38 @@ impl Command { // Add network name to data dir let data_dir = self.datadir.unwrap_or_chain_default(self.chain.chain); - let db_path = data_dir.db_path(); + let db_path = data_dir.db(); fs::create_dir_all(&db_path)?; // Initialize the database let db = Arc::new(init_db(db_path, self.db.database_args())?); let provider_factory = - ProviderFactory::new(db.clone(), self.chain.clone(), data_dir.static_files_path())?; - - let consensus: Arc = Arc::new(BeaconConsensus::new(Arc::clone(&self.chain))); + ProviderFactory::new(db.clone(), self.chain.clone(), data_dir.static_files())?; - #[cfg(not(feature = "optimism"))] - let evm_config = EthEvmConfig::default(); + let consensus: Arc = + Arc::new(EthBeaconConsensus::new(Arc::clone(&self.chain))); - #[cfg(feature = "optimism")] - let evm_config = reth_node_optimism::OptimismEvmConfig::default(); + let executor = block_executor!(self.chain.clone()); // Configure blockchain tree - let tree_externals = TreeExternals::new( - provider_factory.clone(), - Arc::clone(&consensus), - EvmProcessorFactory::new(self.chain.clone(), evm_config), - ); + let tree_externals = + TreeExternals::new(provider_factory.clone(), Arc::clone(&consensus), executor); let tree = BlockchainTree::new(tree_externals, BlockchainTreeConfig::default(), None)?; - let blockchain_tree = ShareableBlockchainTree::new(tree); + let blockchain_tree = Arc::new(ShareableBlockchainTree::new(tree)); // Set up the blockchain provider - let blockchain_db = - BlockchainProvider::new(provider_factory.clone(), blockchain_tree.clone())?; + let blockchain_db = BlockchainProvider::new(provider_factory.clone(), blockchain_tree)?; // Set up network let network_secret_path = - self.network.p2p_secret_key.clone().unwrap_or_else(|| data_dir.p2p_secret_path()); + self.network.p2p_secret_key.clone().unwrap_or_else(|| data_dir.p2p_secret()); let network = self .build_network( &config, ctx.task_executor.clone(), db.clone(), network_secret_path, - data_dir.known_peers_path(), + data_dir.known_peers(), ) .await?; @@ -182,15 +176,16 @@ impl Command { ) = PayloadBuilderService::new(payload_generator, blockchain_db.canonical_state_stream()); #[cfg(not(feature = "optimism"))] - let (payload_service, payload_builder): (_, PayloadBuilderHandle) = - PayloadBuilderService::new(payload_generator, blockchain_db.canonical_state_stream()); + let (payload_service, payload_builder): ( + _, + PayloadBuilderHandle, + ) = PayloadBuilderService::new(payload_generator, blockchain_db.canonical_state_stream()); ctx.task_executor.spawn_critical("payload builder service", payload_service); // Configure the consensus engine let network_client = network.fetch_client().await?; - let (consensus_engine_tx, consensus_engine_rx) = mpsc::unbounded_channel(); - let (beacon_consensus_engine, beacon_engine_handle) = BeaconConsensusEngine::with_channel( + let (beacon_consensus_engine, beacon_engine_handle) = BeaconConsensusEngine::new( network_client, Pipeline::builder().build( provider_factory.clone(), @@ -208,8 +203,6 @@ impl Command { payload_builder, None, u64::MAX, - consensus_engine_tx, - consensus_engine_rx, EngineHooks::new(), )?; info!(target: "reth::cli", "Consensus engine initialized"); @@ -222,7 +215,7 @@ impl Command { let _ = tx.send(res); }); - let engine_api_store = EngineApiStore::new(self.engine_api_store.clone()); + let engine_api_store = EngineMessageStore::new(self.engine_api_store.clone()); for filepath in engine_api_store.engine_messages_iter()? 
{ let contents = fs::read(&filepath).wrap_err(format!("failed to read: {}", filepath.display()))?; diff --git a/bin/reth/src/commands/import.rs b/bin/reth/src/commands/import.rs index dc3140924..354787f32 100644 --- a/bin/reth/src/commands/import.rs +++ b/bin/reth/src/commands/import.rs @@ -6,32 +6,33 @@ use crate::{ DatabaseArgs, }, dirs::{DataDirPath, MaybePlatformPath}, + macros::block_executor, version::SHORT_VERSION, }; use clap::Parser; use eyre::Context; use futures::{Stream, StreamExt}; -use reth_beacon_consensus::BeaconConsensus; +use reth_beacon_consensus::EthBeaconConsensus; use reth_config::{config::EtlConfig, Config}; -use reth_db::{database::Database, init_db}; +use reth_consensus::Consensus; +use reth_db::{database::Database, init_db, tables, transaction::DbTx}; use reth_downloaders::{ bodies::bodies::BodiesDownloaderBuilder, file_client::{ChunkedFileReader, FileClient, DEFAULT_BYTE_LEN_CHUNK_CHAIN_FILE}, headers::reverse_headers::ReverseHeadersDownloaderBuilder, }; use reth_exex::ExExManagerHandle; -use reth_interfaces::{ - consensus::Consensus, - p2p::{ - bodies::downloader::BodyDownloader, - headers::downloader::{HeaderDownloader, SyncTarget}, - }, +use reth_interfaces::p2p::{ + bodies::downloader::BodyDownloader, + headers::downloader::{HeaderDownloader, SyncTarget}, }; use reth_node_core::init::init_genesis; -use reth_node_ethereum::EthEvmConfig; use reth_node_events::node::NodeEvent; use reth_primitives::{stage::StageId, ChainSpec, PruneModes, B256}; -use reth_provider::{HeaderSyncMode, ProviderFactory, StageCheckpointReader}; +use reth_provider::{ + BlockNumReader, ChainSpecProvider, HeaderProvider, HeaderSyncMode, ProviderError, + ProviderFactory, StageCheckpointReader, StaticFileProviderFactory, +}; use reth_stages::{ prelude::*, stages::{ExecutionStage, ExecutionStageThresholds, SenderRecoveryStage}, @@ -40,18 +41,7 @@ use reth_stages::{ use reth_static_file::StaticFileProducer; use std::{path::PathBuf, sync::Arc}; use tokio::sync::watch; -use tracing::{debug, info}; - -/// Stages that require state. -const STATE_STAGES: &[StageId] = &[ - StageId::Execution, - StageId::MerkleUnwind, - StageId::AccountHashing, - StageId::StorageHashing, - StageId::MerkleExecute, - StageId::IndexStorageHistory, - StageId::IndexAccountHistory, -]; +use tracing::{debug, error, info}; /// Syncs RLP encoded blocks from a file. #[derive(Debug, Parser)] @@ -86,11 +76,6 @@ pub struct ImportCommand { #[arg(long, verbatim_doc_comment)] no_state: bool, - /// Import OP Mainnet chain below Bedrock. Caution! Flag must be set as env var, since the env - /// var is read by another process too, in order to make below Bedrock import work. - #[arg(long, verbatim_doc_comment, env = "OP_RETH_MAINNET_BELOW_BEDROCK")] - op_mainnet_below_bedrock: bool, - /// Chunk byte length. 
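The import hunks above (and the debug_cmd hunks earlier) replace per-call-site #[cfg(feature = "optimism")] branches with a single `block_executor!` macro imported from a `macros` module that this diff does not show. A plausible sketch of such a macro, assuming it expands to a feature-gated executor-provider constructor; the `EthExecutorProvider::ethereum` and `OpExecutorProvider::optimism` constructor names are assumptions, not confirmed by this diff:

// Hypothetical expansion of `block_executor!`; the real macro lives in a
// `macros` module outside this diff.
macro_rules! block_executor {
    ($chain_spec:expr) => {{
        #[cfg(feature = "optimism")]
        let executor = reth_node_optimism::OpExecutorProvider::optimism($chain_spec); // assumed name
        #[cfg(not(feature = "optimism"))]
        let executor = reth_node_ethereum::EthExecutorProvider::ethereum($chain_spec); // assumed name
        executor
    }};
}

Whatever the exact expansion, the call sites then treat the result uniformly as a `BlockExecutorProvider`: `block_executor!(chain).executor(db)` for one-shot execution and `.batch_executor(db, prune_modes)` for batched execution, as the build_block.rs and merkle.rs hunks show.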
#[arg(long, value_name = "CHUNK_LEN", verbatim_doc_comment)] chunk_len: Option, @@ -108,76 +93,76 @@ pub struct ImportCommand { impl ImportCommand { /// Execute `import` command - pub async fn execute(mut self) -> eyre::Result<()> { + pub async fn execute(self) -> eyre::Result<()> { info!(target: "reth::cli", "reth {} starting", SHORT_VERSION); - if self.op_mainnet_below_bedrock { - self.no_state = true; - debug!(target: "reth::cli", "Importing OP mainnet below bedrock"); - } - if self.no_state { - debug!(target: "reth::cli", "Stages requiring state disabled"); + info!(target: "reth::cli", "Disabled stages requiring state"); } debug!(target: "reth::cli", - chunk_byte_len=self.chunk_len.unwrap_or(DEFAULT_BYTE_LEN_CHUNK_CHAIN_FILE), "Chunking chain import" + chunk_byte_len=self.chunk_len.unwrap_or(DEFAULT_BYTE_LEN_CHUNK_CHAIN_FILE), + "Chunking chain import" ); // add network name to data dir let data_dir = self.datadir.unwrap_or_chain_default(self.chain.chain); - let config_path = self.config.clone().unwrap_or_else(|| data_dir.config_path()); + let config_path = self.config.clone().unwrap_or_else(|| data_dir.config()); - let mut config: Config = self.load_config(config_path.clone())?; + let mut config: Config = load_config(config_path.clone())?; info!(target: "reth::cli", path = ?config_path, "Configuration loaded"); // Make sure ETL doesn't default to /tmp/, but to whatever datadir is set to if config.stages.etl.dir.is_none() { - config.stages.etl.dir = Some(EtlConfig::from_datadir(&data_dir.data_dir_path())); + config.stages.etl.dir = Some(EtlConfig::from_datadir(data_dir.data_dir())); } - let db_path = data_dir.db_path(); + let db_path = data_dir.db(); info!(target: "reth::cli", path = ?db_path, "Opening database"); let db = Arc::new(init_db(db_path, self.db.database_args())?); info!(target: "reth::cli", "Database opened"); let provider_factory = - ProviderFactory::new(db.clone(), self.chain.clone(), data_dir.static_files_path())?; + ProviderFactory::new(db.clone(), self.chain.clone(), data_dir.static_files())?; debug!(target: "reth::cli", chain=%self.chain.chain, genesis=?self.chain.genesis_hash(), "Initializing genesis"); init_genesis(provider_factory.clone())?; - let consensus = Arc::new(BeaconConsensus::new(self.chain.clone())); + let consensus = Arc::new(EthBeaconConsensus::new(self.chain.clone())); info!(target: "reth::cli", "Consensus engine initialized"); // open file let mut reader = ChunkedFileReader::new(&self.path, self.chunk_len).await?; - while let Some(file_client) = reader.next_chunk().await? { + let mut total_decoded_blocks = 0; + let mut total_decoded_txns = 0; + + while let Some(file_client) = reader.next_chunk::().await? 
{ // create a new FileClient from chunk read from file info!(target: "reth::cli", "Importing chain file chunk" ); - // override the tip - let tip = file_client.tip().expect("file client has no tip"); + let tip = file_client.tip().ok_or(eyre::eyre!("file client has no tip"))?; info!(target: "reth::cli", "Chain file chunk read"); - let (mut pipeline, events) = self - .build_import_pipeline( - &config, + total_decoded_blocks += file_client.headers_len(); + total_decoded_txns += file_client.total_transactions(); + + let (mut pipeline, events) = build_import_pipeline( + &config, + provider_factory.clone(), + &consensus, + Arc::new(file_client), + StaticFileProducer::new( provider_factory.clone(), - &consensus, - Arc::new(file_client), - StaticFileProducer::new( - provider_factory.clone(), - provider_factory.static_file_provider(), - PruneModes::default(), - ), - self.no_state, - ) - .await?; + provider_factory.static_file_provider(), + PruneModes::default(), + ), + self.no_state, + ) + .await?; // override the tip pipeline.set_tip(tip); @@ -202,94 +187,128 @@ impl ImportCommand { } } - info!(target: "reth::cli", "Chain file imported"); - Ok(()) - } + let provider = provider_factory.provider()?; - async fn build_import_pipeline( - &self, - config: &Config, - provider_factory: ProviderFactory, - consensus: &Arc, - file_client: Arc, - static_file_producer: StaticFileProducer, - no_state: bool, - ) -> eyre::Result<(Pipeline, impl Stream)> - where - DB: Database + Clone + Unpin + 'static, - C: Consensus + 'static, - { - if !file_client.has_canonical_blocks() { - eyre::bail!("unable to import non canonical blocks"); - } + let total_imported_blocks = provider.tx_ref().entries::()?; + let total_imported_txns = provider.tx_ref().entries::()?; - let mut header_downloader = ReverseHeadersDownloaderBuilder::new(config.stages.headers) - .build(file_client.clone(), consensus.clone()) - .into_task(); - header_downloader.update_local_head(file_client.start_header().unwrap()); - header_downloader.update_sync_target(SyncTarget::Tip(file_client.tip().unwrap())); - - let mut body_downloader = BodiesDownloaderBuilder::new(config.stages.bodies) - .build(file_client.clone(), consensus.clone(), provider_factory.clone()) - .into_task(); - body_downloader - .set_download_range(file_client.min_block().unwrap()..=file_client.max_block().unwrap()) - .expect("failed to set download range"); - - let (tip_tx, tip_rx) = watch::channel(B256::ZERO); - let factory = - reth_revm::EvmProcessorFactory::new(self.chain.clone(), EthEvmConfig::default()); - - let max_block = file_client.max_block().unwrap_or(0); - - let mut pipeline = Pipeline::builder() - .with_tip_sender(tip_tx) - // we want to sync all blocks the file client provides or 0 if empty - .with_max_block(max_block) - .add_stages( - DefaultStages::new( - provider_factory.clone(), - HeaderSyncMode::Tip(tip_rx), - consensus.clone(), - header_downloader, - body_downloader, - factory.clone(), - config.stages.etl.clone(), - ) - .set(SenderRecoveryStage { - commit_threshold: config.stages.sender_recovery.commit_threshold, - }) - .set(ExecutionStage::new( - factory, - ExecutionStageThresholds { - max_blocks: config.stages.execution.max_blocks, - max_changes: config.stages.execution.max_changes, - max_cumulative_gas: config.stages.execution.max_cumulative_gas, - max_duration: config.stages.execution.max_duration, - }, - config - .stages - .merkle - .clean_threshold - .max(config.stages.account_hashing.clean_threshold) - .max(config.stages.storage_hashing.clean_threshold), - 
config.prune.as_ref().map(|prune| prune.segments.clone()).unwrap_or_default(), - ExExManagerHandle::empty(), - )) - .disable_all_if(STATE_STAGES, || no_state), - ) - .build(provider_factory, static_file_producer); + if total_decoded_blocks != total_imported_blocks || + total_decoded_txns != total_imported_txns + { + error!(target: "reth::cli", + total_decoded_blocks, + total_imported_blocks, + total_decoded_txns, + total_imported_txns, + "Chain was partially imported" + ); + } - let events = pipeline.events().map(Into::into); + info!(target: "reth::cli", + total_imported_blocks, + total_imported_txns, + "Chain file imported" + ); - Ok((pipeline, events)) + Ok(()) } +} - /// Loads the reth config - fn load_config(&self, config_path: PathBuf) -> eyre::Result { - confy::load_path::(config_path.clone()) - .wrap_err_with(|| format!("Could not load config file {config_path:?}")) +/// Builds import pipeline. +/// +/// If configured to execute, all stages will run. Otherwise, only stages that don't require state +/// will run. +pub async fn build_import_pipeline( + config: &Config, + provider_factory: ProviderFactory, + consensus: &Arc, + file_client: Arc, + static_file_producer: StaticFileProducer, + should_exec: bool, +) -> eyre::Result<(Pipeline, impl Stream)> +where + DB: Database + Clone + Unpin + 'static, + C: Consensus + 'static, +{ + if !file_client.has_canonical_blocks() { + eyre::bail!("unable to import non canonical blocks"); } + + // Retrieve latest header found in the database. + let last_block_number = provider_factory.last_block_number()?; + let local_head = provider_factory + .sealed_header(last_block_number)? + .ok_or(ProviderError::HeaderNotFound(last_block_number.into()))?; + + let mut header_downloader = ReverseHeadersDownloaderBuilder::new(config.stages.headers) + .build(file_client.clone(), consensus.clone()) + .into_task(); + // TODO: The pipeline should correctly configure the downloader on its own. + // Find the possibility to remove unnecessary pre-configuration. + header_downloader.update_local_head(local_head); + header_downloader.update_sync_target(SyncTarget::Tip(file_client.tip().unwrap())); + + let mut body_downloader = BodiesDownloaderBuilder::new(config.stages.bodies) + .build(file_client.clone(), consensus.clone(), provider_factory.clone()) + .into_task(); + // TODO: The pipeline should correctly configure the downloader on its own. + // Find the possibility to remove unnecessary pre-configuration. 
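The new `build_import_pipeline` above no longer anchors the header downloader at the file's first header; it resumes from whatever the database already contains (`last_block_number` followed by `sealed_header`). A minimal sketch of the resume arithmetic, assuming contiguous block numbers (the helper name is invented for illustration):

/// Returns the block range still to be imported, or `None` when the database
/// tip is already at or past the file's tip.
fn resume_range(last_block_in_db: u64, file_tip: u64) -> Option<std::ops::RangeInclusive<u64>> {
    (last_block_in_db < file_tip).then(|| last_block_in_db + 1..=file_tip)
}

The debug merkle hunk above applies the same pattern, replacing the execution-checkpoint anchor with `best_block_number + 1..=self.to`.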
+ body_downloader + .set_download_range(file_client.min_block().unwrap()..=file_client.max_block().unwrap()) + .expect("failed to set download range"); + + let (tip_tx, tip_rx) = watch::channel(B256::ZERO); + let executor = block_executor!(provider_factory.chain_spec()); + + let max_block = file_client.max_block().unwrap_or(0); + + let mut pipeline = Pipeline::builder() + .with_tip_sender(tip_tx) + // we want to sync all blocks the file client provides or 0 if empty + .with_max_block(max_block) + .add_stages( + DefaultStages::new( + provider_factory.clone(), + HeaderSyncMode::Tip(tip_rx), + consensus.clone(), + header_downloader, + body_downloader, + executor.clone(), + config.stages.etl.clone(), + ) + .set(SenderRecoveryStage { + commit_threshold: config.stages.sender_recovery.commit_threshold, + }) + .set(ExecutionStage::new( + executor, + ExecutionStageThresholds { + max_blocks: config.stages.execution.max_blocks, + max_changes: config.stages.execution.max_changes, + max_cumulative_gas: config.stages.execution.max_cumulative_gas, + max_duration: config.stages.execution.max_duration, + }, + config + .stages + .merkle + .clean_threshold + .max(config.stages.account_hashing.clean_threshold) + .max(config.stages.storage_hashing.clean_threshold), + config.prune.as_ref().map(|prune| prune.segments.clone()).unwrap_or_default(), + ExExManagerHandle::empty(), + )) + .disable_all_if(&StageId::STATE_REQUIRED, || should_exec), + ) + .build(provider_factory, static_file_producer); + + let events = pipeline.events().map(Into::into); + + Ok((pipeline, events)) +} + +/// Loads the reth config +pub fn load_config(config_path: PathBuf) -> eyre::Result { + confy::load_path::(config_path.clone()) + .wrap_err_with(|| format!("Could not load config file {config_path:?}")) } #[cfg(test)] diff --git a/bin/reth/src/commands/import_op.rs b/bin/reth/src/commands/import_op.rs new file mode 100644 index 000000000..5362b45b0 --- /dev/null +++ b/bin/reth/src/commands/import_op.rs @@ -0,0 +1,250 @@ +//! Command that initializes the node by importing OP Mainnet chain segment below Bedrock, from a +//! file. + +use crate::{ + args::{ + utils::{genesis_value_parser, SUPPORTED_CHAINS}, + DatabaseArgs, + }, + commands::import::{build_import_pipeline, load_config}, + dirs::{DataDirPath, MaybePlatformPath}, + version::SHORT_VERSION, +}; +use clap::Parser; +use reth_beacon_consensus::EthBeaconConsensus; +use reth_config::{config::EtlConfig, Config}; + +use reth_db::{init_db, tables, transaction::DbTx}; +use reth_downloaders::file_client::{ + ChunkedFileReader, FileClient, DEFAULT_BYTE_LEN_CHUNK_CHAIN_FILE, +}; + +use reth_node_core::init::init_genesis; + +use reth_primitives::{hex, stage::StageId, PruneModes, TxHash}; +use reth_provider::{ProviderFactory, StageCheckpointReader, StaticFileProviderFactory}; +use reth_static_file::StaticFileProducer; +use std::{path::PathBuf, sync::Arc}; + +use tracing::{debug, error, info}; + +/// Syncs RLP encoded blocks from a file. +#[derive(Debug, Parser)] +pub struct ImportOpCommand { + /// The path to the configuration file to use. + #[arg(long, value_name = "FILE", verbatim_doc_comment)] + config: Option, + + /// The path to the data dir for all reth files and subdirectories. 
+ /// + /// Defaults to the OS-specific data directory: + /// + /// - Linux: `$XDG_DATA_HOME/reth/` or `$HOME/.local/share/reth/` + /// - Windows: `{FOLDERID_RoamingAppData}/reth/` + /// - macOS: `$HOME/Library/Application Support/reth/` + #[arg(long, value_name = "DATA_DIR", verbatim_doc_comment, default_value_t)] + datadir: MaybePlatformPath, + + /// Chunk byte length. + #[arg(long, value_name = "CHUNK_LEN", verbatim_doc_comment)] + chunk_len: Option, + + #[command(flatten)] + db: DatabaseArgs, + + /// The path to a block file for import. + /// + /// The online stages (headers and bodies) are replaced by a file import, after which the + /// remaining stages are executed. + #[arg(value_name = "IMPORT_PATH", verbatim_doc_comment)] + path: PathBuf, +} + +impl ImportOpCommand { + /// Execute `import` command + pub async fn execute(self) -> eyre::Result<()> { + info!(target: "reth::cli", "reth {} starting", SHORT_VERSION); + + info!(target: "reth::cli", + "Disabled stages requiring state, since cannot execute OVM state changes" + ); + + debug!(target: "reth::cli", + chunk_byte_len=self.chunk_len.unwrap_or(DEFAULT_BYTE_LEN_CHUNK_CHAIN_FILE), + "Chunking chain import" + ); + + let chain_spec = genesis_value_parser(SUPPORTED_CHAINS[0])?; + + // add network name to data dir + let data_dir = self.datadir.unwrap_or_chain_default(chain_spec.chain); + let config_path = self.config.clone().unwrap_or_else(|| data_dir.config()); + + let mut config: Config = load_config(config_path.clone())?; + info!(target: "reth::cli", path = ?config_path, "Configuration loaded"); + + // Make sure ETL doesn't default to /tmp/, but to whatever datadir is set to + if config.stages.etl.dir.is_none() { + config.stages.etl.dir = Some(EtlConfig::from_datadir(data_dir.data_dir())); + } + + let db_path = data_dir.db(); + + info!(target: "reth::cli", path = ?db_path, "Opening database"); + let db = Arc::new(init_db(db_path, self.db.database_args())?); + + info!(target: "reth::cli", "Database opened"); + let provider_factory = + ProviderFactory::new(db.clone(), chain_spec.clone(), data_dir.static_files())?; + + debug!(target: "reth::cli", chain=%chain_spec.chain, genesis=?chain_spec.genesis_hash(), "Initializing genesis"); + + init_genesis(provider_factory.clone())?; + + let consensus = Arc::new(EthBeaconConsensus::new(chain_spec.clone())); + info!(target: "reth::cli", "Consensus engine initialized"); + + // open file + let mut reader = ChunkedFileReader::new(&self.path, self.chunk_len).await?; + + let mut total_decoded_blocks = 0; + let mut total_decoded_txns = 0; + let mut total_filtered_out_dup_txns = 0; + + while let Some(mut file_client) = reader.next_chunk::().await? 
{ + // create a new FileClient from chunk read from file + info!(target: "reth::cli", + "Importing chain file chunk" + ); + + let tip = file_client.tip().ok_or(eyre::eyre!("file client has no tip"))?; + info!(target: "reth::cli", "Chain file chunk read"); + + total_decoded_blocks += file_client.headers_len(); + total_decoded_txns += file_client.bodies_len(); + + for (block_number, body) in file_client.bodies_iter_mut() { + body.transactions.retain(|tx| { + if is_duplicate(tx.hash, *block_number) { + total_filtered_out_dup_txns += 1; + return false + } + true + }) + } + + let (mut pipeline, events) = build_import_pipeline( + &config, + provider_factory.clone(), + &consensus, + Arc::new(file_client), + StaticFileProducer::new( + provider_factory.clone(), + provider_factory.static_file_provider(), + PruneModes::default(), + ), + false, + ) + .await?; + + // override the tip + pipeline.set_tip(tip); + debug!(target: "reth::cli", ?tip, "Tip manually set"); + + let provider = provider_factory.provider()?; + + let latest_block_number = + provider.get_stage_checkpoint(StageId::Finish)?.map(|ch| ch.block_number); + tokio::spawn(reth_node_events::node::handle_events( + None, + latest_block_number, + events, + db.clone(), + )); + + // Run pipeline + info!(target: "reth::cli", "Starting sync pipeline"); + tokio::select! { + res = pipeline.run() => res?, + _ = tokio::signal::ctrl_c() => {}, + } + } + + let provider = provider_factory.provider()?; + + let total_imported_blocks = provider.tx_ref().entries::()?; + let total_imported_txns = provider.tx_ref().entries::()?; + + if total_decoded_blocks != total_imported_blocks || + total_decoded_txns != total_imported_txns + { + error!(target: "reth::cli", + total_decoded_blocks, + total_imported_blocks, + total_decoded_txns, + total_imported_txns, + "Chain was partially imported" + ); + } + + info!(target: "reth::cli", + total_imported_blocks, + total_imported_txns, + "Chain file imported" + ); + + Ok(()) + } +} + +/// A transaction that has been replayed in the chain below Bedrock. +#[derive(Debug)] +pub struct ReplayedTx { + tx_hash: TxHash, + original_block: u64, +} + +impl ReplayedTx { + /// Returns a new instance. + pub const fn new(tx_hash: TxHash, original_block: u64) -> Self { + Self { tx_hash, original_block } + } +} + +/// Transaction 0x9ed8..9cb9, first seen in block 985. +pub const TX_BLOCK_985: ReplayedTx = ReplayedTx::new( + TxHash::new(hex!("9ed8f713b2cc6439657db52dcd2fdb9cc944915428f3c6e2a7703e242b259cb9")), + 985, +); + +/// Transaction 0xc033..6cb6, first seen in block 123 322. +pub const TX_BLOCK_123_322: ReplayedTx = ReplayedTx::new( + TxHash::new(hex!("c033250c5a45f9d104fc28640071a776d146d48403cf5e95ed0015c712e26cb6")), + 123_322, +); + +/// Transaction 0x86f8..76e5, first seen in block 1 133 328. +pub const TX_BLOCK_1_133_328: ReplayedTx = ReplayedTx::new( + TxHash::new(hex!("86f8c77cfa2b439e9b4e92a10f6c17b99fce1220edf4001e4158b57f41c576e5")), + 1_133_328, +); + +/// Transaction 0x3cc2..cd4e, first seen in block 1 244 152. +pub const TX_BLOCK_1_244_152: ReplayedTx = ReplayedTx::new( + TxHash::new(hex!("3cc27e7cc8b7a9380b2b2f6c224ea5ef06ade62a6af564a9dd0bcca92131cd4e")), + 1_244_152, +); + +/// List of original occurrences of all duplicate transactions below Bedrock. +pub const TX_DUP_ORIGINALS: [ReplayedTx; 4] = + [TX_BLOCK_985, TX_BLOCK_123_322, TX_BLOCK_1_133_328, TX_BLOCK_1_244_152]; + +/// Returns `true` if the transaction is a second or third appearance of one of the duplicated transactions.
+pub fn is_duplicate(tx_hash: TxHash, block_number: u64) -> bool { + for ReplayedTx { tx_hash: dup_tx_hash, original_block } in TX_DUP_ORIGINALS { + if tx_hash == dup_tx_hash && block_number != original_block { + return true + } + } + false +} diff --git a/bin/reth/src/commands/import_receipts.rs b/bin/reth/src/commands/import_receipts.rs new file mode 100644 index 000000000..e6aae327a --- /dev/null +++ b/bin/reth/src/commands/import_receipts.rs @@ -0,0 +1,168 @@ +//! Command that imports receipts from a file. + +use crate::{ + args::{ + utils::{chain_help, genesis_value_parser, SUPPORTED_CHAINS}, + DatabaseArgs, + }, + dirs::{DataDirPath, MaybePlatformPath}, +}; +use clap::Parser; +use reth_db::{database::Database, init_db, transaction::DbTx, DatabaseEnv}; +use reth_downloaders::{ + file_client::{ChunkedFileReader, DEFAULT_BYTE_LEN_CHUNK_CHAIN_FILE}, + receipt_file_client::ReceiptFileClient, +}; +use reth_node_core::version::SHORT_VERSION; +use reth_primitives::{stage::StageId, ChainSpec, StaticFileSegment}; +use reth_provider::{ + BundleStateWithReceipts, OriginalValuesKnown, ProviderFactory, StageCheckpointReader, + StateWriter, StaticFileProviderFactory, StaticFileWriter, +}; +use tracing::{debug, error, info}; + +use std::{path::PathBuf, sync::Arc}; + +/// Imports receipts from a file into the database. +#[derive(Debug, Parser)] +pub struct ImportReceiptsCommand { + /// The path to the data dir for all reth files and subdirectories. + /// + /// Defaults to the OS-specific data directory: + /// + /// - Linux: `$XDG_DATA_HOME/reth/` or `$HOME/.local/share/reth/` + /// - Windows: `{FOLDERID_RoamingAppData}/reth/` + /// - macOS: `$HOME/Library/Application Support/reth/` + #[arg(long, value_name = "DATA_DIR", verbatim_doc_comment, default_value_t)] + datadir: MaybePlatformPath<DataDirPath>, + + /// The chain this node is running. + /// + /// Possible values are either a built-in chain or the path to a chain specification file. + #[arg( + long, + value_name = "CHAIN_OR_PATH", + long_help = chain_help(), + default_value = SUPPORTED_CHAINS[0], + value_parser = genesis_value_parser + )] + chain: Arc<ChainSpec>, + + /// Chunk byte length. + #[arg(long, value_name = "CHUNK_LEN", verbatim_doc_comment)] + chunk_len: Option<u64>, + + #[command(flatten)] + db: DatabaseArgs, + + /// The path to a receipts file for import. File must use `HackReceiptCodec` (used for + /// exporting OP chain segment below Bedrock block via testinprod/op-geth).
+ /// + /// + #[arg(value_name = "IMPORT_PATH", verbatim_doc_comment)] + path: PathBuf, +} + +impl ImportReceiptsCommand { + /// Execute the `import-receipts` command + pub async fn execute(self) -> eyre::Result<()> { + info!(target: "reth::cli", "reth {} starting", SHORT_VERSION); + + debug!(target: "reth::cli", + chunk_byte_len=self.chunk_len.unwrap_or(DEFAULT_BYTE_LEN_CHUNK_CHAIN_FILE), + "Chunking receipts import" + ); + + // add network name to data dir + let data_dir = self.datadir.unwrap_or_chain_default(self.chain.chain); + + let db_path = data_dir.db(); + info!(target: "reth::cli", path = ?db_path, "Opening database"); + + let db = Arc::new(init_db(db_path, self.db.database_args())?); + info!(target: "reth::cli", "Database opened"); + let provider_factory = + ProviderFactory::new(db.clone(), self.chain.clone(), data_dir.static_files())?; + + let provider = provider_factory.provider_rw()?; + let static_file_provider = provider_factory.static_file_provider(); + + for stage in StageId::ALL { + let checkpoint = provider.get_stage_checkpoint(stage)?; + debug!(target: "reth::cli", + ?stage, + ?checkpoint, + "Read stage checkpoints from db" + ); + } + + // prepare the tx for `write_to_storage` + let tx = provider.into_tx(); + let mut total_decoded_receipts = 0; + + // open file + let mut reader = ChunkedFileReader::new(&self.path, self.chunk_len).await?; + + while let Some(file_client) = reader.next_chunk::<ReceiptFileClient>().await? { + // create a new file client from chunk read from file + let ReceiptFileClient { receipts, first_block, total_receipts: total_receipts_chunk } = + file_client; + + // mark these as decoded + total_decoded_receipts += total_receipts_chunk; + + info!(target: "reth::cli", + first_receipts_block=?first_block, + total_receipts_chunk, + "Importing receipt file chunk" + ); + + // We're reusing receipt writing code internal to + // `BundleStateWithReceipts::write_to_storage`, so we just use a default empty + // `BundleState`. + let bundled_state = + BundleStateWithReceipts::new(Default::default(), receipts, first_block); + + let static_file_producer = + static_file_provider.get_writer(first_block, StaticFileSegment::Receipts)?; + + // finally, write the receipts + bundled_state.write_to_storage::<<DatabaseEnv as Database>::TXMut>( + &tx, + Some(static_file_producer), + OriginalValuesKnown::Yes, + )?; + } + + tx.commit()?; + // as static files work in file ranges, internally they will already commit when creating + // the next file range, so we only need to commit explicitly at the end. + static_file_provider.commit()?; + + if total_decoded_receipts == 0 { + error!(target: "reth::cli", "No receipts were imported, ensure the receipt file is valid and not empty"); + return Ok(()) + } + + // compare the highest static file block to the number of receipts we decoded + // + // `HeaderNumbers` and `TransactionHashNumbers` tables serve as additional indexes, but + // nothing like this needs to exist for Receipts. So `tx.entries::<tables::Receipts>` would + // return zero here.
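+ // + // (This comparison assumes the pre-Bedrock OP export, where every block carries exactly one transaction and therefore one receipt, so the highest receipts static-file block number doubles as a receipt count.)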
+ let total_imported_receipts = static_file_provider + .get_highest_static_file_block(StaticFileSegment::Receipts) + .expect("static files must exist after ensuring we decoded more than zero"); + + if total_imported_receipts != total_decoded_receipts as u64 { + error!(target: "reth::cli", + total_decoded_receipts, + total_imported_receipts, + "Receipts were partially imported" + ); + } + + info!(target: "reth::cli", total_imported_receipts, "Receipt file imported"); + + Ok(()) + } +} diff --git a/bin/reth/src/commands/init_cmd.rs b/bin/reth/src/commands/init_cmd.rs index 7a2988ebd..bdd8acb52 100644 --- a/bin/reth/src/commands/init_cmd.rs +++ b/bin/reth/src/commands/init_cmd.rs @@ -51,12 +51,12 @@ impl InitCommand { // add network name to data dir let data_dir = self.datadir.unwrap_or_chain_default(self.chain.chain); - let db_path = data_dir.db_path(); + let db_path = data_dir.db(); info!(target: "reth::cli", path = ?db_path, "Opening database"); let db = Arc::new(init_db(&db_path, self.db.database_args())?); info!(target: "reth::cli", "Database opened"); - let provider_factory = ProviderFactory::new(db, self.chain, data_dir.static_files_path())?; + let provider_factory = ProviderFactory::new(db, self.chain, data_dir.static_files())?; info!(target: "reth::cli", "Writing genesis block"); diff --git a/bin/reth/src/commands/init_state.rs b/bin/reth/src/commands/init_state.rs new file mode 100644 index 000000000..ef640e01c --- /dev/null +++ b/bin/reth/src/commands/init_state.rs @@ -0,0 +1,110 @@ +//! Command that initializes the node from a state dump file. + +use crate::{ + args::{ + utils::{chain_help, genesis_value_parser, SUPPORTED_CHAINS}, + DatabaseArgs, + }, + dirs::{DataDirPath, MaybePlatformPath}, +}; +use clap::Parser; +use reth_config::config::EtlConfig; +use reth_db::{database::Database, init_db}; +use reth_node_core::init::init_from_state_dump; +use reth_primitives::{ChainSpec, B256}; +use reth_provider::ProviderFactory; + +use std::{fs::File, io::BufReader, path::PathBuf, sync::Arc}; +use tracing::info; + +/// Initializes the database from a state dump file. +#[derive(Debug, Parser)] +pub struct InitStateCommand { + /// The path to the data dir for all reth files and subdirectories. + /// + /// Defaults to the OS-specific data directory: + /// + /// - Linux: `$XDG_DATA_HOME/reth/` or `$HOME/.local/share/reth/` + /// - Windows: `{FOLDERID_RoamingAppData}/reth/` + /// - macOS: `$HOME/Library/Application Support/reth/` + #[arg(long, value_name = "DATA_DIR", verbatim_doc_comment, default_value_t)] + datadir: MaybePlatformPath<DataDirPath>, + + /// The chain this node is running. + /// + /// Possible values are either a built-in chain or the path to a chain specification file. + #[arg( + long, + value_name = "CHAIN_OR_PATH", + long_help = chain_help(), + default_value = SUPPORTED_CHAINS[0], + value_parser = genesis_value_parser + )] + chain: Arc<ChainSpec>, + + /// JSONL file with state dump. + /// + /// Must contain accounts in the following format; additional account fields are ignored. Must + /// also contain { "root": \<root\> } as the first line. + /// { + /// "balance": "\<balance\>", + /// "nonce": \<nonce\>, + /// "code": "\<bytecode\>", + /// "storage": { + /// "\<key\>": "\<value\>", + /// .. + /// }, + /// "address": "\<address\>", + /// } + /// + /// Allows initializing the chain at a non-genesis block. Caution! Blocks up to and including + /// the target block must be imported manually beforehand; see the 'import' command.
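+ /// + /// An illustrative account line (hypothetical values): + /// { "balance": "0x100", "nonce": 2, "code": "0x", "storage": {}, "address": "0x0000000000000000000000000000000000000001" }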
+ #[arg(value_name = "STATE_DUMP_FILE", verbatim_doc_comment)] + state: PathBuf, + + #[command(flatten)] + db: DatabaseArgs, +} + +impl InitStateCommand { + /// Execute the `init` command + pub async fn execute(self) -> eyre::Result<()> { + info!(target: "reth::cli", "Reth init-state starting"); + + // add network name to data dir + let data_dir = self.datadir.unwrap_or_chain_default(self.chain.chain); + let db_path = data_dir.db(); + info!(target: "reth::cli", path = ?db_path, "Opening database"); + let db = Arc::new(init_db(&db_path, self.db.database_args())?); + info!(target: "reth::cli", "Database opened"); + + let provider_factory = ProviderFactory::new(db, self.chain, data_dir.static_files())?; + let etl_config = EtlConfig::new( + Some(EtlConfig::from_datadir(data_dir.data_dir())), + EtlConfig::default_file_size(), + ); + + info!(target: "reth::cli", "Initiating state dump"); + + let hash = init_at_state(self.state, provider_factory, etl_config)?; + + info!(target: "reth::cli", hash = ?hash, "Genesis block written"); + Ok(()) + } +} + +/// Initialize chain with state at specific block, from a file with state dump. +pub fn init_at_state( + state_dump_path: PathBuf, + factory: ProviderFactory, + etl_config: EtlConfig, +) -> eyre::Result { + info!(target: "reth::cli", + path=?state_dump_path, + "Opening state dump"); + + let file = File::open(state_dump_path)?; + let reader = BufReader::new(file); + + init_from_state_dump(reader, factory, etl_config) +} diff --git a/bin/reth/src/commands/mod.rs b/bin/reth/src/commands/mod.rs index 278531f71..9e6ff8f84 100644 --- a/bin/reth/src/commands/mod.rs +++ b/bin/reth/src/commands/mod.rs @@ -5,8 +5,11 @@ pub mod db; pub mod debug_cmd; pub mod dump_genesis; pub mod import; +pub mod import_op; +pub mod import_receipts; pub mod init_cmd; +pub mod init_state; pub mod node; pub mod p2p; diff --git a/bin/reth/src/commands/node/mod.rs b/bin/reth/src/commands/node/mod.rs index 349130486..9f2a4d67a 100644 --- a/bin/reth/src/commands/node/mod.rs +++ b/bin/reth/src/commands/node/mod.rs @@ -11,7 +11,7 @@ use crate::{ use clap::{value_parser, Args, Parser}; use reth_cli_runner::CliContext; use reth_db::{init_db, DatabaseEnv}; -use reth_node_builder::{InitState, NodeBuilder, WithLaunchContext}; +use reth_node_builder::{NodeBuilder, WithLaunchContext}; use reth_node_core::{node_config::NodeConfig, version}; use reth_primitives::ChainSpec; use std::{ffi::OsString, fmt, future::Future, net::SocketAddr, path::PathBuf, sync::Arc}; @@ -136,7 +136,7 @@ impl NodeCommand { /// closure. 
pub async fn execute<L, Fut>(self, ctx: CliContext, launcher: L) -> eyre::Result<()> where - L: FnOnce(WithLaunchContext<Arc<DatabaseEnv>, InitState>, Ext) -> Fut, + L: FnOnce(WithLaunchContext<NodeBuilder<Arc<DatabaseEnv>>>, Ext) -> Fut, Fut: Future<Output = eyre::Result<()>>, { tracing::info!(target: "reth::cli", version = ?version::SHORT_VERSION, "Starting reth"); @@ -180,7 +180,7 @@ impl NodeCommand { let _ = node_config.install_prometheus_recorder()?; let data_dir = datadir.unwrap_or_chain_default(node_config.chain.chain); - let db_path = data_dir.db_path(); + let db_path = data_dir.db(); tracing::info!(target: "reth::cli", path = ?db_path, "Opening database"); let database = Arc::new(init_db(db_path.clone(), self.db.database_args())?.with_metrics()); @@ -280,14 +280,14 @@ mod tests { NodeCommand::try_parse_args_from(["reth", "--config", "my/path/to/reth.toml"]).unwrap(); // always store reth.toml in the data dir, not the chain specific data dir let data_dir = cmd.datadir.unwrap_or_chain_default(cmd.chain.chain); - let config_path = cmd.config.unwrap_or_else(|| data_dir.config_path()); + let config_path = cmd.config.unwrap_or_else(|| data_dir.config()); assert_eq!(config_path, Path::new("my/path/to/reth.toml")); let cmd = NodeCommand::try_parse_args_from(["reth"]).unwrap(); // always store reth.toml in the data dir, not the chain specific data dir let data_dir = cmd.datadir.unwrap_or_chain_default(cmd.chain.chain); - let config_path = cmd.config.clone().unwrap_or_else(|| data_dir.config_path()); + let config_path = cmd.config.clone().unwrap_or_else(|| data_dir.config()); let end = format!("reth/{}/reth.toml", SUPPORTED_CHAINS[0]); assert!(config_path.ends_with(end), "{:?}", cmd.config); } @@ -296,14 +296,14 @@ mod tests { fn parse_db_path() { let cmd = NodeCommand::try_parse_args_from(["reth"]).unwrap(); let data_dir = cmd.datadir.unwrap_or_chain_default(cmd.chain.chain); - let db_path = data_dir.db_path(); + let db_path = data_dir.db(); let end = format!("reth/{}/db", SUPPORTED_CHAINS[0]); assert!(db_path.ends_with(end), "{:?}", cmd.config); let cmd = NodeCommand::try_parse_args_from(["reth", "--datadir", "my/custom/path"]).unwrap(); let data_dir = cmd.datadir.unwrap_or_chain_default(cmd.chain.chain); - let db_path = data_dir.db_path(); + let db_path = data_dir.db(); assert_eq!(db_path, Path::new("my/custom/path/db")); } diff --git a/bin/reth/src/commands/p2p/mod.rs b/bin/reth/src/commands/p2p/mod.rs index b67881e64..c3ad0231b 100644 --- a/bin/reth/src/commands/p2p/mod.rs +++ b/bin/reth/src/commands/p2p/mod.rs @@ -18,7 +18,11 @@ use reth_discv4::NatResolver; use reth_interfaces::p2p::bodies::client::BodiesClient; use reth_primitives::{BlockHashOrNumber, ChainSpec, NodeRecord}; use reth_provider::ProviderFactory; -use std::{net::SocketAddr, path::PathBuf, sync::Arc}; +use std::{ + net::{SocketAddrV4, SocketAddrV6}, + path::PathBuf, + sync::Arc, +}; /// `reth p2p` command #[derive(Debug, Parser)] pub struct Command { @@ -105,7 +109,7 @@ impl Command { // add network name to data dir let data_dir = self.datadir.unwrap_or_chain_default(self.chain.chain); - let config_path = self.config.clone().unwrap_or_else(|| data_dir.config_path()); + let config_path = self.config.clone().unwrap_or_else(|| data_dir.config()); let mut config: Config = confy::load_path(&config_path).unwrap_or_default(); @@ -119,13 +123,14 @@ impl Command { config.peers.trusted_nodes_only = self.trusted_only; - let default_secret_key_path = data_dir.p2p_secret_path(); + let default_secret_key_path = data_dir.p2p_secret(); let secret_key_path = self.p2p_secret_key.clone().unwrap_or(default_secret_key_path); let 
p2p_secret_key = get_secret_key(&secret_key_path)?; let mut network_config_builder = config .network_config(self.nat, None, p2p_secret_key) .chain_spec(self.chain.clone()) + .disable_discv4_discovery_if(self.chain.chain.is_optimism()) .boot_nodes(self.chain.bootnodes().unwrap_or_default()); network_config_builder = self.discovery.apply_to_builder(network_config_builder); @@ -133,20 +138,37 @@ let mut network_config = network_config_builder.build(Arc::new(ProviderFactory::new( noop_db, self.chain.clone(), - data_dir.static_files_path(), + data_dir.static_files(), )?)); - if self.discovery.enable_discv5_discovery { + if !self.discovery.disable_discovery && + (self.discovery.enable_discv5_discovery || + network_config.chain_spec.chain.is_optimism()) + { network_config = network_config.discovery_v5_with_config_builder(|builder| { - let DiscoveryArgs { discv5_addr, discv5_port, .. } = self.discovery; + let DiscoveryArgs { + discv5_addr: discv5_addr_ipv4, + discv5_addr_ipv6, + discv5_port: discv5_port_ipv4, + discv5_port_ipv6, + discv5_lookup_interval, + discv5_bootstrap_lookup_interval, + discv5_bootstrap_lookup_countdown, + .. + } = self.discovery; + builder .discv5_config( - discv5::ConfigBuilder::new(ListenConfig::from(Into::<SocketAddr>::into(( - discv5_addr, - discv5_port, - )))) + discv5::ConfigBuilder::new(ListenConfig::from_two_sockets( + discv5_addr_ipv4.map(|addr| SocketAddrV4::new(addr, discv5_port_ipv4)), + discv5_addr_ipv6 + .map(|addr| SocketAddrV6::new(addr, discv5_port_ipv6, 0, 0)), + )) .build(), ) + .lookup_interval(discv5_lookup_interval) + .bootstrap_lookup_interval(discv5_bootstrap_lookup_interval) + .bootstrap_lookup_countdown(discv5_bootstrap_lookup_countdown) + .build() }); } diff --git a/bin/reth/src/commands/recover/storage_tries.rs b/bin/reth/src/commands/recover/storage_tries.rs index 7a1c2ccc2..025a170a0 100644 --- a/bin/reth/src/commands/recover/storage_tries.rs +++ b/bin/reth/src/commands/recover/storage_tries.rs @@ -50,11 +50,11 @@ impl Command { /// Execute `storage-tries` recovery command pub async fn execute(self, _ctx: CliContext) -> eyre::Result<()> { let data_dir = self.datadir.unwrap_or_chain_default(self.chain.chain); - let db_path = data_dir.db_path(); + let db_path = data_dir.db(); fs::create_dir_all(&db_path)?; let db = Arc::new(init_db(db_path, self.db.database_args())?); - let factory = ProviderFactory::new(&db, self.chain.clone(), data_dir.static_files_path())?; + let factory = ProviderFactory::new(&db, self.chain.clone(), data_dir.static_files())?; debug!(target: "reth::cli", chain=%self.chain.chain, genesis=?self.chain.genesis_hash(), "Initializing genesis"); init_genesis(factory.clone())?; diff --git a/bin/reth/src/commands/stage/drop.rs b/bin/reth/src/commands/stage/drop.rs index e79a4c33b..625a3f36b 100644 --- a/bin/reth/src/commands/stage/drop.rs +++ b/bin/reth/src/commands/stage/drop.rs @@ -15,7 +15,7 @@ use reth_node_core::init::{insert_genesis_header, insert_genesis_history, insert use reth_primitives::{ fs, stage::StageId, static_file::find_fixed_range, ChainSpec, StaticFileSegment, }; -use reth_provider::{providers::StaticFileWriter, ProviderFactory}; +use reth_provider::{providers::StaticFileWriter, ProviderFactory, StaticFileProviderFactory}; use std::sync::Arc; /// `reth drop-stage` command @@ -54,12 +54,12 @@ impl Command { pub async fn execute(self) -> eyre::Result<()> { // add network name to data dir let data_dir = self.datadir.unwrap_or_chain_default(self.chain.chain); - let db_path = data_dir.db_path(); + let db_path = 
data_dir.db(); fs::create_dir_all(&db_path)?; let db = open_db(db_path.as_ref(), self.db.database_args())?; let provider_factory = - ProviderFactory::new(db, self.chain.clone(), data_dir.static_files_path())?; + ProviderFactory::new(db, self.chain.clone(), data_dir.static_files())?; let static_file_provider = provider_factory.static_file_provider(); let tool = DbTool::new(provider_factory, self.chain.clone())?; diff --git a/bin/reth/src/commands/stage/dump/execution.rs b/bin/reth/src/commands/stage/dump/execution.rs index 7d2d8f0ba..d8f12b50a 100644 --- a/bin/reth/src/commands/stage/dump/execution.rs +++ b/bin/reth/src/commands/stage/dump/execution.rs @@ -1,15 +1,12 @@ use super::setup; -use crate::utils::DbTool; -use eyre::Result; +use crate::{macros::block_executor, utils::DbTool}; use reth_db::{ cursor::DbCursorRO, database::Database, table::TableImporter, tables, transaction::DbTx, DatabaseEnv, }; use reth_node_core::dirs::{ChainPath, DataDirPath}; -use reth_node_ethereum::EthEvmConfig; use reth_primitives::stage::StageCheckpoint; use reth_provider::{ChainSpecProvider, ProviderFactory}; -use reth_revm::EvmProcessorFactory; use reth_stages::{stages::ExecutionStage, Stage, UnwindInput}; use tracing::info; @@ -19,8 +16,8 @@ pub(crate) async fn dump_execution_stage( to: u64, output_datadir: ChainPath, should_run: bool, -) -> Result<()> { - let (output_db, tip_block_number) = setup(from, to, &output_datadir.db_path(), db_tool)?; +) -> eyre::Result<()> { + let (output_db, tip_block_number) = setup(from, to, &output_datadir.db(), db_tool)?; import_tables_with_range(&output_db, db_tool, from, to)?; @@ -28,11 +25,7 @@ pub(crate) async fn dump_execution_stage( if should_run { dry_run( - ProviderFactory::new( - output_db, - db_tool.chain.clone(), - output_datadir.static_files_path(), - )?, + ProviderFactory::new(output_db, db_tool.chain.clone(), output_datadir.static_files())?, to, from, ) @@ -131,10 +124,8 @@ async fn unwind_and_copy( ) -> eyre::Result<()> { let provider = db_tool.provider_factory.provider_rw()?; - let mut exec_stage = ExecutionStage::new_with_factory(EvmProcessorFactory::new( - db_tool.chain.clone(), - EthEvmConfig::default(), - )); + let executor = block_executor!(db_tool.chain.clone()); + let mut exec_stage = ExecutionStage::new_with_executor(executor); exec_stage.unwind( &provider, @@ -163,10 +154,8 @@ async fn dry_run( ) -> eyre::Result<()> { info!(target: "reth::cli", "Executing stage. 
[dry-run]"); - let mut exec_stage = ExecutionStage::new_with_factory(EvmProcessorFactory::new( - output_provider_factory.chain_spec(), - EthEvmConfig::default(), - )); + let executor = block_executor!(output_provider_factory.chain_spec()); + let mut exec_stage = ExecutionStage::new_with_executor(executor); let input = reth_stages::ExecInput { target: Some(to), checkpoint: Some(StageCheckpoint::new(from)) }; diff --git a/bin/reth/src/commands/stage/dump/hashing_account.rs b/bin/reth/src/commands/stage/dump/hashing_account.rs index 1888f0e30..2f28ba129 100644 --- a/bin/reth/src/commands/stage/dump/hashing_account.rs +++ b/bin/reth/src/commands/stage/dump/hashing_account.rs @@ -15,7 +15,7 @@ pub(crate) async fn dump_hashing_account_stage( output_datadir: ChainPath, should_run: bool, ) -> Result<()> { - let (output_db, tip_block_number) = setup(from, to, &output_datadir.db_path(), db_tool)?; + let (output_db, tip_block_number) = setup(from, to, &output_datadir.db(), db_tool)?; // Import relevant AccountChangeSets output_db.update(|tx| { @@ -30,11 +30,7 @@ pub(crate) async fn dump_hashing_account_stage( if should_run { dry_run( - ProviderFactory::new( - output_db, - db_tool.chain.clone(), - output_datadir.static_files_path(), - )?, + ProviderFactory::new(output_db, db_tool.chain.clone(), output_datadir.static_files())?, to, from, ) @@ -69,7 +65,7 @@ fn unwind_and_copy( Ok(()) } -/// Try to re-execute the stage straightaway +/// Try to re-execute the stage straight away async fn dry_run( output_provider_factory: ProviderFactory, to: u64, diff --git a/bin/reth/src/commands/stage/dump/hashing_storage.rs b/bin/reth/src/commands/stage/dump/hashing_storage.rs index 7f827b25c..7d38892dc 100644 --- a/bin/reth/src/commands/stage/dump/hashing_storage.rs +++ b/bin/reth/src/commands/stage/dump/hashing_storage.rs @@ -15,17 +15,13 @@ pub(crate) async fn dump_hashing_storage_stage( output_datadir: ChainPath, should_run: bool, ) -> Result<()> { - let (output_db, tip_block_number) = setup(from, to, &output_datadir.db_path(), db_tool)?; + let (output_db, tip_block_number) = setup(from, to, &output_datadir.db(), db_tool)?; unwind_and_copy(db_tool, from, tip_block_number, &output_db)?; if should_run { dry_run( - ProviderFactory::new( - output_db, - db_tool.chain.clone(), - output_datadir.static_files_path(), - )?, + ProviderFactory::new(output_db, db_tool.chain.clone(), output_datadir.static_files())?, to, from, ) @@ -65,7 +61,7 @@ fn unwind_and_copy( Ok(()) } -/// Try to re-execute the stage straightaway +/// Try to re-execute the stage straight away async fn dry_run( output_provider_factory: ProviderFactory, to: u64, diff --git a/bin/reth/src/commands/stage/dump/merkle.rs b/bin/reth/src/commands/stage/dump/merkle.rs index 08ac0a3aa..9b421be7c 100644 --- a/bin/reth/src/commands/stage/dump/merkle.rs +++ b/bin/reth/src/commands/stage/dump/merkle.rs @@ -1,11 +1,10 @@ use super::setup; -use crate::utils::DbTool; +use crate::{macros::block_executor, utils::DbTool}; use eyre::Result; use reth_config::config::EtlConfig; use reth_db::{database::Database, table::TableImporter, tables, DatabaseEnv}; use reth_exex::ExExManagerHandle; use reth_node_core::dirs::{ChainPath, DataDirPath}; -use reth_node_ethereum::EthEvmConfig; use reth_primitives::{stage::StageCheckpoint, BlockNumber, PruneModes}; use reth_provider::ProviderFactory; use reth_stages::{ @@ -24,7 +23,7 @@ pub(crate) async fn dump_merkle_stage( output_datadir: ChainPath, should_run: bool, ) -> Result<()> { - let (output_db, tip_block_number) = setup(from, to, 
&output_datadir.db_path(), db_tool)?; + let (output_db, tip_block_number) = setup(from, to, &output_datadir.db(), db_tool)?; output_db.update(|tx| { tx.import_table_with_range::( @@ -46,11 +45,7 @@ pub(crate) async fn dump_merkle_stage( if should_run { dry_run( - ProviderFactory::new( - output_db, - db_tool.chain.clone(), - output_datadir.static_files_path(), - )?, + ProviderFactory::new(output_db, db_tool.chain.clone(), output_datadir.static_files())?, to, from, ) @@ -85,9 +80,11 @@ async fn unwind_and_copy( MerkleStage::default_unwind().unwind(&provider, unwind)?; + let executor = block_executor!(db_tool.chain.clone()); + // Bring Plainstate to TO (hashing stage execution requires it) let mut exec_stage = ExecutionStage::new( - reth_revm::EvmProcessorFactory::new(db_tool.chain.clone(), EthEvmConfig::default()), + executor, ExecutionStageThresholds { max_blocks: Some(u64::MAX), max_changes: None, @@ -138,7 +135,7 @@ async fn unwind_and_copy( Ok(()) } -/// Try to re-execute the stage straightaway +/// Try to re-execute the stage straight away async fn dry_run( output_provider_factory: ProviderFactory, to: u64, diff --git a/bin/reth/src/commands/stage/dump/mod.rs b/bin/reth/src/commands/stage/dump/mod.rs index 4e1cace6e..fa4184356 100644 --- a/bin/reth/src/commands/stage/dump/mod.rs +++ b/bin/reth/src/commands/stage/dump/mod.rs @@ -102,11 +102,11 @@ impl Command { pub async fn execute(self) -> eyre::Result<()> { // add network name to data dir let data_dir = self.datadir.unwrap_or_chain_default(self.chain.chain); - let db_path = data_dir.db_path(); + let db_path = data_dir.db(); info!(target: "reth::cli", path = ?db_path, "Opening database"); let db = Arc::new(init_db(db_path, self.db.database_args())?); let provider_factory = - ProviderFactory::new(db, self.chain.clone(), data_dir.static_files_path())?; + ProviderFactory::new(db, self.chain.clone(), data_dir.static_files())?; info!(target: "reth::cli", "Database opened"); diff --git a/bin/reth/src/commands/stage/run.rs b/bin/reth/src/commands/stage/run.rs index 32550718f..59d26fc29 100644 --- a/bin/reth/src/commands/stage/run.rs +++ b/bin/reth/src/commands/stage/run.rs @@ -9,19 +9,21 @@ use crate::{ DatabaseArgs, NetworkArgs, StageEnum, }, dirs::{DataDirPath, MaybePlatformPath}, + macros::block_executor, prometheus_exporter, version::SHORT_VERSION, }; use clap::Parser; -use reth_beacon_consensus::BeaconConsensus; +use reth_beacon_consensus::EthBeaconConsensus; use reth_cli_runner::CliContext; use reth_config::{config::EtlConfig, Config}; use reth_db::init_db; use reth_downloaders::bodies::bodies::BodiesDownloaderBuilder; use reth_exex::ExExManagerHandle; -use reth_node_ethereum::EthEvmConfig; use reth_primitives::ChainSpec; -use reth_provider::{ProviderFactory, StageCheckpointReader, StageCheckpointWriter}; +use reth_provider::{ + ProviderFactory, StageCheckpointReader, StageCheckpointWriter, StaticFileProviderFactory, +}; use reth_stages::{ stages::{ AccountHashingStage, BodyStage, ExecutionStage, ExecutionStageThresholds, @@ -128,23 +130,20 @@ impl Command { // add network name to data dir let data_dir = self.datadir.unwrap_or_chain_default(self.chain.chain); - let config_path = self.config.clone().unwrap_or_else(|| data_dir.config_path()); + let config_path = self.config.clone().unwrap_or_else(|| data_dir.config()); let config: Config = confy::load_path(config_path).unwrap_or_default(); info!(target: "reth::cli", "reth {} starting stage {:?}", SHORT_VERSION, self.stage); // use the overridden db path if specified - let db_path = 
data_dir.db_path(); + let db_path = data_dir.db(); info!(target: "reth::cli", path = ?db_path, "Opening database"); let db = Arc::new(init_db(db_path, self.db.database_args())?); info!(target: "reth::cli", "Database opened"); - let factory = ProviderFactory::new( - Arc::clone(&db), - self.chain.clone(), - data_dir.static_files_path(), - )?; + let factory = + ProviderFactory::new(Arc::clone(&db), self.chain.clone(), data_dir.static_files())?; let mut provider_rw = factory.provider_rw()?; if let Some(listen_addr) = self.metrics { @@ -163,16 +162,14 @@ impl Command { let batch_size = self.batch_size.unwrap_or(self.to.saturating_sub(self.from) + 1); let etl_config = EtlConfig::new( - Some( - self.etl_dir.unwrap_or_else(|| EtlConfig::from_datadir(&data_dir.data_dir_path())), - ), + Some(self.etl_dir.unwrap_or_else(|| EtlConfig::from_datadir(data_dir.data_dir()))), self.etl_file_size.unwrap_or(EtlConfig::default_file_size()), ); let (mut exec_stage, mut unwind_stage): (Box>, Option>>) = match self.stage { StageEnum::Bodies => { - let consensus = Arc::new(BeaconConsensus::new(self.chain.clone())); + let consensus = Arc::new(EthBeaconConsensus::new(self.chain.clone())); let mut config = config; config.peers.trusted_nodes_only = self.network.trusted_only; @@ -186,15 +183,15 @@ impl Command { .network .p2p_secret_key .clone() - .unwrap_or_else(|| data_dir.p2p_secret_path()); + .unwrap_or_else(|| data_dir.p2p_secret()); let p2p_secret_key = get_secret_key(&network_secret_path)?; - let default_peers_path = data_dir.known_peers_path(); + let default_peers_path = data_dir.known_peers(); let provider_factory = Arc::new(ProviderFactory::new( db.clone(), self.chain.clone(), - data_dir.static_files_path(), + data_dir.static_files(), )?); let network = self @@ -227,13 +224,10 @@ impl Command { } StageEnum::Senders => (Box::new(SenderRecoveryStage::new(batch_size)), None), StageEnum::Execution => { - let factory = reth_revm::EvmProcessorFactory::new( - self.chain.clone(), - EthEvmConfig::default(), - ); + let executor = block_executor!(self.chain.clone()); ( Box::new(ExecutionStage::new( - factory, + executor, ExecutionStageThresholds { max_blocks: Some(batch_size), max_changes: None, diff --git a/bin/reth/src/commands/stage/unwind.rs b/bin/reth/src/commands/stage/unwind.rs index 7810a4416..1f0c7fc45 100644 --- a/bin/reth/src/commands/stage/unwind.rs +++ b/bin/reth/src/commands/stage/unwind.rs @@ -1,32 +1,18 @@ //! 
Unwinding a certain block range -use crate::{ - args::{ - utils::{chain_help, genesis_value_parser, SUPPORTED_CHAINS}, - DatabaseArgs, - }, - dirs::{DataDirPath, MaybePlatformPath}, -}; use clap::{Parser, Subcommand}; -use reth_beacon_consensus::BeaconConsensus; -use reth_config::{Config, PruneConfig}; +use reth_beacon_consensus::EthBeaconConsensus; +use reth_config::Config; +use reth_consensus::Consensus; use reth_db::{database::Database, open_db}; -use reth_downloaders::{ - bodies::bodies::BodiesDownloaderBuilder, - headers::reverse_headers::ReverseHeadersDownloaderBuilder, -}; +use reth_downloaders::{bodies::noop::NoopBodiesDownloader, headers::noop::NoopHeaderDownloader}; use reth_exex::ExExManagerHandle; -use reth_interfaces::consensus::Consensus; -use reth_node_core::{ - args::{get_secret_key, NetworkArgs}, - dirs::ChainPath, -}; -use reth_node_ethereum::EthEvmConfig; +use reth_node_core::args::NetworkArgs; use reth_primitives::{BlockHashOrNumber, ChainSpec, PruneModes, B256}; use reth_provider::{ BlockExecutionWriter, BlockNumReader, ChainSpecProvider, HeaderSyncMode, ProviderFactory, + StaticFileProviderFactory, }; -use reth_prune::PrunerBuilder; use reth_stages::{ sets::DefaultStages, stages::{ @@ -41,6 +27,15 @@ use std::{ops::RangeInclusive, sync::Arc}; use tokio::sync::watch; use tracing::info; +use crate::{ + args::{ + utils::{chain_help, genesis_value_parser, SUPPORTED_CHAINS}, + DatabaseArgs, + }, + dirs::{DataDirPath, MaybePlatformPath}, + macros::block_executor, +}; + /// `reth stage unwind` command #[derive(Debug, Parser)] pub struct Command { @@ -82,16 +77,16 @@ impl Command { pub async fn execute(self) -> eyre::Result<()> { // add network name to data dir let data_dir = self.datadir.unwrap_or_chain_default(self.chain.chain); - let db_path = data_dir.db_path(); + let db_path = data_dir.db(); if !db_path.exists() { eyre::bail!("Database {db_path:?} does not exist.") } - let config_path = data_dir.config_path(); + let config_path = data_dir.config(); let config: Config = confy::load_path(config_path).unwrap_or_default(); let db = Arc::new(open_db(db_path.as_ref(), self.db.database_args())?); let provider_factory = - ProviderFactory::new(db, self.chain.clone(), data_dir.static_files_path())?; + ProviderFactory::new(db, self.chain.clone(), data_dir.static_files())?; let range = self.command.unwind_range(provider_factory.clone())?; if *range.start() == 0 { @@ -108,18 +103,10 @@ impl Command { .filter(|highest_static_file_block| highest_static_file_block >= range.start()) { info!(target: "reth::cli", ?range, ?highest_static_block, "Executing a pipeline unwind."); - let mut pipeline = - self.build_pipeline(data_dir, config, provider_factory.clone()).await?; + let mut pipeline = self.build_pipeline(config, provider_factory.clone()).await?; // Move all applicable data from database to static files. - pipeline.produce_static_files()?; - - // Run the pruner so we don't potentially end up with higher height in the database vs - // static files. 
- let mut pruner = PrunerBuilder::new(PruneConfig::default()) - .prune_delete_limit(usize::MAX) - .build(provider_factory); - pruner.run(*range.end())?; + pipeline.move_to_static_files()?; pipeline.unwind((*range.start()).saturating_sub(1), None)?; } else { @@ -140,47 +127,15 @@ impl Command { async fn build_pipeline( self, - data_dir: ChainPath, config: Config, provider_factory: ProviderFactory>, ) -> Result>, eyre::Error> { - // Even though we are not planning to download anything, we need to initialize Body and - // Header stage with a network client - let network_secret_path = - self.network.p2p_secret_key.clone().unwrap_or_else(|| data_dir.p2p_secret_path()); - let p2p_secret_key = get_secret_key(&network_secret_path)?; - let default_peers_path = data_dir.known_peers_path(); - let network = self - .network - .network_config( - &config, - provider_factory.chain_spec(), - p2p_secret_key, - default_peers_path, - ) - .build(provider_factory.clone()) - .start_network() - .await?; - let consensus: Arc = - Arc::new(BeaconConsensus::new(provider_factory.chain_spec())); - - // building network downloaders using the fetch client - let fetch_client = network.fetch_client().await?; - let header_downloader = ReverseHeadersDownloaderBuilder::new(config.stages.headers) - .build(fetch_client.clone(), Arc::clone(&consensus)); - let body_downloader = BodiesDownloaderBuilder::new(config.stages.bodies).build( - fetch_client, - Arc::clone(&consensus), - provider_factory.clone(), - ); + Arc::new(EthBeaconConsensus::new(provider_factory.chain_spec())); let stage_conf = &config.stages; let (tip_tx, tip_rx) = watch::channel(B256::ZERO); - let factory = reth_revm::EvmProcessorFactory::new( - provider_factory.chain_spec(), - EthEvmConfig::default(), - ); + let executor = block_executor!(provider_factory.chain_spec()); let header_mode = HeaderSyncMode::Tip(tip_rx); let pipeline = Pipeline::builder() @@ -190,16 +145,16 @@ impl Command { provider_factory.clone(), header_mode, Arc::clone(&consensus), - header_downloader, - body_downloader, - factory.clone(), + NoopHeaderDownloader::default(), + NoopBodiesDownloader::default(), + executor.clone(), stage_conf.etl.clone(), ) .set(SenderRecoveryStage { commit_threshold: stage_conf.sender_recovery.commit_threshold, }) .set(ExecutionStage::new( - factory, + executor, ExecutionStageThresholds { max_blocks: None, max_changes: None, @@ -236,10 +191,12 @@ impl Command { /// `reth stage unwind` subcommand #[derive(Subcommand, Debug, Eq, PartialEq)] enum Subcommands { - /// Unwinds the database until the given block number (range is inclusive). + /// Unwinds the database from the latest block, until the given block number or hash has been + /// reached, that block is not included. #[command(name = "to-block")] ToBlock { target: BlockHashOrNumber }, - /// Unwinds the given number of blocks from the database. + /// Unwinds the database from the latest block, until the given number of blocks have been + /// reached. 
#[command(name = "num-blocks")] NumBlocks { amount: u64 }, } @@ -263,6 +220,9 @@ impl Subcommands { }, Subcommands::NumBlocks { amount } => last.saturating_sub(*amount), } + 1; + if target > last { + eyre::bail!("Target block number is higher than the latest block number") + } Ok(target..=last) } } diff --git a/bin/reth/src/commands/test_vectors/tables.rs b/bin/reth/src/commands/test_vectors/tables.rs index 6399c81ac..181ed0e3e 100644 --- a/bin/reth/src/commands/test_vectors/tables.rs +++ b/bin/reth/src/commands/test_vectors/tables.rs @@ -81,7 +81,7 @@ where { let mut rows = vec![]; let mut seen_keys = HashSet::new(); - let strat = proptest::collection::vec( + let strategy = proptest::collection::vec( any_with::>(( ::Parameters::default(), ::Parameters::default(), @@ -94,7 +94,7 @@ where while rows.len() < per_table { // Generate all `per_table` rows: (Key, Value) rows.extend( - &mut strat + &mut strategy .new_tree(runner) .map_err(|e| eyre::eyre!("{e}"))? .current() diff --git a/bin/reth/src/lib.rs b/bin/reth/src/lib.rs index 42f26115c..9dd43bcd2 100644 --- a/bin/reth/src/lib.rs +++ b/bin/reth/src/lib.rs @@ -31,6 +31,7 @@ pub mod cli; pub mod commands; +mod macros; pub mod utils; /// Re-exported payload related types diff --git a/bin/reth/src/macros.rs b/bin/reth/src/macros.rs new file mode 100644 index 000000000..7ff81a0f9 --- /dev/null +++ b/bin/reth/src/macros.rs @@ -0,0 +1,20 @@ +//! Helper macros + +/// Creates the block executor type based on the configured feature. +/// +/// Note(mattsse): This is incredibly horrible and will be replaced +#[cfg(not(feature = "optimism"))] +macro_rules! block_executor { + ($chain_spec:expr) => { + reth_node_ethereum::EthExecutorProvider::ethereum($chain_spec) + }; +} + +#[cfg(feature = "optimism")] +macro_rules! block_executor { + ($chain_spec:expr) => { + reth_node_optimism::OpExecutorProvider::optimism($chain_spec) + }; +} + +pub(crate) use block_executor; diff --git a/bin/reth/src/optimism.rs b/bin/reth/src/optimism.rs index 0c0a483dd..581718797 100644 --- a/bin/reth/src/optimism.rs +++ b/bin/reth/src/optimism.rs @@ -2,11 +2,7 @@ use clap::Parser; use reth::cli::Cli; -use reth_node_builder::NodeHandle; -use reth_node_optimism::{ - args::RollupArgs, rpc::SequencerClient, OptimismEngineTypes, OptimismNode, -}; -use reth_provider::BlockReaderIdExt; +use reth_node_optimism::{args::RollupArgs, rpc::SequencerClient, OptimismNode}; use std::sync::Arc; // We use jemalloc for performance reasons @@ -27,11 +23,11 @@ fn main() { } if let Err(err) = Cli::::parse().run(|builder, rollup_args| async move { - let NodeHandle { node, node_exit_future } = builder + let handle = builder .node(OptimismNode::new(rollup_args.clone())) .extend_rpc_modules(move |ctx| { // register sequencer tx forwarder - if let Some(sequencer_http) = rollup_args.sequencer_http.clone() { + if let Some(sequencer_http) = rollup_args.sequencer_http { ctx.registry.set_eth_raw_transaction_forwarder(Arc::new(SequencerClient::new( sequencer_http, ))); @@ -42,29 +38,7 @@ fn main() { .launch() .await?; - // If `enable_genesis_walkback` is set to true, the rollup client will need to - // perform the derivation pipeline from genesis, validating the data dir. - // When set to false, set the finalized, safe, and unsafe head block hashes - // on the rollup client using a fork choice update. This prevents the rollup - // client from performing the derivation pipeline from genesis, and instead - // starts syncing from the current tip in the DB. 
- if node.chain_spec().is_optimism() && !rollup_args.enable_genesis_walkback { - let client = node.rpc_server_handles.auth.http_client(); - if let Ok(Some(head)) = node.provider.latest_header() { - reth_rpc_api::EngineApiClient::<OptimismEngineTypes>::fork_choice_updated_v2( - &client, - reth_rpc_types::engine::ForkchoiceState { - head_block_hash: head.hash(), - safe_block_hash: head.hash(), - finalized_block_hash: head.hash(), - }, - None, - ) - .await?; - } - } - - node_exit_future.await + handle.node_exit_future.await }) { eprintln!("Error: {err:?}"); std::process::exit(1); diff --git a/bin/reth/src/utils.rs b/bin/reth/src/utils.rs index 5c56476a8..650fc9d70 100644 --- a/bin/reth/src/utils.rs +++ b/bin/reth/src/utils.rs @@ -132,7 +132,7 @@ impl DbTool { /// Drops the database and the static files at the given path. pub fn drop( - &mut self, + &self, db_path: impl AsRef<Path>, static_files_path: impl AsRef<Path>, ) -> Result<()> { @@ -149,7 +149,7 @@ impl DbTool { } /// Drops the provided table from the database. - pub fn drop_table<T: Table>(&mut self) -> Result<()> { + pub fn drop_table<T: Table>(&self) -> Result<()> { self.provider_factory.db_ref().update(|tx| tx.clear::<T>())??; Ok(()) } diff --git a/book/SUMMARY.md b/book/SUMMARY.md index ffd5f67e0..fc6deb282 100644 --- a/book/SUMMARY.md +++ b/book/SUMMARY.md @@ -30,11 +30,13 @@ - [`reth`](./cli/reth.md) - [`reth node`](./cli/reth/node.md) - [`reth init`](./cli/reth/init.md) + - [`reth init-state`](./cli/reth/init-state.md) - [`reth import`](./cli/reth/import.md) - [`reth dump-genesis`](./cli/reth/dump-genesis.md) - [`reth db`](./cli/reth/db.md) - [`reth db stats`](./cli/reth/db/stats.md) - [`reth db list`](./cli/reth/db/list.md) + - [`reth db checksum`](./cli/reth/db/checksum.md) - [`reth db diff`](./cli/reth/db/diff.md) - [`reth db get`](./cli/reth/db/get.md) - [`reth db get mdbx`](./cli/reth/db/get/mdbx.md) diff --git a/book/cli/SUMMARY.md b/book/cli/SUMMARY.md index 07711434e..ee3d714b2 100644 --- a/book/cli/SUMMARY.md +++ b/book/cli/SUMMARY.md @@ -1,11 +1,13 @@ - [`reth`](./reth.md) - [`reth node`](./reth/node.md) - [`reth init`](./reth/init.md) + - [`reth init-state`](./reth/init-state.md) - [`reth import`](./reth/import.md) - [`reth dump-genesis`](./reth/dump-genesis.md) - [`reth db`](./reth/db.md) - [`reth db stats`](./reth/db/stats.md) - [`reth db list`](./reth/db/list.md) + - [`reth db checksum`](./reth/db/checksum.md) - [`reth db diff`](./reth/db/diff.md) - [`reth db get`](./reth/db/get.md) - [`reth db get mdbx`](./reth/db/get/mdbx.md) diff --git a/book/cli/reth.md b/book/cli/reth.md index f213a30f2..8b6f757c9 100644 --- a/book/cli/reth.md +++ b/book/cli/reth.md @@ -9,6 +9,7 @@ Usage: reth [OPTIONS] <COMMAND> Commands: node Start the node init Initialize the database from a genesis file + init-state Initialize the database from a state dump file import This syncs RLP encoded blocks from a file dump-genesis Dumps genesis block JSON configuration to stdout db Database debugging utilities diff --git a/book/cli/reth/db.md b/book/cli/reth/db.md index 77137dadb..bd5989d7f 100644 --- a/book/cli/reth/db.md +++ b/book/cli/reth/db.md @@ -9,6 +9,7 @@ Usage: reth db [OPTIONS] <COMMAND> Commands: stats Lists all the tables, their entry count and their size list Lists the contents of a table + checksum Calculates the content checksum of a table diff Create a diff between two database tables or two entire databases get Gets the content of a table for the given key drop Deletes all database entries diff --git a/book/cli/reth/db/checksum.md b/book/cli/reth/db/checksum.md new file mode 100644 index 
000000000..6f080c74b --- /dev/null +++ b/book/cli/reth/db/checksum.md @@ -0,0 +1,124 @@ +# reth db checksum + +Calculates the content checksum of a table + +```bash +$ reth db checksum --help +Usage: reth db checksum [OPTIONS] <TABLE> + +Arguments: + <TABLE>
+ The table name + +Options: + --datadir + The path to the data dir for all reth files and subdirectories. + + Defaults to the OS-specific data directory: + + - Linux: `$XDG_DATA_HOME/reth/` or `$HOME/.local/share/reth/` + - Windows: `{FOLDERID_RoamingAppData}/reth/` + - macOS: `$HOME/Library/Application Support/reth/` + + [default: default] + + --chain + The chain this node is running. + Possible values are either a built-in chain or the path to a chain specification file. + + Built-in chains: + mainnet, sepolia, goerli, holesky, dev + + [default: mainnet] + + --instance + Add a new instance of a node. + + Configures the ports of the node to avoid conflicts with the defaults. This is useful for running multiple nodes on the same machine. + + Max number of instances is 200. It is chosen in a way so that it's not possible to have port numbers that conflict with each other. + + Changes to the following port numbers: - DISCOVERY_PORT: default + `instance` - 1 - AUTH_PORT: default + `instance` * 100 - 100 - HTTP_RPC_PORT: default - `instance` + 1 - WS_RPC_PORT: default + `instance` * 2 - 2 + + [default: 1] + + -h, --help + Print help (see a summary with '-h') + +Logging: + --log.stdout.format + The format to use for logs written to stdout + + [default: terminal] + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + --log.stdout.filter + The filter to use for logs written to stdout + + [default: ] + + --log.file.format + The format to use for logs written to the log file + + [default: terminal] + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + --log.file.filter + The filter to use for logs written to the log file + + [default: debug] + + --log.file.directory + The path to put log files in + + [default: /logs] + + --log.file.max-size + The maximum size (in MB) of one log file + + [default: 200] + + --log.file.max-files + The maximum amount of log files that will be stored. If set to 0, background file logging is disabled + + [default: 5] + + --log.journald + Write logs to journald + + --log.journald.filter + The filter to use for logs written to journald + + [default: error] + + --color + Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting + + [default: always] + + Possible values: + - always: Colors on + - auto: Colors on + - never: Colors off + +Display: + -v, --verbosity... + Set the minimum log level. + + -v Errors + -vv Warnings + -vvv Info + -vvvv Debug + -vvvvv Traces (warning: very verbose!) 
+ + -q, --quiet + Silence all log output +``` \ No newline at end of file diff --git a/book/cli/reth/db/stats.md b/book/cli/reth/db/stats.md index dea5e3d05..437c10bd0 100644 --- a/book/cli/reth/db/stats.md +++ b/book/cli/reth/db/stats.md @@ -18,7 +18,7 @@ Options: [default: default] - --only-total-size + --detailed-sizes Show only the total size for static files --chain @@ -30,8 +30,15 @@ Options: [default: mainnet] - --summary - Show only the summary per static file segment + --detailed-segments + Show detailed information per static file segment + + --checksum + Show a checksum of each table in the database. + + WARNING: this option will take a long time to run, as it needs to traverse and hash the entire database. + + For individual table checksums, use the `reth db checksum` command. --instance Add a new instance of a node. diff --git a/book/cli/reth/import.md b/book/cli/reth/import.md index 382efb8ef..411527f9e 100644 --- a/book/cli/reth/import.md +++ b/book/cli/reth/import.md @@ -30,6 +30,12 @@ Options: [default: mainnet] + --no-state + Disables stages that require state. + + --chunk-len + Chunk byte length. + --instance Add a new instance of a node. diff --git a/book/cli/reth/init-state.md b/book/cli/reth/init-state.md new file mode 100644 index 000000000..0254a43f5 --- /dev/null +++ b/book/cli/reth/init-state.md @@ -0,0 +1,158 @@ +# reth init-state + +Initialize the database from a state dump file + +```bash +$ reth init-state --help +Usage: reth init-state [OPTIONS] + +Options: + --datadir + The path to the data dir for all reth files and subdirectories. + + Defaults to the OS-specific data directory: + + - Linux: `$XDG_DATA_HOME/reth/` or `$HOME/.local/share/reth/` + - Windows: `{FOLDERID_RoamingAppData}/reth/` + - macOS: `$HOME/Library/Application Support/reth/` + + [default: default] + + --chain + The chain this node is running. + Possible values are either a built-in chain or the path to a chain specification file. + + Built-in chains: + mainnet, sepolia, goerli, holesky, dev + + [default: mainnet] + + --state + JSONL file with state dump. + + Must contain accounts in following format, additional account fields are ignored. Can + also contain { "root": \ } as first line. + { + "balance": "\", + "nonce": \, + "code": "\", + "storage": { + "\": "\", + .. + }, + "address": "\", + } + + Allows init at a non-genesis block. Caution! Blocks must be manually imported up until + and including the non-genesis block to init chain at. See 'import' command. + + --instance + Add a new instance of a node. + + Configures the ports of the node to avoid conflicts with the defaults. This is useful for running multiple nodes on the same machine. + + Max number of instances is 200. It is chosen in a way so that it's not possible to have port numbers that conflict with each other. + + Changes to the following port numbers: - DISCOVERY_PORT: default + `instance` - 1 - AUTH_PORT: default + `instance` * 100 - 100 - HTTP_RPC_PORT: default - `instance` + 1 - WS_RPC_PORT: default + `instance` * 2 - 2 + + [default: 1] + + -h, --help + Print help (see a summary with '-h') + +Database: + --db.log-level + Database logging level. Levels higher than "notice" require a debug build + + Possible values: + - fatal: Enables logging for critical conditions, i.e. 
assertion failures + - error: Enables logging for error conditions + - warn: Enables logging for warning conditions + - notice: Enables logging for normal but significant condition + - verbose: Enables logging for verbose informational + - debug: Enables logging for debug-level messages + - trace: Enables logging for trace debug-level messages + - extra: Enables logging for extra debug-level messages + + --db.exclusive + Open environment in exclusive/monopolistic mode. Makes it possible to open a database on an NFS volume + + [possible values: true, false] + +Logging: + --log.stdout.format + The format to use for logs written to stdout + + [default: terminal] + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + --log.stdout.filter + The filter to use for logs written to stdout + + [default: ] + + --log.file.format + The format to use for logs written to the log file + + [default: terminal] + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + --log.file.filter + The filter to use for logs written to the log file + + [default: debug] + + --log.file.directory + The path to put log files in + + [default: /logs] + + --log.file.max-size + The maximum size (in MB) of one log file + + [default: 200] + + --log.file.max-files + The maximum amount of log files that will be stored. If set to 0, background file logging is disabled + + [default: 5] + + --log.journald + Write logs to journald + + --log.journald.filter + The filter to use for logs written to journald + + [default: error] + + --color + Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting + + [default: always] + + Possible values: + - always: Colors on + - auto: Colors on + - never: Colors off + +Display: + -v, --verbosity... + Set the minimum log level. + + -v Errors + -vv Warnings + -vvv Info + -vvvv Debug + -vvvvv Traces (warning: very verbose!) + + -q, --quiet + Silence all log output +``` \ No newline at end of file diff --git a/book/cli/reth/node.md b/book/cli/reth/node.md index dbfe7b1d4..edf0993d7 100644 --- a/book/cli/reth/node.md +++ b/book/cli/reth/node.md @@ -4,21 +4,18 @@ Start the node ```bash $ reth node --help - -Start the node - Usage: reth node [OPTIONS] Options: --datadir The path to the data dir for all reth files and subdirectories. - + Defaults to the OS-specific data directory: - + - Linux: `$XDG_DATA_HOME/reth/` or `$HOME/.local/share/reth/` - Windows: `{FOLDERID_RoamingAppData}/reth/` - macOS: `$HOME/Library/Application Support/reth/` - + [default: default] --config @@ -27,26 +24,26 @@ Options: --chain The chain this node is running. Possible values are either a built-in chain or the path to a chain specification file. - + Built-in chains: mainnet, sepolia, goerli, holesky, dev - + [default: mainnet] --instance Add a new instance of a node. - + Configures the ports of the node to avoid conflicts with the defaults. 
This is useful for running multiple nodes on the same machine. - + Max number of instances is 200. It is chosen in a way so that it's not possible to have port numbers that conflict with each other. - + Changes to the following port numbers: - DISCOVERY_PORT: default + `instance` - 1 - AUTH_PORT: default + `instance` * 100 - 100 - HTTP_RPC_PORT: default - `instance` + 1 - WS_RPC_PORT: default + `instance` * 2 - 2 - + [default: 1] --with-unused-ports Sets all ports to unused, allowing the OS to choose random unused ports when sockets are bound. - + Mutually exclusive with `--instance`. -h, --help @@ -55,7 +52,7 @@ Options: Metrics: --metrics Enable Prometheus metrics. - + The metrics will be served at the given interface and port. Networking: @@ -73,27 +70,42 @@ Networking: --discovery.addr The UDP address to use for devp2p peer discovery version 4 - + [default: 0.0.0.0] --discovery.port The UDP port to use for devp2p peer discovery version 4 - + [default: 30303] --discovery.v5.addr The UDP address to use for devp2p peer discovery version 5 - + [default: 0.0.0.0] --discovery.v5.port The UDP port to use for devp2p peer discovery version 5 - + [default: 9000] + --discovery.v5.lookup-interval + The interval in seconds at which to carry out periodic lookup queries, for the whole run of the program + + [default: 60] + + --discovery.v5.bootstrap.lookup-interval + The interval in seconds at which to carry out boost lookup queries, for a fixed number of times, at bootstrap + + [default: 5] + + --discovery.v5.bootstrap.lookup-countdown + The number of times to carry out boost lookup queries at bootstrap + + [default: 100] + --trusted-peers Comma separated enode URLs of trusted peers for P2P connections. - + --trusted-peers enode://abcd@192.168.0.1:30303 --trusted-only @@ -101,7 +113,7 @@ Networking: --bootnodes Comma separated enode URLs for P2P discovery bootstrap. - + Will fall back to a network-specific default if not specified. --peers-file @@ -110,12 +122,12 @@ Networking: --identity Custom node identity - - [default: reth/-/-gnu] + + [default: reth/-/] --p2p-secret-key Secret key to use for this node. - + This will also deterministically set the peer ID. If not specified, it will be set in the data dir for the chain being used. --no-persist-peers @@ -123,17 +135,17 @@ Networking: --nat NAT resolution method (any|none|upnp|publicip|extip:\) - + [default: any] --addr Network listening address - + [default: 0.0.0.0] --port Network listening port - + [default: 30303] --max-outbound-peers @@ -144,14 +156,14 @@ Networking: --pooled-tx-response-soft-limit Soft limit for the byte size of a `PooledTransactions` response on assembling a `GetPooledTransactions` request. Spec'd at 2 MiB. - + . - + [default: 2097152] --pooled-tx-pack-soft-limit Default soft limit for the byte size of a `PooledTransactions` response on assembling a `GetPooledTransactions` request. This defaults to less than the [`SOFT_LIMIT_BYTE_SIZE_POOLED_TRANSACTIONS_RESPONSE`], at 2 MiB, used when assembling a `PooledTransactions` response. 
Default is 128 KiB - + [default: 131072] RPC: @@ -160,17 +172,17 @@ RPC: --http.addr Http server address to listen on - + [default: 127.0.0.1] --http.port Http server port to listen on - + [default: 8545] --http.api Rpc Modules to be configured for the HTTP server - + [possible values: admin, debug, eth, net, trace, txpool, web3, rpc, reth, ots, eth-call-bundle] --http.corsdomain @@ -181,12 +193,12 @@ RPC: --ws.addr Ws server address to listen on - + [default: 127.0.0.1] --ws.port Ws server port to listen on - + [default: 8546] --ws.origins @@ -194,7 +206,7 @@ RPC: --ws.api Rpc Modules to be configured for the WS server - + [possible values: admin, debug, eth, net, trace, txpool, web3, rpc, reth, ots, eth-call-bundle] --ipcdisable @@ -202,176 +214,176 @@ RPC: --ipcpath Filename for IPC socket/pipe within the datadir - - [default: /tmp/reth.ipc] + + [default: .ipc] --authrpc.addr Auth server address to listen on - + [default: 127.0.0.1] --authrpc.port Auth server port to listen on - + [default: 8551] --authrpc.jwtsecret Path to a JWT secret to use for the authenticated engine-API RPC server. - + This will enforce JWT authentication for all requests coming from the consensus layer. - + If no path is provided, a secret will be generated and stored in the datadir under `//jwt.hex`. For mainnet this would be `~/.reth/mainnet/jwt.hex` by default. --auth-ipc - Enable auth engine api over IPC + Enable auth engine API over IPC --auth-ipc.path Filename for auth IPC socket/pipe within the datadir - - [default: /tmp/reth_engine_api.ipc] + + [default: _engine_api.ipc] --rpc.jwtsecret Hex encoded JWT secret to authenticate the regular RPC server(s), see `--http.api` and `--ws.api`. - + This is __not__ used for the authenticated engine-API RPC server, see `--authrpc.jwtsecret`. --rpc.max-request-size Set the maximum RPC request payload size for both HTTP and WS in megabytes - + [default: 15] --rpc.max-response-size Set the maximum RPC response payload size for both HTTP and WS in megabytes - + [default: 160] [aliases: rpc.returndata.limit] --rpc.max-subscriptions-per-connection Set the maximum concurrent subscriptions per connection - + [default: 1024] --rpc.max-connections Maximum number of RPC server connections - + [default: 500] --rpc.max-tracing-requests Maximum number of concurrent tracing requests - - [default: 14] + + [default: 8] --rpc.max-blocks-per-filter Maximum number of blocks that could be scanned per filter request. (0 = entire chain) - + [default: 100000] --rpc.max-logs-per-response Maximum number of logs that can be returned in a single response. 
(0 = no limit) - + [default: 20000] --rpc.gascap Maximum gas limit for `eth_call` and call tracing RPC methods - + [default: 50000000] RPC State Cache: --rpc-cache.max-blocks Max number of blocks in cache - + [default: 5000] --rpc-cache.max-receipts Max number of receipts in cache - + [default: 2000] --rpc-cache.max-envs Max number of bytes for cached env data - + [default: 1000] --rpc-cache.max-concurrent-db-requests Max number of concurrent database requests - + [default: 512] Gas Price Oracle: --gpo.blocks Number of recent blocks to check for gas price - + [default: 20] --gpo.ignoreprice Gas Price below which gpo will ignore transactions - + [default: 2] --gpo.maxprice Maximum transaction priority fee (or gasprice before London Fork) to be recommended by gpo - + [default: 500000000000] --gpo.percentile The percentile of gas prices to use for the estimate - + [default: 60] TxPool: --txpool.pending-max-count Max number of transactions in the pending sub-pool - + [default: 10000] --txpool.pending-max-size Max size of the pending sub-pool in megabytes - + [default: 20] --txpool.basefee-max-count Max number of transactions in the basefee sub-pool - + [default: 10000] --txpool.basefee-max-size Max size of the basefee sub-pool in megabytes - + [default: 20] --txpool.queued-max-count Max number of transactions in the queued sub-pool - + [default: 10000] --txpool.queued-max-size Max size of the queued sub-pool in megabytes - + [default: 20] - --txpool.max_account_slots + --txpool.max-account-slots Max number of executable transaction slots guaranteed per account - + [default: 16] --txpool.pricebump Price bump (in %) for the transaction pool underpriced check - + [default: 10] --blobpool.pricebump Price bump percentage to replace an already existing blob transaction - + [default: 100] --txpool.max-tx-input-bytes Max size in bytes of a single transaction allowed to enter the pool - + [default: 131072] --txpool.max-cached-entries The maximum number of blobs to keep in the in memory blob cache - + [default: 100] --txpool.nolocals @@ -386,33 +398,33 @@ TxPool: Builder: --builder.extradata Block extra data set by the payload builder - - [default: reth/v0.2.0-beta.5/linux] + + [default: reth//] --builder.gaslimit Target gas ceiling for built blocks - + [default: 30000000] --builder.interval The interval at which the job should build a new payload after the last (in seconds) - + [default: 1] --builder.deadline The deadline for when the payload builder job should resolve - + [default: 12] --builder.max-tasks Maximum number of tasks to spawn for building a payload - + [default: 3] Debug: --debug.continuous Prompt the downloader to download blocks one at a time. - + NOTE: This is for testing purposes only. --debug.terminate @@ -420,23 +432,17 @@ Debug: --debug.tip Set the chain tip manually for testing purposes. - + NOTE: This is a temporary flag --debug.max-block Runs the sync only up to the specified block - --debug.print-inspector - Print opcode level traces directly to console during execution + --debug.skip-fcu + If provided, the engine will skip `n` consecutive FCUs - --debug.hook-block - Hook on a specific block during execution - - --debug.hook-transaction - Hook on a specific transaction during execution - - --debug.hook-all - Hook on every transaction in a block + --debug.skip-new-payload + If provided, the engine will skip `n` consecutive new payloads --debug.engine-api-store The path to store engine API messages at.
If specified, all of the intercepted engine API messages will be written to specified location @@ -457,13 +463,13 @@ Database: --db.exclusive Open environment in exclusive/monopolistic mode. Makes it possible to open a database on an NFS volume - + [possible values: true, false] Dev testnet: --dev Start the node in dev mode - + This mode uses a local proof-of-authority consensus engine with either fixed block times or automatically mined blocks. Disables network discovery and enables local http server. @@ -475,7 +481,7 @@ Dev testnet: --dev.block-time Interval between blocks. - + Parses strings using [humantime::parse_duration] --dev.block-time 12s @@ -486,7 +492,7 @@ Pruning: Logging: --log.stdout.format The format to use for logs written to stdout - + [default: terminal] Possible values: @@ -496,12 +502,12 @@ Logging: --log.stdout.filter The filter to use for logs written to stdout - + [default: ] --log.file.format The format to use for logs written to the log file - + [default: terminal] Possible values: @@ -511,22 +517,22 @@ Logging: --log.file.filter The filter to use for logs written to the log file - + [default: debug] --log.file.directory The path to put log files in - - [default: /root/.cache/reth/logs] + + [default: /logs] --log.file.max-size The maximum size (in MB) of one log file - + [default: 200] --log.file.max-files The maximum amount of log files that will be stored. If set to 0, background file logging is disabled - + [default: 5] --log.journald @@ -534,12 +540,12 @@ Logging: --log.journald.filter The filter to use for logs written to journald - + [default: error] --color Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting - + [default: always] Possible values: @@ -550,7 +556,7 @@ Logging: Display: -v, --verbosity... Set the minimum log level. 
- + -v Errors -vv Warnings -vvv Info @@ -559,4 +565,4 @@ Display: -q, --quiet Silence all log output -``` +``` \ No newline at end of file diff --git a/book/cli/reth/p2p.md b/book/cli/reth/p2p.md index 17cd396cf..6f1c1d3e6 100644 --- a/book/cli/reth/p2p.md +++ b/book/cli/reth/p2p.md @@ -49,24 +49,36 @@ Options: --disable-discv4-discovery Disable Discv4 discovery + --enable-discv5-discovery + Enable Discv5 discovery + --discovery.addr - The UDP address to use for P2P discovery/networking + The UDP address to use for devp2p peer discovery version 4 [default: 0.0.0.0] --discovery.port - The UDP port to use for P2P discovery/networking + The UDP port to use for devp2p peer discovery version 4 [default: 30303] - --trusted-peer - Target trusted peer + --discovery.v5.addr + The UDP address to use for devp2p peer discovery version 5 + + [default: 0.0.0.0] - --trusted-only - Connect only to trusted peers + --discovery.v5.port + The UDP port to use for devp2p peer discovery version 5 + + [default: 9000] - --retries - The number of retries per request + --discovery.v5.lookup-interval + The interval in seconds at which to carry out periodic lookup queries, for the whole run of the program + + [default: 60] + + --discovery.v5.bootstrap.lookup-interval + The interval in seconds at which to carry out boost lookup queries, for a fixed number of times, at bootstrap [default: 5] @@ -81,6 +93,22 @@ Options: [default: 1] + --discovery.v5.bootstrap.lookup-countdown + The number of times to carry out boost lookup queries at bootstrap + + [default: 100] + + --trusted-peer + Target trusted peer + + --trusted-only + Connect only to trusted peers + + --retries + The number of retries per request + + [default: 5] + --nat [default: any] diff --git a/book/cli/reth/stage/drop.md b/book/cli/reth/stage/drop.md index 2efe9ed78..2b647574c 100644 --- a/book/cli/reth/stage/drop.md +++ b/book/cli/reth/stage/drop.md @@ -68,8 +68,8 @@ Database: - execution: The execution stage within the pipeline - account-hashing: The account hashing stage within the pipeline - storage-hashing: The storage hashing stage within the pipeline - - hashing: The hashing stage within the pipeline - - merkle: The Merkle stage within the pipeline + - hashing: The account and storage hashing stages within the pipeline + - merkle: The merkle stage within the pipeline - tx-lookup: The transaction lookup stage within the pipeline - account-history: The account history stage within the pipeline - storage-history: The storage history stage within the pipeline diff --git a/book/cli/reth/stage/run.md b/book/cli/reth/stage/run.md index f20eb3f68..348f082c4 100644 --- a/book/cli/reth/stage/run.md +++ b/book/cli/reth/stage/run.md @@ -17,8 +17,8 @@ Arguments: - execution: The execution stage within the pipeline - account-hashing: The account hashing stage within the pipeline - storage-hashing: The storage hashing stage within the pipeline - - hashing: The hashing stage within the pipeline - - merkle: The Merkle stage within the pipeline + - hashing: The account and storage hashing stages within the pipeline + - merkle: The merkle stage within the pipeline - tx-lookup: The transaction lookup stage within the pipeline - account-history: The account history stage within the pipeline - storage-history: The storage history stage within the pipeline @@ -96,16 +96,44 @@ Networking: --disable-discv4-discovery Disable Discv4 discovery + --enable-discv5-discovery + Enable Discv5 discovery + --discovery.addr - The UDP address to use for P2P discovery/networking + The UDP 
address to use for devp2p peer discovery version 4 [default: 0.0.0.0] --discovery.port - The UDP port to use for P2P discovery/networking + The UDP port to use for devp2p peer discovery version 4 [default: 30303] + --discovery.v5.addr + The UDP address to use for devp2p peer discovery version 5 + + [default: 0.0.0.0] + + --discovery.v5.port + The UDP port to use for devp2p peer discovery version 5 + + [default: 9000] + + --discovery.v5.lookup-interval + The interval in seconds at which to carry out periodic lookup queries, for the whole run of the program + + [default: 60] + + --discovery.v5.bootstrap.lookup-interval + The interval in seconds at which to carry out boost lookup queries, for a fixed number of times, at bootstrap + + [default: 5] + + --discovery.v5.bootstrap.lookup-countdown + The number of times to carry out boost lookup queries at bootstrap + + [default: 100] + --trusted-peers Comma separated enode URLs of trusted peers for P2P connections. @@ -126,7 +154,7 @@ Networking: --identity Custom node identity - [default: reth/-/-gnu] + [default: reth/-/] --p2p-secret-key Secret key to use for this node. diff --git a/book/cli/reth/stage/unwind.md b/book/cli/reth/stage/unwind.md index 8479bca51..44968aede 100644 --- a/book/cli/reth/stage/unwind.md +++ b/book/cli/reth/stage/unwind.md @@ -7,8 +7,8 @@ $ reth stage unwind --help Usage: reth stage unwind [OPTIONS] Commands: - to-block Unwinds the database until the given block number (range is inclusive) - num-blocks Unwinds the given number of blocks from the database + to-block Unwinds the database from the latest block, until the given block number or hash has been reached, that block is not included + num-blocks Unwinds the database from the latest block, until the given number of blocks have been reached help Print this message or the help of the given subcommand(s) Options: @@ -65,6 +65,117 @@ Database: [possible values: true, false] +Networking: + -d, --disable-discovery + Disable the discovery service + + --disable-dns-discovery + Disable the DNS discovery + + --disable-discv4-discovery + Disable Discv4 discovery + + --enable-discv5-discovery + Enable Discv5 discovery + + --discovery.addr + The UDP address to use for devp2p peer discovery version 4 + + [default: 0.0.0.0] + + --discovery.port + The UDP port to use for devp2p peer discovery version 4 + + [default: 30303] + + --discovery.v5.addr + The UDP address to use for devp2p peer discovery version 5 + + [default: 0.0.0.0] + + --discovery.v5.port + The UDP port to use for devp2p peer discovery version 5 + + [default: 9000] + + --discovery.v5.lookup-interval + The interval in seconds at which to carry out periodic lookup queries, for the whole run of the program + + [default: 60] + + --discovery.v5.bootstrap.lookup-interval + The interval in seconds at which to carry out boost lookup queries, for a fixed number of times, at bootstrap + + [default: 5] + + --discovery.v5.bootstrap.lookup-countdown + The number of times to carry out boost lookup queries at bootstrap + + [default: 100] + + --trusted-peers + Comma separated enode URLs of trusted peers for P2P connections. + + --trusted-peers enode://abcd@192.168.0.1:30303 + + --trusted-only + Connect only to trusted peers + + --bootnodes + Comma separated enode URLs for P2P discovery bootstrap. + + Will fall back to a network-specific default if not specified. + + --peers-file + The path to the known peers file. Connected peers are dumped to this file on nodes + shutdown, and read on startup. 
Cannot be used with `--no-persist-peers`. + + --identity + Custom node identity + + [default: reth/-/] + + --p2p-secret-key + Secret key to use for this node. + + This will also deterministically set the peer ID. If not specified, it will be set in the data dir for the chain being used. + + --no-persist-peers + Do not persist peers. + + --nat + NAT resolution method (any|none|upnp|publicip|extip:\) + + [default: any] + + --addr + Network listening address + + [default: 0.0.0.0] + + --port + Network listening port + + [default: 30303] + + --max-outbound-peers + Maximum number of outbound requests. default: 100 + + --max-inbound-peers + Maximum number of inbound requests. default: 30 + + --pooled-tx-response-soft-limit + Soft limit for the byte size of a `PooledTransactions` response on assembling a `GetPooledTransactions` request. Spec'd at 2 MiB. + + . + + [default: 2097152] + + --pooled-tx-pack-soft-limit + Default soft limit for the byte size of a `PooledTransactions` response on assembling a `GetPooledTransactions` request. This defaults to less than the [`SOFT_LIMIT_BYTE_SIZE_POOLED_TRANSACTIONS_RESPONSE`], at 2 MiB, used when assembling a `PooledTransactions` response. Default is 128 KiB + + [default: 131072] + Logging: --log.stdout.format The format to use for logs written to stdout diff --git a/book/cli/reth/stage/unwind/num-blocks.md b/book/cli/reth/stage/unwind/num-blocks.md index 9737bd4fa..24d2bc516 100644 --- a/book/cli/reth/stage/unwind/num-blocks.md +++ b/book/cli/reth/stage/unwind/num-blocks.md @@ -1,6 +1,6 @@ # reth stage unwind num-blocks -Unwinds the given number of blocks from the database +Unwinds the database from the latest block, until the given number of blocks have been reached ```bash $ reth stage unwind num-blocks --help diff --git a/book/cli/reth/stage/unwind/to-block.md b/book/cli/reth/stage/unwind/to-block.md index 74f8ec4b7..f8aa3bd6e 100644 --- a/book/cli/reth/stage/unwind/to-block.md +++ b/book/cli/reth/stage/unwind/to-block.md @@ -1,6 +1,6 @@ # reth stage unwind to-block -Unwinds the database until the given block number (range is inclusive) +Unwinds the database from the latest block, until the given block number or hash has been reached, that block is not included ```bash $ reth stage unwind to-block --help diff --git a/book/installation/installation.md b/book/installation/installation.md index 9ecf71cc5..edd8849af 100644 --- a/book/installation/installation.md +++ b/book/installation/installation.md @@ -8,6 +8,11 @@ There are three core methods to obtain Reth: * [Docker images](./docker.md) * [Building from source.](./source.md) +> **Note** +> +> If you have Docker installed, we recommend using the [Docker Compose](./docker.md#using-docker-compose) configuration +> that will get you Reth, Lighthouse (Consensus Client), Prometheus and Grafana running and syncing with just one command. + ## Hardware Requirements The hardware requirements for running Reth depend on the node configuration and can change over time as the network grows or new features are implemented. 
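The `--discovery.v5.*` lookup options introduced across the CLI pages above describe a two-phase schedule: a fixed number of closely spaced "boost" lookup queries at bootstrap, followed by periodic lookups for the rest of the run. Below is a minimal sketch of that schedule, assuming `tokio` for timers; `run_lookup_query` is a hypothetical stand-in for a discv5 Kademlia lookup, not reth's actual API:

```rust
use std::time::Duration;
use tokio::time::sleep;

/// Hypothetical stand-in for a single discv5 lookup query.
async fn run_lookup_query() {
    println!("lookup query");
}

/// Two-phase schedule implied by the flag descriptions above.
async fn lookup_schedule(
    lookup_interval: u64,            // --discovery.v5.lookup-interval, default 60
    bootstrap_lookup_interval: u64,  // --discovery.v5.bootstrap.lookup-interval, default 5
    bootstrap_lookup_countdown: u64, // --discovery.v5.bootstrap.lookup-countdown, default 100
) {
    // Bootstrap phase: a fixed number of boost lookups at the shorter interval.
    for _ in 0..bootstrap_lookup_countdown {
        run_lookup_query().await;
        sleep(Duration::from_secs(bootstrap_lookup_interval)).await;
    }
    // Steady state: periodic lookups for the whole remaining run of the program.
    loop {
        run_lookup_query().await;
        sleep(Duration::from_secs(lookup_interval)).await;
    }
}

#[tokio::main]
async fn main() {
    lookup_schedule(60, 5, 100).await;
}
```

With the defaults shown (100 boost lookups at 5 s, then one lookup every 60 s), a node spends roughly its first eight minutes aggressively filling its routing table before settling into the steady-state interval.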
diff --git a/book/jsonrpc/intro.md b/book/jsonrpc/intro.md index 1c602f6d2..21ded5bcc 100644 --- a/book/jsonrpc/intro.md +++ b/book/jsonrpc/intro.md @@ -114,7 +114,7 @@ You can use `curl`, a programming language with a low-level library, or a tool l As a reminder, you need to run the command below to enable all of these APIs using an HTTP transport: ```bash -RUST_LOG=info reth node --http --http.api "admin,debug,eth,net,trace,txpool,web3,rpc" +reth node --http --http.api "admin,debug,eth,net,trace,txpool,web3,rpc" ``` This allows you to then call: diff --git a/book/run/mainnet.md b/book/run/mainnet.md index 67e70b9db..4412f51c7 100644 --- a/book/run/mainnet.md +++ b/book/run/mainnet.md @@ -20,12 +20,12 @@ First, ensure that you have Reth installed by following the [installation instru Now, to start the archive node, run: ```bash -RUST_LOG=info reth node +reth node ``` And to start the full node, run: ```bash -RUST_LOG=info reth node --full +reth node --full ``` On differences between archive and full nodes, see [Pruning & Full Node](./pruning.md#basic-concepts) section. @@ -39,7 +39,7 @@ You can override this path using the `--authrpc.jwtsecret` option. You MUST use So one might do: ```bash -RUST_LOG=info reth node \ +reth node \ --authrpc.jwtsecret /path/to/secret \ --authrpc.addr 127.0.0.1 \ --authrpc.port 8551 @@ -54,7 +54,7 @@ First, make sure you have Lighthouse installed. Sigma Prime provides excellent [ Assuming you have done that, run: ```bash -RUST_LOG=info lighthouse bn \ +lighthouse bn \ --checkpoint-sync-url https://mainnet.checkpoint.sigp.io \ --execution-endpoint http://localhost:8551 \ --execution-jwt /path/to/secret diff --git a/book/run/observability.md b/book/run/observability.md index 39d485e1f..9f0f1b852 100644 --- a/book/run/observability.md +++ b/book/run/observability.md @@ -3,7 +3,7 @@ Reth exposes a number of metrics, which are listed [here][metrics]. We can serve them from an HTTP endpoint by adding the `--metrics` flag: ```bash -RUST_LOG=info reth node --metrics 127.0.0.1:9001 +reth node --metrics 127.0.0.1:9001 ``` Now, as the node is running, you can `curl` the endpoint you provided to the `--metrics` flag to get a text dump of the metrics at that time: diff --git a/book/run/pruning.md b/book/run/pruning.md index 4e6966551..b6f23f544 100644 --- a/book/run/pruning.md +++ b/book/run/pruning.md @@ -39,7 +39,7 @@ To run Reth as a full node, follow the steps from the previous chapter on [how to run on mainnet or official testnets](./mainnet.md), and add a `--full` flag. 
For example: ```bash -RUST_LOG=info reth node \ +reth node \ --full \ --authrpc.jwtsecret /path/to/secret \ --authrpc.addr 127.0.0.1 \ diff --git a/crates/blockchain-tree/Cargo.toml b/crates/blockchain-tree/Cargo.toml index 3a6ab1439..70ce9a290 100644 --- a/crates/blockchain-tree/Cargo.toml +++ b/crates/blockchain-tree/Cargo.toml @@ -15,10 +15,13 @@ workspace = true reth-primitives.workspace = true reth-interfaces.workspace = true reth-db.workspace = true +reth-evm.workspace = true +reth-revm.workspace = true reth-provider.workspace = true reth-stages-api.workspace = true reth-trie = { workspace = true, features = ["metrics"] } reth-trie-parallel = { workspace = true, features = ["parallel"] } +reth-consensus.workspace = true # common parking_lot.workspace = true @@ -39,11 +42,12 @@ reth-db = { workspace = true, features = ["test-utils"] } reth-interfaces = { workspace = true, features = ["test-utils"] } reth-primitives = { workspace = true , features = ["test-utils"] } reth-provider = { workspace = true, features = ["test-utils"] } +reth-evm = { workspace = true, features = ["test-utils"] } reth-revm.workspace = true -reth-node-ethereum.workspace = true +reth-evm-ethereum.workspace = true parking_lot.workspace = true assert_matches.workspace = true [features] test-utils = [] -optimism = ["reth-primitives/optimism", "reth-interfaces/optimism", "reth-provider/optimism", "reth-revm/optimism"] +optimism = ["reth-primitives/optimism", "reth-interfaces/optimism", "reth-provider/optimism"] diff --git a/crates/blockchain-tree/src/block_buffer.rs b/crates/blockchain-tree/src/block_buffer.rs index 23c6ca681..14e896337 100644 --- a/crates/blockchain-tree/src/block_buffer.rs +++ b/crates/blockchain-tree/src/block_buffer.rs @@ -104,13 +104,13 @@ impl BlockBuffer { removed } - /// Discard all blocks that precede finalized block number from the buffer. - pub fn remove_old_blocks(&mut self, finalized_number: BlockNumber) { + /// Discard all blocks that precede block number from the buffer. + pub fn remove_old_blocks(&mut self, block_number: BlockNumber) { let mut block_hashes_to_remove = Vec::new(); // discard all blocks that are before the finalized number. while let Some(entry) = self.earliest_blocks.first_entry() { - if *entry.key() > finalized_number { + if *entry.key() > block_number { break } let block_hashes = entry.remove(); diff --git a/crates/blockchain-tree/src/block_indices.rs b/crates/blockchain-tree/src/block_indices.rs index a262148b9..373b419b3 100644 --- a/crates/blockchain-tree/src/block_indices.rs +++ b/crates/blockchain-tree/src/block_indices.rs @@ -1,6 +1,6 @@ //! Implementation of [`BlockIndices`] related to [`super::BlockchainTree`] -use super::state::BlockChainId; +use super::state::BlockchainId; use crate::canonical_chain::CanonicalChain; use linked_hash_set::LinkedHashSet; use reth_primitives::{BlockHash, BlockNumHash, BlockNumber, SealedBlockWithSenders}; @@ -39,7 +39,7 @@ pub struct BlockIndices { /// hashes. 
block_number_to_block_hashes: BTreeMap>, /// Block hashes and side chain they belong - blocks_to_chain: HashMap, + blocks_to_chain: HashMap, } impl BlockIndices { @@ -71,7 +71,7 @@ impl BlockIndices { } /// Return block to chain id - pub fn blocks_to_chain(&self) -> &HashMap { + pub fn blocks_to_chain(&self) -> &HashMap { &self.blocks_to_chain } @@ -119,14 +119,14 @@ impl BlockIndices { &mut self, block_number: BlockNumber, block_hash: BlockHash, - chain_id: BlockChainId, + chain_id: BlockchainId, ) { self.block_number_to_block_hashes.entry(block_number).or_default().insert(block_hash); self.blocks_to_chain.insert(block_hash, chain_id); } /// Insert block to chain and fork child indices of the new chain - pub(crate) fn insert_chain(&mut self, chain_id: BlockChainId, chain: &Chain) { + pub(crate) fn insert_chain(&mut self, chain_id: BlockchainId, chain: &Chain) { for (number, block) in chain.blocks().iter() { // add block -> chain_id index self.blocks_to_chain.insert(block.hash(), chain_id); @@ -139,7 +139,7 @@ impl BlockIndices { } /// Get the chain ID the block belongs to - pub(crate) fn get_blocks_chain_id(&self, block: &BlockHash) -> Option { + pub(crate) fn get_blocks_chain_id(&self, block: &BlockHash) -> Option { self.blocks_to_chain.get(block).cloned() } @@ -149,7 +149,7 @@ impl BlockIndices { pub(crate) fn update_block_hashes( &mut self, hashes: BTreeMap, - ) -> (BTreeSet, Vec) { + ) -> (BTreeSet, Vec) { // set new canonical hashes. self.canonical_chain.replace(hashes.clone()); @@ -218,7 +218,7 @@ impl BlockIndices { /// Remove chain from indices and return dependent chains that need to be removed. /// Does the cleaning of the tree and removing blocks from the chain. - pub fn remove_chain(&mut self, chain: &Chain) -> BTreeSet { + pub fn remove_chain(&mut self, chain: &Chain) -> BTreeSet { chain .blocks() .iter() @@ -234,7 +234,7 @@ impl BlockIndices { &mut self, block_number: BlockNumber, block_hash: BlockHash, - ) -> BTreeSet { + ) -> BTreeSet { // rm number -> block if let btree_map::Entry::Occupied(mut entry) = self.block_number_to_block_hashes.entry(block_number) @@ -327,7 +327,7 @@ impl BlockIndices { &mut self, finalized_block: BlockNumber, num_of_additional_canonical_hashes_to_retain: u64, - ) -> BTreeSet { + ) -> BTreeSet { // get finalized chains. blocks between [self.last_finalized,finalized_block). // Dont remove finalized_block, as sidechain can point to it. 
let finalized_blocks: Vec = self diff --git a/crates/blockchain-tree/src/blockchain_tree.rs b/crates/blockchain-tree/src/blockchain_tree.rs index 02bae76bb..689994471 100644 --- a/crates/blockchain-tree/src/blockchain_tree.rs +++ b/crates/blockchain-tree/src/blockchain_tree.rs @@ -2,29 +2,31 @@ use crate::{ metrics::{MakeCanonicalAction, MakeCanonicalDurationsRecorder, TreeMetrics}, - state::{BlockChainId, TreeState}, + state::{BlockchainId, TreeState}, AppendableChain, BlockIndices, BlockchainTreeConfig, BundleStateData, TreeExternals, }; +use reth_consensus::{Consensus, ConsensusError}; use reth_db::database::Database; +use reth_evm::execute::BlockExecutorProvider; use reth_interfaces::{ blockchain_tree::{ error::{BlockchainTreeError, CanonicalError, InsertBlockError, InsertBlockErrorKind}, BlockAttachment, BlockStatus, BlockValidationKind, CanonicalOutcome, InsertPayloadOk, }, - consensus::{Consensus, ConsensusError}, executor::{BlockExecutionError, BlockValidationError}, provider::RootMismatch, RethResult, }; use reth_primitives::{ BlockHash, BlockNumHash, BlockNumber, ForkBlock, GotExpected, Hardfork, PruneModes, Receipt, - SealedBlock, SealedBlockWithSenders, SealedHeader, U256, + SealedBlock, SealedBlockWithSenders, SealedHeader, StaticFileSegment, B256, U256, }; use reth_provider::{ chain::{ChainSplit, ChainSplitTarget}, BlockExecutionWriter, BlockNumReader, BlockWriter, BundleStateWithReceipts, CanonStateNotification, CanonStateNotificationSender, CanonStateNotifications, Chain, - ChainSpecProvider, DisplayBlocksChain, ExecutorFactory, HeaderProvider, ProviderError, + ChainSpecProvider, DisplayBlocksChain, HeaderProvider, ProviderError, + StaticFileProviderFactory, }; use reth_stages_api::{MetricEvent, MetricEventsSender}; use std::{ @@ -57,13 +59,13 @@ use tracing::{debug, error, info, instrument, trace, warn}; /// * [BlockchainTree::make_canonical]: Check if we have the hash of a block that is the current /// canonical head and commit it to db. #[derive(Debug)] -pub struct BlockchainTree { +pub struct BlockchainTree { /// The state of the tree /// /// Tracks all the chains, the block indices, and the block buffer. state: TreeState, /// External components (the database, consensus engine etc.) - externals: TreeExternals, + externals: TreeExternals, /// Tree configuration config: BlockchainTreeConfig, /// Broadcast channel for canon state changes notifications. @@ -75,7 +77,7 @@ pub struct BlockchainTree { prune_modes: Option, } -impl BlockchainTree { +impl BlockchainTree { /// Subscribe to new blocks events. /// /// Note: Only canonical blocks are emitted by the tree. @@ -89,10 +91,10 @@ impl BlockchainTree { } } -impl BlockchainTree +impl BlockchainTree where DB: Database + Clone, - EVM: ExecutorFactory, + E: BlockExecutorProvider, { /// Builds the blockchain tree for the node. /// @@ -115,7 +117,7 @@ where /// storage space efficiently. It's important to validate this configuration to ensure it does /// not lead to unintended data loss. pub fn new( - externals: TreeExternals, + externals: TreeExternals, config: BlockchainTreeConfig, prune_modes: Option, ) -> RethResult { @@ -156,6 +158,18 @@ where }) } + /// Replaces the canon state notification sender. + /// + /// Caution: this will close any existing subscriptions to the previous sender. 
+ #[doc(hidden)] + pub fn with_canon_state_notification_sender( + mut self, + canon_state_notification_sender: CanonStateNotificationSender, + ) -> Self { + self.canon_state_notification_sender = canon_state_notification_sender; + self + } + /// Set the sync metric events sender. /// /// A transmitter for sending synchronization metrics. This is used for monitoring the node's @@ -441,7 +455,7 @@ where fn try_insert_block_into_side_chain( &mut self, block: SealedBlockWithSenders, - chain_id: BlockChainId, + chain_id: BlockchainId, block_validation_kind: BlockValidationKind, ) -> Result { let block_num_hash = block.num_hash(); @@ -514,7 +528,7 @@ where /// # Note /// /// This is not cached in order to save memory. - fn all_chain_hashes(&self, chain_id: BlockChainId) -> BTreeMap { + fn all_chain_hashes(&self, chain_id: BlockchainId) -> BTreeMap { let mut chain_id = chain_id; let mut hashes = BTreeMap::new(); loop { @@ -553,7 +567,7 @@ where /// the block on /// /// Returns `None` if the chain is unknown. - fn canonical_fork(&self, chain_id: BlockChainId) -> Option { + fn canonical_fork(&self, chain_id: BlockchainId) -> Option { let mut chain_id = chain_id; let mut fork; loop { @@ -572,13 +586,13 @@ where /// Insert a chain into the tree. /// /// Inserts a chain into the tree and builds the block indices. - fn insert_chain(&mut self, chain: AppendableChain) -> Option { + fn insert_chain(&mut self, chain: AppendableChain) -> Option { self.state.insert_chain(chain) } /// Iterate over all child chains that depend on this block and return /// their ids. - fn find_all_dependent_chains(&self, block: &BlockHash) -> HashSet { + fn find_all_dependent_chains(&self, block: &BlockHash) -> HashSet { // Find all forks of given block. let mut dependent_block = self.block_indices().fork_to_child().get(block).cloned().unwrap_or_default(); @@ -609,7 +623,7 @@ where /// This method searches for any chain that depended on this block being part of the canonical /// chain. Each dependent chain's state is then updated with state entries removed from the /// plain state during the unwind. - fn insert_unwound_chain(&mut self, chain: AppendableChain) -> Option { + fn insert_unwound_chain(&mut self, chain: AppendableChain) -> Option { // iterate over all blocks in chain and find any fork blocks that are in tree. for (number, block) in chain.blocks().iter() { let hash = block.hash(); @@ -770,6 +784,11 @@ where Ok(InsertPayloadOk::Inserted(status)) } + /// Discard all blocks that precede the given block number from the buffer. + pub fn remove_old_blocks(&mut self, block: BlockNumber) { + self.state.buffered_blocks.remove_old_blocks(block); + } + /// Finalize blocks up until and including `finalized_block`, and remove them from the tree. pub fn finalize_block(&mut self, finalized_block: BlockNumber) { // remove blocks @@ -784,7 +803,7 @@ where } } // clean block buffer. - self.state.buffered_blocks.remove_old_blocks(finalized_block); + self.remove_old_blocks(finalized_block); } /// Reads the last `N` canonical hashes from the database and updates the block indices of the @@ -804,6 +823,16 @@ where ) -> RethResult<()> { self.finalize_block(last_finalized_block); + let last_canonical_hashes = self.update_block_hashes()?; + + self.connect_buffered_blocks_to_hashes(last_canonical_hashes)?; + + Ok(()) + } + + /// Update all block hashes. Iterates over the present and new lists of canonical hashes, compares + them, removes all mismatches, disconnects them, and removes the affected chains.
+ pub fn update_block_hashes(&mut self) -> RethResult> { let last_canonical_hashes = self .externals .fetch_latest_canonical_hashes(self.config.num_of_canonical_hashes() as usize)?; @@ -818,9 +847,22 @@ where } } - self.connect_buffered_blocks_to_hashes(last_canonical_hashes)?; + Ok(last_canonical_hashes) + } - Ok(()) + /// Update all block hashes. Iterates over the present and new lists of canonical hashes, compares + them, removes all mismatches, disconnects them, removes the affected chains, and clears all + buffered blocks before the tip. + pub fn update_block_hashes_and_clear_buffered( + &mut self, + ) -> RethResult> { + let chain = self.update_block_hashes()?; + + if let Some((block, _)) = chain.last_key_value() { + self.remove_old_blocks(*block); + } + + Ok(chain) } /// Reads the last `N` canonical hashes from the database and updates the block indices of the @@ -893,7 +935,7 @@ where /// The pending part of the chain is reinserted back into the tree with the same `chain_id`. fn remove_and_split_chain( &mut self, - chain_id: BlockChainId, + chain_id: BlockchainId, split_at: ChainSplitTarget, ) -> Option { let chain = self.state.chains.remove(&chain_id)?; @@ -1204,9 +1246,31 @@ where /// /// The block, `revert_until`, is __non-inclusive__, i.e. `revert_until` stays in the database. fn revert_canonical_from_database( - &mut self, + &self, revert_until: BlockNumber, ) -> Result, CanonicalError> { + // This should only happen when an optimistic sync target was re-orged. + // + // Static files generally contain finalized data. The blockchain tree only deals + // with unfinalized data. The only scenario where canonical reverts go past the highest + // static file is when an optimistic sync occurred and unfinalized data was written to + // static files. + if self .externals .provider_factory .static_file_provider() .get_highest_static_file_block(StaticFileSegment::Headers) .unwrap_or_default() > + revert_until + { + trace!( + target: "blockchain_tree", + "Reverting optimistic canonical chain to block {}", + revert_until + ); + return Err(CanonicalError::OptimisticTargetRevert(revert_until)) + } + // read data that is needed for new sidechain let provider_rw = self.externals.provider_factory.provider_rw()?; @@ -1227,7 +1291,7 @@ where } } - fn update_reorg_metrics(&mut self, reorg_depth: f64) { + fn update_reorg_metrics(&self, reorg_depth: f64) { self.metrics.reorgs.increment(1); self.metrics.latest_reorg_depth.set(reorg_depth); } @@ -1259,9 +1323,10 @@ mod tests { use super::*; use assert_matches::assert_matches; use linked_hash_set::LinkedHashSet; + use reth_consensus::test_utils::TestConsensus; use reth_db::{tables, test_utils::TempDatabase, transaction::DbTxMut, DatabaseEnv}; - use reth_interfaces::test_utils::TestConsensus; - use reth_node_ethereum::EthEvmConfig; + use reth_evm::test_utils::MockExecutorProvider; + use reth_evm_ethereum::execute::EthExecutorProvider; #[cfg(not(feature = "optimism"))] use reth_primitives::proofs::calculate_receipt_root; #[cfg(feature = "optimism")] @@ -1273,23 +1338,19 @@ mod tests { revm_primitives::AccountInfo, stage::StageCheckpoint, Account, Address, ChainSpecBuilder, Genesis, GenesisAccount, Header, Signature, - Transaction, TransactionKind, TransactionSigned, TransactionSignedEcRecovered, TxEip1559, - Withdrawals, B256, MAINNET, + Transaction, TransactionSigned, TransactionSignedEcRecovered, TxEip1559, Withdrawals, B256, + MAINNET, }; use reth_provider::{ - test_utils::{ - blocks::BlockChainTestData, create_test_provider_factory_with_chain_spec, -
TestExecutorFactory, - }, + test_utils::{blocks::BlockchainTestData, create_test_provider_factory_with_chain_spec}, ProviderFactory, }; - use reth_revm::EvmProcessorFactory; use reth_trie::StateRoot; use std::collections::HashMap; fn setup_externals( exec_res: Vec, - ) -> TreeExternals>, TestExecutorFactory> { + ) -> TreeExternals>, MockExecutorProvider> { let chain_spec = Arc::new( ChainSpecBuilder::default() .chain(MAINNET.chain) @@ -1299,7 +1360,7 @@ mod tests { ); let provider_factory = create_test_provider_factory_with_chain_spec(chain_spec); let consensus = Arc::new(TestConsensus::default()); - let executor_factory = TestExecutorFactory::default(); + let executor_factory = MockExecutorProvider::default(); executor_factory.extend(exec_res); TreeExternals::new(provider_factory, consensus, executor_factory) @@ -1339,7 +1400,7 @@ mod tests { /// Number of chains chain_num: Option, /// Check block to chain index - block_to_chain: Option>, + block_to_chain: Option>, /// Check fork to child index fork_to_child: Option>>, /// Pending blocks @@ -1354,7 +1415,7 @@ mod tests { self } - fn with_block_to_chain(mut self, block_to_chain: HashMap) -> Self { + fn with_block_to_chain(mut self, block_to_chain: HashMap) -> Self { self.block_to_chain = Some(block_to_chain); self } @@ -1383,7 +1444,7 @@ mod tests { self } - fn assert(self, tree: &BlockchainTree) { + fn assert(self, tree: &BlockchainTree) { if let Some(chain_num) = self.chain_num { assert_eq!(tree.state.chains.len(), chain_num); } @@ -1427,8 +1488,7 @@ mod tests { ); let provider_factory = create_test_provider_factory_with_chain_spec(chain_spec.clone()); let consensus = Arc::new(TestConsensus::default()); - let executor_factory = - EvmProcessorFactory::new(chain_spec.clone(), EthEvmConfig::default()); + let executor_provider = EthExecutorProvider::ethereum(chain_spec.clone()); { let provider_rw = provider_factory.provider_rw().unwrap(); @@ -1453,7 +1513,7 @@ mod tests { chain_id: chain_spec.chain.id(), nonce, gas_limit: 21_000, - to: TransactionKind::Call(Address::ZERO), + to: Address::ZERO.into(), max_fee_per_gas: EIP1559_INITIAL_BASE_FEE as u128, ..Default::default() }), @@ -1536,7 +1596,7 @@ mod tests { mock_block(3, Some(sidechain_block_1.hash()), Vec::from([mock_tx(2)]), 3); let mut tree = BlockchainTree::new( - TreeExternals::new(provider_factory, consensus, executor_factory), + TreeExternals::new(provider_factory, consensus, executor_provider), BlockchainTreeConfig::default(), None, ) @@ -1602,7 +1662,7 @@ mod tests { #[test] fn sidechain_block_hashes() { - let data = BlockChainTestData::default_from_number(11); + let data = BlockchainTestData::default_from_number(11); let (block1, exec1) = data.blocks[0].clone(); let (block2, exec2) = data.blocks[1].clone(); let (block3, exec3) = data.blocks[2].clone(); @@ -1678,7 +1738,7 @@ mod tests { #[test] fn cached_trie_updates() { - let data = BlockChainTestData::default_from_number(11); + let data = BlockchainTestData::default_from_number(11); let (block1, exec1) = data.blocks[0].clone(); let (block2, exec2) = data.blocks[1].clone(); let (block3, exec3) = data.blocks[2].clone(); @@ -1766,7 +1826,7 @@ mod tests { #[test] fn test_side_chain_fork() { - let data = BlockChainTestData::default_from_number(11); + let data = BlockchainTestData::default_from_number(11); let (block1, exec1) = data.blocks[0].clone(); let (block2, exec2) = data.blocks[1].clone(); let genesis = data.genesis; @@ -1864,7 +1924,7 @@ mod tests { #[test] fn sanity_path() { - let data = 
BlockChainTestData::default_from_number(11); + let data = BlockchainTestData::default_from_number(11); let (block1, exec1) = data.blocks[0].clone(); let (block2, exec2) = data.blocks[1].clone(); let genesis = data.genesis; @@ -2153,7 +2213,7 @@ mod tests { .assert(&tree); // unwind canonical - assert_eq!(tree.unwind(block1.number), Ok(())); + assert!(tree.unwind(block1.number).is_ok()); // Trie state: // b2 b2a (pending block) // / / @@ -2217,7 +2277,7 @@ mod tests { .assert(&tree); // update canonical block to b2, this would make b2a be removed - assert_eq!(tree.connect_buffered_blocks_to_canonical_hashes_and_finalize(12), Ok(())); + assert!(tree.connect_buffered_blocks_to_canonical_hashes_and_finalize(12).is_ok()); assert_eq!( tree.is_block_known(block2.num_hash()).unwrap(), diff --git a/crates/blockchain-tree/src/chain.rs b/crates/blockchain-tree/src/chain.rs index 2444cf24a..db4b4627a 100644 --- a/crates/blockchain-tree/src/chain.rs +++ b/crates/blockchain-tree/src/chain.rs @@ -5,23 +5,25 @@ use super::externals::TreeExternals; use crate::BundleStateDataRef; +use reth_consensus::{Consensus, ConsensusError}; use reth_db::database::Database; +use reth_evm::execute::{BlockExecutionOutput, BlockExecutorProvider, Executor}; use reth_interfaces::{ blockchain_tree::{ error::{BlockchainTreeError, InsertBlockErrorKind}, BlockAttachment, BlockValidationKind, }, - consensus::{Consensus, ConsensusError}, RethResult, }; use reth_primitives::{ - BlockHash, BlockNumber, ForkBlock, GotExpected, SealedBlockWithSenders, SealedHeader, U256, + BlockHash, BlockNumber, ForkBlock, GotExpected, Receipts, SealedBlockWithSenders, SealedHeader, + U256, }; use reth_provider::{ providers::{BundleStateProvider, ConsistentDbView}, - BundleStateDataProvider, BundleStateWithReceipts, Chain, ExecutorFactory, ProviderError, - StateRootProvider, + BundleStateDataProvider, BundleStateWithReceipts, Chain, ProviderError, StateRootProvider, }; +use reth_revm::database::StateProviderDatabase; use reth_trie::updates::TrieUpdates; use reth_trie_parallel::parallel_root::ParallelStateRoot; use std::{ @@ -66,18 +68,18 @@ impl AppendableChain { /// /// if [BlockValidationKind::Exhaustive] is specified, the method will verify the state root of /// the block. - pub fn new_canonical_fork( + pub fn new_canonical_fork( block: SealedBlockWithSenders, parent_header: &SealedHeader, canonical_block_hashes: &BTreeMap, canonical_fork: ForkBlock, - externals: &TreeExternals, + externals: &TreeExternals, block_attachment: BlockAttachment, block_validation_kind: BlockValidationKind, ) -> Result where DB: Database + Clone, - EF: ExecutorFactory, + E: BlockExecutorProvider, { let state = BundleStateWithReceipts::default(); let empty = BTreeMap::new(); @@ -104,20 +106,21 @@ impl AppendableChain { /// Create a new chain that forks off of an existing sidechain. /// /// This differs from [AppendableChain::new_canonical_fork] in that this starts a new fork. 
- pub(crate) fn new_chain_fork( + pub(crate) fn new_chain_fork( &self, block: SealedBlockWithSenders, side_chain_block_hashes: BTreeMap, canonical_block_hashes: &BTreeMap, canonical_fork: ForkBlock, - externals: &TreeExternals, + externals: &TreeExternals, block_validation_kind: BlockValidationKind, ) -> Result where DB: Database + Clone, - EF: ExecutorFactory, + E: BlockExecutorProvider, { - let parent_number = block.number - 1; + let parent_number = + block.number.checked_sub(1).ok_or(BlockchainTreeError::GenesisBlockHasNoParent)?; let parent = self.blocks().get(&parent_number).ok_or( BlockchainTreeError::BlockNumberNotFoundInChain { block_number: parent_number }, )?; @@ -166,18 +169,18 @@ impl AppendableChain { /// - [BlockAttachment] represents if the block extends the canonical chain, and thus we can /// cache the trie state updates. /// - [BlockValidationKind] determines if the state root __should__ be validated. - fn validate_and_execute( + fn validate_and_execute( block: SealedBlockWithSenders, parent_block: &SealedHeader, bundle_state_data_provider: BSDP, - externals: &TreeExternals, + externals: &TreeExternals, block_attachment: BlockAttachment, block_validation_kind: BlockValidationKind, ) -> RethResult<(BundleStateWithReceipts, Option)> where BSDP: BundleStateDataProvider, DB: Database + Clone, - EVM: ExecutorFactory, + E: BlockExecutorProvider, { // some checks are done before blocks comes here. externals.consensus.validate_header_against_parent(&block, parent_block)?; @@ -203,11 +206,17 @@ impl AppendableChain { let provider = BundleStateProvider::new(state_provider, bundle_state_data_provider); - let mut executor = externals.executor_factory.with_state(&provider); + let db = StateProviderDatabase::new(&provider); + let executor = externals.executor_factory.executor(db); let block_hash = block.hash(); let block = block.unseal(); - executor.execute_and_verify_receipt(&block, U256::MAX)?; - let bundle_state = executor.take_output_state(); + let state = executor.execute((&block, U256::MAX).into())?; + let BlockExecutionOutput { state, receipts, .. } = state; + let bundle_state = BundleStateWithReceipts::new( + state, + Receipts::from_block_receipt(receipts), + block.number, + ); // check state root if the block extends the canonical chain __and__ if state root // validation was requested. @@ -259,19 +268,19 @@ impl AppendableChain { /// __not__ the canonical head. #[track_caller] #[allow(clippy::too_many_arguments)] - pub(crate) fn append_block( + pub(crate) fn append_block( &mut self, block: SealedBlockWithSenders, side_chain_block_hashes: BTreeMap, canonical_block_hashes: &BTreeMap, - externals: &TreeExternals, + externals: &TreeExternals, canonical_fork: ForkBlock, block_attachment: BlockAttachment, block_validation_kind: BlockValidationKind, ) -> Result<(), InsertBlockErrorKind> where DB: Database + Clone, - EF: ExecutorFactory, + E: BlockExecutorProvider, { let parent_block = self.chain.tip(); diff --git a/crates/blockchain-tree/src/externals.rs b/crates/blockchain-tree/src/externals.rs index 5a288271e..a311281c9 100644 --- a/crates/blockchain-tree/src/externals.rs +++ b/crates/blockchain-tree/src/externals.rs @@ -1,11 +1,12 @@ //! Blockchain tree externals. 
+use reth_consensus::Consensus; use reth_db::{ cursor::DbCursorRO, database::Database, static_file::HeaderMask, tables, transaction::DbTx, }; -use reth_interfaces::{consensus::Consensus, RethResult}; +use reth_interfaces::RethResult; use reth_primitives::{BlockHash, BlockNumber, StaticFileSegment}; -use reth_provider::{ProviderFactory, StatsReader}; +use reth_provider::{ProviderFactory, StaticFileProviderFactory, StatsReader}; use std::{collections::BTreeMap, sync::Arc}; /// A container for external components. @@ -18,27 +19,27 @@ use std::{collections::BTreeMap, sync::Arc}; /// - The executor factory to execute blocks with /// - The chain spec #[derive(Debug)] -pub struct TreeExternals { +pub struct TreeExternals { /// The provider factory, used to commit the canonical chain, or unwind it. pub(crate) provider_factory: ProviderFactory, /// The consensus engine. pub(crate) consensus: Arc, /// The executor factory to execute blocks with. - pub(crate) executor_factory: EVM, + pub(crate) executor_factory: E, } -impl TreeExternals { +impl TreeExternals { /// Create new tree externals. pub fn new( provider_factory: ProviderFactory, consensus: Arc, - executor_factory: EVM, + executor_factory: E, ) -> Self { Self { provider_factory, consensus, executor_factory } } } -impl TreeExternals { +impl TreeExternals { /// Fetches the latest canonical block hashes by walking backwards from the head. /// /// Returns the hashes sorted by increasing block numbers diff --git a/crates/blockchain-tree/src/noop.rs b/crates/blockchain-tree/src/noop.rs index eff385fb6..776a15325 100644 --- a/crates/blockchain-tree/src/noop.rs +++ b/crates/blockchain-tree/src/noop.rs @@ -22,7 +22,19 @@ use std::collections::{BTreeMap, HashSet}; /// Caution: this is only intended for testing purposes, or for wiring components together. #[derive(Debug, Clone, Default)] #[non_exhaustive] -pub struct NoopBlockchainTree {} +pub struct NoopBlockchainTree { + /// Broadcast channel for canon state changes notifications. + pub canon_state_notification_sender: Option, +} + +impl NoopBlockchainTree { + /// Create a new NoopBlockchainTree with a canon state notification sender. 
+ pub fn with_canon_state_notifications( + canon_state_notification_sender: CanonStateNotificationSender, + ) -> Self { + Self { canon_state_notification_sender: Some(canon_state_notification_sender) } + } +} impl BlockchainTreeEngine for NoopBlockchainTree { fn buffer_block(&self, _block: SealedBlockWithSenders) -> Result<(), InsertBlockError> { @@ -56,6 +68,12 @@ impl BlockchainTreeEngine for NoopBlockchainTree { fn make_canonical(&self, block_hash: BlockHash) -> Result { Err(BlockchainTreeError::BlockHashNotFoundInChain { block_hash }.into()) } + + fn update_block_hashes_and_clear_buffered( + &self, + ) -> RethResult> { + Ok(BTreeMap::new()) + } } impl BlockchainTreeViewer for NoopBlockchainTree { @@ -127,6 +145,9 @@ impl BlockchainTreePendingStateProvider for NoopBlockchainTree { impl CanonStateSubscriptions for NoopBlockchainTree { fn subscribe_to_canonical_state(&self) -> CanonStateNotifications { - CanonStateNotificationSender::new(1).subscribe() + self.canon_state_notification_sender + .as_ref() + .map(|sender| sender.subscribe()) + .unwrap_or_else(|| CanonStateNotificationSender::new(1).subscribe()) } } diff --git a/crates/blockchain-tree/src/shareable.rs b/crates/blockchain-tree/src/shareable.rs index 7a0eb36fa..77cc53c2d 100644 --- a/crates/blockchain-tree/src/shareable.rs +++ b/crates/blockchain-tree/src/shareable.rs @@ -3,6 +3,7 @@ use super::BlockchainTree; use parking_lot::RwLock; use reth_db::database::Database; +use reth_evm::execute::BlockExecutorProvider; use reth_interfaces::{ blockchain_tree::{ error::{CanonicalError, InsertBlockError}, @@ -17,7 +18,7 @@ use reth_primitives::{ }; use reth_provider::{ BlockchainTreePendingStateProvider, BundleStateDataProvider, CanonStateSubscriptions, - ExecutorFactory, ProviderError, + ProviderError, }; use std::{ collections::{BTreeMap, HashSet}, @@ -27,22 +28,22 @@ use tracing::trace; /// Shareable blockchain tree that is behind a RwLock #[derive(Clone, Debug)] -pub struct ShareableBlockchainTree { +pub struct ShareableBlockchainTree { /// BlockchainTree - pub tree: Arc>>, + pub tree: Arc>>, } -impl ShareableBlockchainTree { +impl ShareableBlockchainTree { /// Create a new shareable database. 
- pub fn new(tree: BlockchainTree) -> Self { + pub fn new(tree: BlockchainTree) -> Self { Self { tree: Arc::new(RwLock::new(tree)) } } } -impl BlockchainTreeEngine for ShareableBlockchainTree +impl BlockchainTreeEngine for ShareableBlockchainTree where DB: Database + Clone, - EF: ExecutorFactory, + E: BlockExecutorProvider, { fn buffer_block(&self, block: SealedBlockWithSenders) -> Result<(), InsertBlockError> { let mut tree = self.tree.write(); @@ -82,6 +83,15 @@ where res } + fn update_block_hashes_and_clear_buffered( + &self, + ) -> RethResult> { + let mut tree = self.tree.write(); + let res = tree.update_block_hashes_and_clear_buffered(); + tree.update_chains_metrics(); + res + } + fn connect_buffered_blocks_to_canonical_hashes(&self) -> RethResult<()> { trace!(target: "blockchain_tree", "Connecting buffered blocks to canonical hashes"); let mut tree = self.tree.write(); @@ -99,10 +109,10 @@ where } } -impl BlockchainTreeViewer for ShareableBlockchainTree +impl BlockchainTreeViewer for ShareableBlockchainTree where DB: Database + Clone, - EF: ExecutorFactory, + E: BlockExecutorProvider, { fn blocks(&self) -> BTreeMap> { trace!(target: "blockchain_tree", "Returning all blocks in blockchain tree"); @@ -181,10 +191,10 @@ where } } -impl BlockchainTreePendingStateProvider for ShareableBlockchainTree +impl BlockchainTreePendingStateProvider for ShareableBlockchainTree where DB: Database + Clone, - EF: ExecutorFactory, + E: BlockExecutorProvider, { fn find_pending_state_provider( &self, @@ -196,10 +206,10 @@ where } } -impl CanonStateSubscriptions for ShareableBlockchainTree +impl CanonStateSubscriptions for ShareableBlockchainTree where DB: Send + Sync, - EF: Send + Sync, + E: Send + Sync, { fn subscribe_to_canonical_state(&self) -> reth_provider::CanonStateNotifications { trace!(target: "blockchain_tree", "Registered subscriber for canonical state"); diff --git a/crates/blockchain-tree/src/state.rs b/crates/blockchain-tree/src/state.rs index f741df8ec..5013be8c1 100644 --- a/crates/blockchain-tree/src/state.rs +++ b/crates/blockchain-tree/src/state.rs @@ -10,7 +10,7 @@ pub(crate) struct TreeState { /// Keeps track of new unique identifiers for chains block_chain_id_generator: u64, /// The tracked chains and their current data. - pub(crate) chains: HashMap, + pub(crate) chains: HashMap, /// Indices to block and their connection to the canonical chain. /// /// This gets modified by the tree itself and is read from engine API/RPC to access the pending @@ -41,10 +41,10 @@ impl TreeState { /// Issues a new unique identifier for a new chain. #[inline] - fn next_id(&mut self) -> BlockChainId { + fn next_id(&mut self) -> BlockchainId { let id = self.block_chain_id_generator; self.block_chain_id_generator += 1; - BlockChainId(id) + BlockchainId(id) } /// Expose internal indices of the BlockchainTree. @@ -85,7 +85,7 @@ impl TreeState { /// Insert a chain into the tree. /// /// Inserts a chain into the tree and builds the block indices. - pub(crate) fn insert_chain(&mut self, chain: AppendableChain) -> Option { + pub(crate) fn insert_chain(&mut self, chain: AppendableChain) -> Option { if chain.is_empty() { return None } @@ -113,17 +113,17 @@ impl TreeState { /// The ID of a sidechain internally in a [`BlockchainTree`][super::BlockchainTree]. 
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Ord, PartialOrd)] -pub struct BlockChainId(u64); +pub struct BlockchainId(u64); -impl From for u64 { - fn from(value: BlockChainId) -> Self { +impl From for u64 { + fn from(value: BlockchainId) -> Self { value.0 } } #[cfg(test)] -impl From for BlockChainId { +impl From for BlockchainId { fn from(value: u64) -> Self { - BlockChainId(value) + BlockchainId(value) } } diff --git a/crates/cli/runner/Cargo.toml b/crates/cli/runner/Cargo.toml index 697621cee..3182b738b 100644 --- a/crates/cli/runner/Cargo.toml +++ b/crates/cli/runner/Cargo.toml @@ -15,7 +15,6 @@ workspace = true reth-tasks.workspace = true # async -futures.workspace = true tokio = { workspace = true, features = ["macros", "rt-multi-thread", "signal"] } # misc diff --git a/crates/cli/runner/src/lib.rs b/crates/cli/runner/src/lib.rs index 31a1356c6..94536d0cb 100644 --- a/crates/cli/runner/src/lib.rs +++ b/crates/cli/runner/src/lib.rs @@ -10,9 +10,8 @@ //! Entrypoint for running commands. -use futures::pin_mut; use reth_tasks::{TaskExecutor, TaskManager}; -use std::future::Future; +use std::{future::Future, pin::pin}; use tracing::{debug, error, trace}; /// Executes CLI commands. @@ -141,7 +140,7 @@ where E: Send + Sync + From + 'static, { { - pin_mut!(fut); + let fut = pin!(fut); tokio::select! { err = tasks => { return Err(err.into()) @@ -166,7 +165,9 @@ where { let mut stream = tokio::signal::unix::signal(tokio::signal::unix::SignalKind::terminate())?; let sigterm = stream.recv(); - pin_mut!(sigterm, ctrl_c, fut); + let sigterm = pin!(sigterm); + let ctrl_c = pin!(ctrl_c); + let fut = pin!(fut); tokio::select! { _ = ctrl_c => { @@ -181,7 +182,8 @@ where #[cfg(not(unix))] { - pin_mut!(ctrl_c, fut); + let ctrl_c = pin!(ctrl_c); + let fut = pin!(fut); tokio::select! { _ = ctrl_c => { diff --git a/crates/config/Cargo.toml b/crates/config/Cargo.toml index ece3fa0bb..d9147d7b7 100644 --- a/crates/config/Cargo.toml +++ b/crates/config/Cargo.toml @@ -24,7 +24,9 @@ humantime-serde.workspace = true # crypto secp256k1 = { workspace = true, features = ["global-context", "rand-std", "recovery"] } -[dev-dependencies] +# toml confy.workspace = true + +[dev-dependencies] tempfile.workspace = true toml.workspace = true diff --git a/crates/config/src/config.rs b/crates/config/src/config.rs index 7ce947b50..f6537a04c 100644 --- a/crates/config/src/config.rs +++ b/crates/config/src/config.rs @@ -6,10 +6,13 @@ use reth_primitives::PruneModes; use secp256k1::SecretKey; use serde::{Deserialize, Deserializer, Serialize}; use std::{ + ffi::OsStr, path::{Path, PathBuf}, time::Duration, }; +const EXTENSION: &str = "toml"; + /// Configuration for the reth node. #[derive(Debug, Clone, Default, Deserialize, PartialEq, Eq, Serialize)] #[serde(default)] @@ -47,6 +50,22 @@ impl Config { .peer_config(peer_config) .discovery(discv4) } + + /// Save the configuration to a TOML file. + pub fn save(&self, path: &Path) -> Result<(), std::io::Error> { + if path.extension() != Some(OsStr::new(EXTENSION)) { + return Err(std::io::Error::new( + std::io::ErrorKind::InvalidInput, + format!("reth config file extension must be '{EXTENSION}'"), + )); + } + confy::store_path(path, self).map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e)) + } + + /// Sets the pruning configuration. + pub fn update_prune_config(&mut self, prune_config: PruneConfig) { + self.prune = Some(prune_config); + } } /// Configuration for each stage in the pipeline.
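The new `Config::save` helper above validates the file extension before delegating to `confy::store_path`. A minimal usage sketch; the `reth_config` crate path is an assumption, while `save`'s behavior follows the hunk above:

```rust
use std::path::Path;

fn main() -> std::io::Result<()> {
    // `Config` derives `Default`, so an empty configuration can be built directly.
    let config = reth_config::Config::default();

    // Rejected with `ErrorKind::InvalidInput`: the extension must be `toml`.
    assert!(config.save(Path::new("reth.json")).is_err());

    // Serialized to disk as TOML via `confy::store_path`.
    config.save(Path::new("reth.toml"))?;
    Ok(())
}
```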
@@ -325,11 +344,9 @@ where #[cfg(test)] mod tests { - use super::Config; + use super::{Config, EXTENSION}; use std::time::Duration; - const EXTENSION: &str = "toml"; - fn with_tempdir(filename: &str, proc: fn(&std::path::Path)) { let temp_dir = tempfile::tempdir().unwrap(); let config_path = temp_dir.path().join(filename).with_extension(EXTENSION); @@ -347,6 +364,14 @@ mod tests { }) } + #[test] + fn test_store_config_method() { + with_tempdir("config-store-test-method", |config_path| { + let config = Config::default(); + config.save(config_path).expect("Failed to store config"); + }) + } + #[test] fn test_load_config() { with_tempdir("config-load-test", |config_path| { diff --git a/crates/consensus/auto-seal/Cargo.toml b/crates/consensus/auto-seal/Cargo.toml index 5fbf4f07a..435ade53d 100644 --- a/crates/consensus/auto-seal/Cargo.toml +++ b/crates/consensus/auto-seal/Cargo.toml @@ -22,6 +22,9 @@ reth-revm.workspace = true reth-transaction-pool.workspace = true reth-evm.workspace = true reth-engine-primitives.workspace = true +reth-consensus.workspace = true +reth-rpc-types.workspace = true +reth-network-types.workspace = true # async futures-util.workspace = true diff --git a/crates/consensus/auto-seal/src/client.rs b/crates/consensus/auto-seal/src/client.rs index 7ed69c289..67a84d5d9 100644 --- a/crates/consensus/auto-seal/src/client.rs +++ b/crates/consensus/auto-seal/src/client.rs @@ -7,9 +7,8 @@ use reth_interfaces::p2p::{ headers::client::{HeadersClient, HeadersFut, HeadersRequest}, priority::Priority, }; -use reth_primitives::{ - BlockBody, BlockHashOrNumber, Header, HeadersDirection, PeerId, WithPeerId, B256, -}; +use reth_network_types::{PeerId, WithPeerId}; +use reth_primitives::{BlockBody, BlockHashOrNumber, Header, HeadersDirection, B256}; use std::fmt::Debug; use tracing::{trace, warn}; diff --git a/crates/consensus/auto-seal/src/lib.rs b/crates/consensus/auto-seal/src/lib.rs index 62a293664..e954108c8 100644 --- a/crates/consensus/auto-seal/src/lib.rs +++ b/crates/consensus/auto-seal/src/lib.rs @@ -16,27 +16,20 @@ #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] use reth_beacon_consensus::BeaconEngineMessage; +use reth_consensus::{Consensus, ConsensusError}; use reth_engine_primitives::EngineTypes; -use reth_evm::ConfigureEvm; -use reth_interfaces::{ - consensus::{Consensus, ConsensusError}, - executor::{BlockExecutionError, BlockValidationError}, -}; +use reth_interfaces::executor::{BlockExecutionError, BlockValidationError}; use reth_primitives::{ - constants::{EMPTY_RECEIPTS, EMPTY_TRANSACTIONS, ETHEREUM_BLOCK_GAS_LIMIT}, + constants::{EMPTY_TRANSACTIONS, ETHEREUM_BLOCK_GAS_LIMIT}, eip4844::calculate_excess_blob_gas, - proofs, Block, BlockBody, BlockHash, BlockHashOrNumber, BlockNumber, BlockWithSenders, Bloom, - ChainSpec, Header, ReceiptWithBloom, SealedBlock, SealedHeader, TransactionSigned, B256, - EMPTY_OMMER_ROOT_HASH, U256, + proofs, Block, BlockBody, BlockHash, BlockHashOrNumber, BlockNumber, ChainSpec, Header, + Receipts, SealedBlock, SealedHeader, TransactionSigned, Withdrawals, B256, U256, }; use reth_provider::{ - BlockExecutor, BlockReaderIdExt, BundleStateWithReceipts, CanonStateNotificationSender, - StateProviderFactory, -}; -use reth_revm::{ - database::StateProviderDatabase, db::states::bundle_state::BundleRetention, - processor::EVMProcessor, State, + BlockReaderIdExt, BundleStateWithReceipts, CanonStateNotificationSender, StateProviderFactory, + StateRootProvider, }; +use reth_revm::database::StateProviderDatabase; use 
reth_transaction_pool::TransactionPool; use std::{ collections::HashMap, @@ -52,6 +45,7 @@ mod task; pub use crate::client::AutoSealClient; pub use mode::{FixedBlockTimeMiner, MiningMode, ReadyTransactionMiner}; +use reth_evm::execute::{BlockExecutionOutput, BlockExecutorProvider, Executor}; pub use task::MiningTask; /// A consensus implementation intended for local development and testing purposes. @@ -272,6 +266,8 @@ impl StorageInner { pub(crate) fn build_header_template( &self, transactions: &[TransactionSigned], + ommers: &[Header], + withdrawals: Option<&Withdrawals>, chain_spec: Arc<ChainSpec>, ) -> Header { let timestamp = SystemTime::now().duration_since(UNIX_EPOCH).unwrap_or_default().as_secs(); @@ -281,14 +277,26 @@ impl StorageInner { parent.next_block_base_fee(chain_spec.base_fee_params_at_timestamp(timestamp)) }); + let blob_gas_used = if chain_spec.is_cancun_active_at_timestamp(timestamp) { + let mut sum_blob_gas_used = 0; + for tx in transactions { + if let Some(blob_tx) = tx.transaction.as_eip4844() { + sum_blob_gas_used += blob_tx.blob_gas(); + } + } + Some(sum_blob_gas_used) + } else { + None + }; + let mut header = Header { parent_hash: self.best_hash, - ommers_hash: EMPTY_OMMER_ROOT_HASH, + ommers_hash: proofs::calculate_ommers_root(ommers), beneficiary: Default::default(), state_root: Default::default(), transactions_root: Default::default(), receipts_root: Default::default(), - withdrawals_root: None, + withdrawals_root: withdrawals.map(|w| proofs::calculate_withdrawals_root(w)), logs_bloom: Default::default(), difficulty: U256::from(2), number: self.best_block + 1, @@ -298,7 +306,7 @@ impl StorageInner { mix_hash: Default::default(), nonce: 0, base_fee_per_gas, - blob_gas_used: None, + blob_gas_used, excess_blob_gas: None, extra_data: Default::default(), parent_beacon_block_root: None, @@ -334,148 +342,76 @@ impl StorageInner { header } - /// Executes the block with the given block and senders, on the provided [EVMProcessor]. - /// - /// This returns the poststate from execution and post-block changes, as well as the gas used. - pub(crate) fn execute<EvmConfig>( - &mut self, - block: &BlockWithSenders, - executor: &mut EVMProcessor<'_, EvmConfig>, - ) -> Result<(BundleStateWithReceipts, u64), BlockExecutionError> - where - EvmConfig: ConfigureEvm, - { - trace!(target: "consensus::auto", transactions=?&block.body, "executing transactions"); - // TODO: there isn't really a parent beacon block root here, so not sure whether or not to - // call the 4788 beacon contract - - // set the first block to find the correct index in bundle state - executor.set_first_block(block.number); - - let (receipts, gas_used) = executor.execute_transactions(block, U256::ZERO)?; - - // Save receipts. - executor.save_receipts(receipts)?; - - // add post execution state change - // Withdrawals, rewards etc. - executor.apply_post_execution_state_change(block, U256::ZERO)?; - - // merge transitions - executor.db_mut().merge_transitions(BundleRetention::Reverts); - - // apply post block changes - Ok((executor.take_output_state(), gas_used)) - } - - /// Fills in the post-execution header fields based on the given BundleState and gas used. - /// In doing this, the state root is calculated and the final header is returned.
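The `blob_gas_used` computation above relies on EIP-4844's fixed per-blob cost: a transaction's `blob_gas()` is its blob count times `DATA_GAS_PER_BLOB` (131072), and the header field is the sum over all blob transactions in the block. A self-contained sketch of the arithmetic (toy function, not reth's API):

```rust
/// Fixed gas consumed per blob under EIP-4844.
const DATA_GAS_PER_BLOB: u64 = 131_072;

/// Sums blob gas over a block given each transaction's blob count;
/// non-blob transactions simply contribute a count of zero.
fn block_blob_gas_used(blob_counts: &[u64]) -> u64 {
    blob_counts.iter().map(|count| count * DATA_GAS_PER_BLOB).sum()
}

fn main() {
    // Three transactions carrying 2, 0 and 3 blobs respectively.
    assert_eq!(block_blob_gas_used(&[2, 0, 3]), 5 * DATA_GAS_PER_BLOB);
}
```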
- pub(crate) fn complete_header<S: StateProviderFactory>( - &self, - mut header: Header, - bundle_state: &BundleStateWithReceipts, - client: &S, - gas_used: u64, - blob_gas_used: Option<u64>, - #[cfg(feature = "optimism")] chain_spec: &ChainSpec, - ) -> Result<Header, BlockExecutionError> { - let receipts = bundle_state.receipts_by_block(header.number); - header.receipts_root = if receipts.is_empty() { - EMPTY_RECEIPTS - } else { - let receipts_with_bloom = receipts - .iter() - .map(|r| (*r).clone().expect("receipts have not been pruned").into()) - .collect::<Vec<ReceiptWithBloom>>(); - header.logs_bloom = - receipts_with_bloom.iter().fold(Bloom::ZERO, |bloom, r| bloom | r.bloom); - #[cfg(feature = "optimism")] - { - proofs::calculate_receipt_root_optimism( - &receipts_with_bloom, - chain_spec, - header.timestamp, - ) - } - #[cfg(not(feature = "optimism"))] - { - proofs::calculate_receipt_root(&receipts_with_bloom) - } - }; - - header.gas_used = gas_used; - header.blob_gas_used = blob_gas_used; - - // calculate the state root - let state_root = client - .latest() - .map_err(BlockExecutionError::LatestBlock)? - .state_root(bundle_state.state()) - .unwrap(); - header.state_root = state_root; - Ok(header) - } - - /// Builds and executes a new block with the given transactions, on the provided [EVMProcessor]. + /// Builds and executes a new block with the given transactions, on the provided executor. /// /// This returns the header of the executed block, as well as the poststate from execution. - pub(crate) fn build_and_execute<EvmConfig>( + pub(crate) fn build_and_execute<Provider, Executor>( &mut self, transactions: Vec<TransactionSigned>, - client: &impl StateProviderFactory, + ommers: Vec<Header>, + withdrawals: Option<Withdrawals>, + provider: &Provider, chain_spec: Arc<ChainSpec>, - evm_config: EvmConfig, + executor: &Executor, ) -> Result<(SealedHeader, BundleStateWithReceipts), BlockExecutionError> where - EvmConfig: ConfigureEvm, + Executor: BlockExecutorProvider, + Provider: StateProviderFactory, { - let header = self.build_header_template(&transactions, chain_spec.clone()); + let header = + self.build_header_template(&transactions, &ommers, withdrawals.as_ref(), chain_spec); - let block = Block { header, body: transactions, ommers: vec![], withdrawals: None } - .with_recovered_senders() - .ok_or(BlockExecutionError::Validation(BlockValidationError::SenderRecoveryError))?; + let mut block = Block { + header, + body: transactions, + ommers: ommers.clone(), + withdrawals: withdrawals.clone(), + } + .with_recovered_senders() + .ok_or(BlockExecutionError::Validation(BlockValidationError::SenderRecoveryError))?; trace!(target: "consensus::auto", transactions=?&block.body, "executing transactions"); - // now execute the block - let db = State::builder() - .with_database_boxed(Box::new(StateProviderDatabase::new( - client.latest().map_err(BlockExecutionError::LatestBlock)?, - ))) - .with_bundle_update() - .build(); - let mut executor = EVMProcessor::new_with_state(chain_spec.clone(), db, evm_config); + let mut db = StateProviderDatabase::new( + provider.latest().map_err(BlockExecutionError::LatestBlock)?, + ); - let (bundle_state, gas_used) = self.execute(&block, &mut executor)?; + // TODO(mattsse): At this point we don't know certain fields of the header, so we first + // execute the block and then update the header; this can be improved by changing the + // executor input. For now we intercept the errors and retry. + loop { + match executor.executor(&mut db).execute((&block, U256::ZERO).into()) { + Err(BlockExecutionError::Validation(BlockValidationError::BlockGasUsed { + gas, + .. + })) => { + block.block.header.gas_used = gas.got; + } + Err(BlockExecutionError::Validation(BlockValidationError::ReceiptRootDiff( + err, + ))) => { + block.block.header.receipts_root = err.got; + } + _ => break, + }; + } - let Block { header, body, .. } = block.block; - let body = BlockBody { transactions: body, ommers: vec![], withdrawals: None }; + // now execute the block + let BlockExecutionOutput { state, receipts, .. } = + executor.executor(&mut db).execute((&block, U256::ZERO).into())?; + let bundle_state = BundleStateWithReceipts::new( + state, + Receipts::from_block_receipt(receipts), + block.number, + ); - let blob_gas_used = if chain_spec.is_cancun_active_at_timestamp(header.timestamp) { - let mut sum_blob_gas_used = 0; - for tx in &body.transactions { - if let Some(blob_tx) = tx.transaction.as_eip4844() { - sum_blob_gas_used += blob_tx.blob_gas(); - } - } - Some(sum_blob_gas_used) - } else { - None - }; + let Block { mut header, body, ..
} = block.block; + let body = BlockBody { transactions: body, ommers, withdrawals }; trace!(target: "consensus::auto", ?bundle_state, ?header, ?body, "executed block, calculating state root and completing header"); - // fill in the rest of the fields - let header = self.complete_header( - header, - &bundle_state, - client, - gas_used, - blob_gas_used, - #[cfg(feature = "optimism")] - chain_spec.as_ref(), - )?; - + // calculate the state root + header.state_root = db.state_root(bundle_state.state())?; trace!(target: "consensus::auto", root=?header.state_root, ?body, "calculated root"); // finally insert into storage diff --git a/crates/consensus/auto-seal/src/mode.rs b/crates/consensus/auto-seal/src/mode.rs index 809455311..b124010e6 100644 --- a/crates/consensus/auto-seal/src/mode.rs +++ b/crates/consensus/auto-seal/src/mode.rs @@ -62,6 +62,17 @@ impl MiningMode { } } +impl fmt::Display for MiningMode { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let kind = match self { + MiningMode::None => "None", + MiningMode::Auto(_) => "Auto", + MiningMode::FixedBlockTime(_) => "FixedBlockTime", + }; + write!(f, "{kind}") + } +} + /// A miner that's supposed to create a new block every `interval`, mining all transactions that are /// ready at that time. /// diff --git a/crates/consensus/auto-seal/src/task.rs b/crates/consensus/auto-seal/src/task.rs index e76b4333e..42f1268f3 100644 --- a/crates/consensus/auto-seal/src/task.rs +++ b/crates/consensus/auto-seal/src/task.rs @@ -2,10 +2,12 @@ use crate::{mode::MiningMode, Storage}; use futures_util::{future::BoxFuture, FutureExt}; use reth_beacon_consensus::{BeaconEngineMessage, ForkchoiceStatus}; use reth_engine_primitives::EngineTypes; -use reth_evm::ConfigureEvm; -use reth_interfaces::consensus::ForkchoiceState; -use reth_primitives::{Block, ChainSpec, IntoRecoveredTransaction, SealedBlockWithSenders}; +use reth_evm::execute::BlockExecutorProvider; +use reth_primitives::{ + Block, ChainSpec, IntoRecoveredTransaction, SealedBlockWithSenders, Withdrawals, +}; use reth_provider::{CanonChainTracker, CanonStateNotificationSender, Chain, StateProviderFactory}; +use reth_rpc_types::engine::ForkchoiceState; use reth_stages_api::PipelineEvent; use reth_transaction_pool::{TransactionPool, ValidPoolTransaction}; use std::{ @@ -20,7 +22,7 @@ use tokio_stream::wrappers::UnboundedReceiverStream; use tracing::{debug, error, warn}; /// A Future that listens for new ready transactions and puts new blocks into storage -pub struct MiningTask<Client, Pool: TransactionPool, EvmConfig, Engine: EngineTypes> { +pub struct MiningTask<Client, Pool: TransactionPool, Executor, Engine: EngineTypes> { /// The configured chain spec chain_spec: Arc<ChainSpec>, /// The client used to interact with the state @@ -41,14 +43,14 @@ pub struct MiningTask<Client, Pool: TransactionPool, EvmConfig, Engine: EngineTypes> pipe_line_events: Option<UnboundedReceiverStream<PipelineEvent>>, - /// The type that defines how to configure the EVM.
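The loop in the new `build_and_execute` above is the interesting part: some header fields (`gas_used`, `receipts_root`) are only known after execution, so the block is executed, the `got` value from the validation mismatch is copied into the header, and execution is retried. A stripped-down sketch of that execute-then-patch pattern, with hypothetical error and header types:

```rust
#[derive(Debug)]
enum ValidationError {
    BlockGasUsed { got: u64, expected: u64 },
}

struct Header {
    gas_used: u64,
}

/// Stand-in for block execution: validation fails until the header carries
/// the gas value that execution actually produced (21_000 here).
fn execute(header: &Header) -> Result<(), ValidationError> {
    if header.gas_used != 21_000 {
        return Err(ValidationError::BlockGasUsed { got: 21_000, expected: header.gas_used });
    }
    Ok(())
}

fn main() {
    let mut header = Header { gas_used: 0 };
    loop {
        match execute(&header) {
            // Intercept the mismatch and adopt the executed value.
            Err(ValidationError::BlockGasUsed { got, .. }) => header.gas_used = got,
            Ok(()) => break,
        }
    }
    assert_eq!(header.gas_used, 21_000);
}
```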
- evm_config: EvmConfig, + /// The type used for block execution + block_executor: Executor, } // === impl MiningTask === -impl<EvmConfig, Client, Pool: TransactionPool, Engine: EngineTypes> - MiningTask<Client, Pool, EvmConfig, Engine> +impl<Executor, Client, Pool: TransactionPool, Engine: EngineTypes> + MiningTask<Client, Pool, Executor, Engine> { /// Creates a new instance of the task #[allow(clippy::too_many_arguments)] @@ -60,7 +62,7 @@ impl<EvmConfig, Client, Pool: TransactionPool, Engine: EngineTypes> storage: Storage, client: Client, pool: Pool, - evm_config: EvmConfig, + block_executor: Executor, ) -> Self { Self { chain_spec, @@ -73,7 +75,7 @@ impl<EvmConfig, Client, Pool: TransactionPool, Engine: EngineTypes> canon_state_notification, queued: Default::default(), pipe_line_events: None, - evm_config, + block_executor, } } @@ -83,13 +85,13 @@ impl<EvmConfig, Client, Pool: TransactionPool, Engine: EngineTypes> } } -impl<Client, Pool, EvmConfig, Engine> Future for MiningTask<Client, Pool, EvmConfig, Engine> +impl<Client, Pool, Executor, Engine> Future for MiningTask<Client, Pool, Executor, Engine> where Client: StateProviderFactory + CanonChainTracker + Clone + Unpin + 'static, Pool: TransactionPool + Unpin + 'static, <Pool as TransactionPool>::Transaction: IntoRecoveredTransaction, Engine: EngineTypes + 'static, - EvmConfig: ConfigureEvm + Clone + Unpin + Send + Sync + 'static, + Executor: BlockExecutorProvider, { type Output = (); @@ -119,7 +121,7 @@ where let pool = this.pool.clone(); let events = this.pipe_line_events.take(); let canon_state_notification = this.canon_state_notification.clone(); - let evm_config = this.evm_config.clone(); + let executor = this.block_executor.clone(); // Create the mining future that creates a block, notifies the engine that drives // the pipeline @@ -134,12 +136,16 @@ where (recovered.into_signed(), signer) }) .unzip(); + let ommers = vec![]; + let withdrawals = Some(Withdrawals::default()); match storage.build_and_execute( transactions.clone(), + ommers.clone(), + withdrawals.clone(), &client, chain_spec, - evm_config, + &executor, ) { Ok((new_header, bundle_state)) => { // clear all transactions from pool @@ -193,8 +199,8 @@ where let block = Block { header: new_header.clone().unseal(), body: transactions, - ommers: vec![], - withdrawals: None, + ommers, + withdrawals, }; let sealed_block = block.seal_slow(); diff --git a/crates/consensus/beacon/Cargo.toml b/crates/consensus/beacon/Cargo.toml index 439002ec5..659ef02c1 100644 --- a/crates/consensus/beacon/Cargo.toml +++ b/crates/consensus/beacon/Cargo.toml @@ -12,7 +12,7 @@ workspace = true [dependencies] # reth -reth-beacon-consensus-core.workspace = true +reth-ethereum-consensus.workspace = true reth-primitives.workspace = true reth-interfaces.workspace = true reth-stages-api.workspace = true @@ -45,18 +45,22 @@ schnellru.workspace = true # reth reth-payload-builder = { workspace = true, features = ["test-utils"] } reth-primitives = { workspace = true, features = ["test-utils"] } +reth-consensus = { workspace = true, features = ["test-utils"] } reth-interfaces = { workspace = true, features = ["test-utils"] } reth-stages = { workspace = true, features = ["test-utils"] } reth-blockchain-tree = { workspace = true, features = ["test-utils"] } reth-db = { workspace = true, features = ["test-utils"] } reth-provider = { workspace = true, features = ["test-utils"] } +reth-evm = { workspace = true, features = ["test-utils"] } reth-rpc-types-compat.workspace = true +reth-rpc.workspace = true reth-tracing.workspace = true reth-revm.workspace = true reth-downloaders.workspace = true reth-evm-ethereum.workspace = true reth-ethereum-engine-primitives.workspace = true reth-config.workspace = true +reth-testing-utils.workspace = true assert_matches.workspace = true @@ -66,5 +70,6 @@ optimism = [ "reth-interfaces/optimism", "reth-provider/optimism", "reth-blockchain-tree/optimism", - "reth-beacon-consensus-core/optimism", + "reth-ethereum-consensus/optimism", + "reth-rpc/optimism" ] diff --git
a/crates/consensus/beacon/src/engine/event.rs b/crates/consensus/beacon/src/engine/event.rs index 168130de7..d5cbdee46 100644 --- a/crates/consensus/beacon/src/engine/event.rs +++ b/crates/consensus/beacon/src/engine/event.rs @@ -1,6 +1,6 @@ use crate::engine::forkchoice::ForkchoiceStatus; -use reth_interfaces::consensus::ForkchoiceState; use reth_primitives::{SealedBlock, SealedHeader, B256}; +use reth_rpc_types::engine::ForkchoiceState; use std::{sync::Arc, time::Duration}; /// Events emitted by [crate::BeaconConsensusEngine]. diff --git a/crates/consensus/beacon/src/engine/hooks/controller.rs b/crates/consensus/beacon/src/engine/hooks/controller.rs index 48343d480..7916928db 100644 --- a/crates/consensus/beacon/src/engine/hooks/controller.rs +++ b/crates/consensus/beacon/src/engine/hooks/controller.rs @@ -124,16 +124,22 @@ impl EngineHooksController { } fn poll_next_hook_inner( - &mut self, + &self, cx: &mut Context<'_>, hook: &mut Box<dyn EngineHook>, args: EngineHookContext, db_write_active: bool, ) -> Poll<RethResult<PolledHook>> { - // Hook with DB write access level is not allowed to run due to already running hook with DB - // write access level or active DB write according to passed argument + // Hook with DB write access level is not allowed to run due to any of the following + // reasons: + // - An already running hook with DB write access level + // - Active DB write according to passed argument + // - Missing a finalized block number. We might be on an optimistic sync scenario where we + // cannot skip the FCU with the finalized hash, otherwise CL might misbehave. if hook.db_access_level().is_read_write() && - (self.active_db_write_hook.is_some() || db_write_active) + (self.active_db_write_hook.is_some() || + db_write_active || + args.finalized_block_number.is_none()) { return Poll::Pending } diff --git a/crates/consensus/beacon/src/engine/message.rs b/crates/consensus/beacon/src/engine/message.rs index 464dcedb2..f9f1a84d4 100644 --- a/crates/consensus/beacon/src/engine/message.rs +++ b/crates/consensus/beacon/src/engine/message.rs @@ -4,11 +4,11 @@ use crate::{ }; use futures::{future::Either, FutureExt}; use reth_engine_primitives::EngineTypes; -use reth_interfaces::{consensus::ForkchoiceState, RethResult}; +use reth_interfaces::RethResult; use reth_payload_builder::error::PayloadBuilderError; use reth_rpc_types::engine::{ - CancunPayloadFields, ExecutionPayload, ForkChoiceUpdateResult, ForkchoiceUpdateError, - ForkchoiceUpdated, PayloadId, PayloadStatus, PayloadStatusEnum, + CancunPayloadFields, ExecutionPayload, ForkChoiceUpdateResult, ForkchoiceState, + ForkchoiceUpdateError, ForkchoiceUpdated, PayloadId, PayloadStatus, PayloadStatusEnum, }; use std::{ future::Future, diff --git a/crates/consensus/beacon/src/engine/mod.rs b/crates/consensus/beacon/src/engine/mod.rs index 877e6f450..1057457c7 100644 --- a/crates/consensus/beacon/src/engine/mod.rs +++ b/crates/consensus/beacon/src/engine/mod.rs @@ -1,20 +1,11 @@ -use crate::{ - engine::{ - forkchoice::{ForkchoiceStateHash, ForkchoiceStateTracker}, - metrics::EngineMetrics, - }, - hooks::{EngineHookContext, EngineHooksController}, - sync::{EngineSyncController, EngineSyncEvent}, -}; -use futures::{Future, StreamExt}; +use futures::{stream::BoxStream, Future, StreamExt}; use reth_db::database::Database; use reth_engine_primitives::{EngineTypes, PayloadAttributes, PayloadBuilderAttributes}; use reth_interfaces::{ blockchain_tree::{ error::{BlockchainTreeError, CanonicalError, InsertBlockError, InsertBlockErrorKind}, - BlockStatus, BlockchainTreeEngine,
CanonicalOutcome, InsertPayloadOk, + BlockStatus, BlockValidationKind, BlockchainTreeEngine, CanonicalOutcome, InsertPayloadOk, }, - consensus::ForkchoiceState, executor::BlockValidationError, p2p::{bodies::client::BodiesClient, headers::client::HeadersClient}, provider::ProviderResult, @@ -22,16 +13,19 @@ use reth_interfaces::{ RethError, RethResult, }; use reth_payload_builder::PayloadBuilderHandle; +use reth_payload_validator::ExecutionPayloadValidator; use reth_primitives::{ - constants::EPOCH_SLOTS, stage::StageId, BlockNumHash, BlockNumber, Head, Header, SealedBlock, - SealedHeader, B256, + constants::EPOCH_SLOTS, + stage::{PipelineTarget, StageId}, + BlockNumHash, BlockNumber, Head, Header, SealedBlock, SealedHeader, B256, }; use reth_provider::{ BlockIdReader, BlockReader, BlockSource, CanonChainTracker, ChainSpecProvider, ProviderError, StageCheckpointReader, }; use reth_rpc_types::engine::{ - CancunPayloadFields, ExecutionPayload, PayloadStatus, PayloadStatusEnum, PayloadValidationError, + CancunPayloadFields, ExecutionPayload, ForkchoiceState, PayloadStatus, PayloadStatusEnum, + PayloadValidationError, }; use reth_stages_api::{ControlFlow, Pipeline}; use reth_tasks::TaskSpawner; @@ -43,8 +37,7 @@ use std::{ time::{Duration, Instant}, }; use tokio::sync::{ - mpsc, - mpsc::{UnboundedReceiver, UnboundedSender}, + mpsc::{self, UnboundedSender}, oneshot, }; use tokio_stream::wrappers::UnboundedReceiverStream; @@ -69,18 +62,19 @@ mod handle; pub use handle::BeaconConsensusEngineHandle; mod forkchoice; -use crate::hooks::{EngineHookEvent, EngineHooks, PolledHook}; pub use forkchoice::ForkchoiceStatus; -use reth_interfaces::blockchain_tree::BlockValidationKind; -use reth_payload_validator::ExecutionPayloadValidator; +use forkchoice::{ForkchoiceStateHash, ForkchoiceStateTracker}; mod metrics; +use metrics::EngineMetrics; pub(crate) mod sync; +use sync::{EngineSyncController, EngineSyncEvent}; /// Hooks for running during the main loop of /// [consensus engine][`crate::engine::BeaconConsensusEngine`]. pub mod hooks; +use hooks::{EngineHookContext, EngineHookEvent, EngineHooks, EngineHooksController, PolledHook}; #[cfg(test)] pub mod test_utils; @@ -181,7 +175,7 @@ where /// Used for emitting updates about whether the engine is syncing or not. sync_state_updater: Box, /// The Engine API message receiver. - engine_message_rx: UnboundedReceiverStream>, + engine_message_stream: BoxStream<'static, BeaconEngineMessage>, /// A clone of the handle handle: BeaconConsensusEngineHandle, /// Tracks the received forkchoice state updates received by the CL. @@ -190,13 +184,11 @@ where payload_builder: PayloadBuilderHandle, /// Validator for execution payloads payload_validator: ExecutionPayloadValidator, - /// Listeners for engine events. - listeners: EventListeners, + /// Current blockchain tree action. + blockchain_tree_action: Option>, /// Tracks the header of invalid payloads that were rejected by the engine because they're /// invalid. invalid_headers: InvalidHeaderCache, - /// Consensus engine metrics. - metrics: EngineMetrics, /// After downloading a block corresponding to a recent forkchoice update, the engine will /// check whether or not we can connect the block to the current canonical chain. If we can't, /// we need to download and execute the missing parents of that block. @@ -210,6 +202,10 @@ where /// be used to download and execute the missing blocks. pipeline_run_threshold: u64, hooks: EngineHooksController, + /// Listeners for engine events. 
+ listeners: EventListeners, + /// Consensus engine metrics. + metrics: EngineMetrics, } impl BeaconConsensusEngine @@ -253,7 +249,7 @@ where target, pipeline_run_threshold, to_engine, - rx, + Box::pin(UnboundedReceiverStream::from(rx)), hooks, ) } @@ -283,7 +279,7 @@ where target: Option, pipeline_run_threshold: u64, to_engine: UnboundedSender>, - rx: UnboundedReceiver>, + engine_message_stream: BoxStream<'static, BeaconEngineMessage>, hooks: EngineHooks, ) -> RethResult<(Self, BeaconConsensusEngineHandle)> { let handle = BeaconConsensusEngineHandle { to_engine }; @@ -302,15 +298,16 @@ where payload_validator: ExecutionPayloadValidator::new(blockchain.chain_spec()), blockchain, sync_state_updater, - engine_message_rx: UnboundedReceiverStream::new(rx), + engine_message_stream, handle: handle.clone(), forkchoice_state_tracker: Default::default(), payload_builder, - listeners, invalid_headers: InvalidHeaderCache::new(MAX_INVALID_HEADERS), - metrics: EngineMetrics::default(), + blockchain_tree_action: None, pipeline_run_threshold, hooks: EngineHooksController::new(hooks), + listeners, + metrics: EngineMetrics::default(), }; let maybe_pipeline_target = match target { @@ -320,7 +317,7 @@ where }; if let Some(target) = maybe_pipeline_target { - this.sync.set_pipeline_sync_target(target); + this.sync.set_pipeline_sync_target(target.into()); } Ok((this, handle)) @@ -381,40 +378,6 @@ where None } - /// Called to resolve chain forks and ensure that the Execution layer is working with the latest - /// valid chain. - /// - /// These responses should adhere to the [Engine API Spec for - /// `engine_forkchoiceUpdated`](https://github.com/ethereum/execution-apis/blob/main/src/engine/paris.md#specification-1). - /// - /// Returns an error if an internal error occurred like a database error. - fn forkchoice_updated( - &mut self, - state: ForkchoiceState, - attrs: Option, - ) -> Result { - trace!(target: "consensus::engine", ?state, "Received new forkchoice state update"); - - // Pre-validate forkchoice state update and return if it's invalid or - // cannot be processed at the moment. - if let Some(on_updated) = self.pre_validate_forkchoice_update(state) { - return Ok(on_updated) - } - - let start = Instant::now(); - let make_canonical_result = self.blockchain.make_canonical(state.head_block_hash); - let elapsed = self.record_make_canonical_latency(start, &make_canonical_result); - - let status = self.on_forkchoice_updated_make_canonical_result( - state, - attrs, - make_canonical_result, - elapsed, - )?; - trace!(target: "consensus::engine", ?status, ?state, "Returning forkchoice status"); - Ok(status) - } - /// Process the result of attempting to make forkchoice state head hash canonical. /// /// # Returns @@ -484,7 +447,7 @@ where /// /// Returns `true` if the head needs to be updated. fn on_head_already_canonical( - &mut self, + &self, header: &SealedHeader, attrs: &mut Option, ) -> bool { @@ -519,56 +482,59 @@ where false } - /// Invoked when we receive a new forkchoice update message. + /// Invoked when we receive a new forkchoice update message. Calls into the blockchain tree + /// to resolve chain forks and ensure that the Execution Layer is working with the latest valid + /// chain. + /// + /// These responses should adhere to the [Engine API Spec for + /// `engine_forkchoiceUpdated`](https://github.com/ethereum/execution-apis/blob/main/src/engine/paris.md#specification-1). 
/// - /// Returns `true` if the engine now reached its maximum block number, See - /// [EngineSyncController::has_reached_max_block]. + /// Returns an error if an internal error occurred like a database error. fn on_forkchoice_updated( &mut self, state: ForkchoiceState, attrs: Option, - tx: oneshot::Sender>, - ) -> Result { + tx: oneshot::Sender>, + ) { self.metrics.forkchoice_updated_messages.increment(1); self.blockchain.on_forkchoice_update_received(&state); + trace!(target: "consensus::engine", ?state, "Received new forkchoice state update"); - let on_updated = match self.forkchoice_updated(state, attrs) { - Ok(response) => response, - Err(error) => { - if error.is_fatal() { - // FCU resulted in a fatal error from which we can't recover - let err = error.clone(); - let _ = tx.send(Err(RethError::Canonical(error))); - return Err(err) - } - let _ = tx.send(Err(RethError::Canonical(error))); - return Ok(OnForkchoiceUpdateOutcome::Processed) - } - }; - - let fcu_status = on_updated.forkchoice_status(); - - // update the forkchoice state tracker - self.forkchoice_state_tracker.set_latest(state, fcu_status); + if let Some(on_updated) = self.pre_validate_forkchoice_update(state) { + // Pre-validate forkchoice state update and return if it's invalid + // or cannot be processed at the moment. + self.on_forkchoice_updated_status(state, on_updated, tx); + } else { + let previous_action = self + .blockchain_tree_action + .replace(BlockchainTreeAction::MakeForkchoiceHeadCanonical { state, attrs, tx }); + debug_assert!(previous_action.is_none(), "Pre-existing action found"); + } + } + /// Called after the forkchoice update status has been resolved. + /// Depending on the outcome, the method updates the sync state and notifies the listeners + /// about new processed FCU. + fn on_forkchoice_updated_status( + &mut self, + state: ForkchoiceState, + on_updated: OnForkChoiceUpdated, + tx: oneshot::Sender>, + ) { // send the response to the CL ASAP + let status = on_updated.forkchoice_status(); let _ = tx.send(Ok(on_updated)); - match fcu_status { + // update the forkchoice state tracker + self.forkchoice_state_tracker.set_latest(state, status); + + match status { ForkchoiceStatus::Invalid => {} ForkchoiceStatus::Valid => { // FCU head is valid, we're no longer syncing self.sync_state_updater.update_sync_state(SyncState::Idle); // node's fully synced, clear active download requests self.sync.clear_block_download_requests(); - - // check if we reached the maximum configured block - let tip_number = self.blockchain.canonical_tip().number; - if self.sync.has_reached_max_block(tip_number) { - // Terminate the sync early if it's reached the maximum user - // configured block. - return Ok(OnForkchoiceUpdateOutcome::ReachedMaxBlock) - } } ForkchoiceStatus::Syncing => { // we're syncing @@ -577,9 +543,7 @@ where } // notify listeners about new processed FCU - self.listeners.notify(BeaconConsensusEngineEvent::ForkchoiceUpdated(state, fcu_status)); - - Ok(OnForkchoiceUpdateOutcome::Processed) + self.listeners.notify(BeaconConsensusEngineEvent::ForkchoiceUpdated(state, status)); } /// Check if the pipeline is consistent (all stages have the checkpoint block numbers no less @@ -705,6 +669,21 @@ where // threshold return Some(state.finalized_block_hash) } + + // OPTIMISTIC SYNCING + // + // It can happen when the node is doing an + // optimistic sync, where the CL has no knowledge of the finalized hash, + // but is expecting the EL to sync as high + // as possible before finalizing. 
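The refactor above stops the forkchoice handler from mutating the blockchain tree directly: message handlers now only record a `BlockchainTreeAction`, and the engine's main loop takes and executes it, so tree mutations happen in exactly one place. A minimal sketch of that pattern with toy types (not reth's actual types):

```rust
#[derive(Debug)]
enum TreeAction {
    MakeCanonical { head: u64 },
}

#[derive(Default)]
struct Engine {
    pending_action: Option<TreeAction>,
}

impl Engine {
    /// Message handler: records the action instead of performing it.
    fn on_forkchoice_updated(&mut self, head: u64) {
        let previous = self.pending_action.replace(TreeAction::MakeCanonical { head });
        debug_assert!(previous.is_none(), "Pre-existing action found");
    }

    /// Main loop: takes the pending action and performs the mutation.
    fn poll(&mut self) {
        if let Some(TreeAction::MakeCanonical { head }) = self.pending_action.take() {
            println!("making block {head} canonical");
        }
    }
}

fn main() {
    let mut engine = Engine::default();
    engine.on_forkchoice_updated(42);
    engine.poll();
}
```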
+ // + // This usually doesn't happen on ETH mainnet since CLs use the more + // secure checkpoint syncing. + // + // However, optimism chains will do this. The risk of a reorg is however + // low. + debug!(target: "consensus::engine", hash=?state.head_block_hash, "Setting head hash as an optimistic pipeline target."); + return Some(state.head_block_hash) } Ok(Some(_)) => { // we're fully synced to the finalized block @@ -739,7 +718,7 @@ where /// - null if client software cannot determine the ancestor of the invalid payload satisfying /// the above conditions. fn latest_valid_hash_for_invalid_payload( - &self, + &mut self, parent_hash: B256, insert_err: Option<&InsertBlockErrorKind>, ) -> Option { @@ -749,12 +728,31 @@ where } // Check if parent exists in side chain or in canonical chain. + // TODO: handle find_block_by_hash errors. if matches!(self.blockchain.find_block_by_hash(parent_hash, BlockSource::Any), Ok(Some(_))) { Some(parent_hash) } else { - // TODO: attempt to iterate over ancestors in the invalid cache + // iterate over ancestors in the invalid cache // until we encounter the first valid ancestor + let mut current_hash = parent_hash; + let mut current_header = self.invalid_headers.get(¤t_hash); + while let Some(header) = current_header { + current_hash = header.parent_hash; + current_header = self.invalid_headers.get(¤t_hash); + + // If current_header is None, then the current_hash does not have an invalid + // ancestor in the cache, check its presence in blockchain tree + if current_header.is_none() && + matches!( + // TODO: handle find_block_by_hash errors. + self.blockchain.find_block_by_hash(current_hash, BlockSource::Any), + Ok(Some(_)) + ) + { + return Some(current_hash) + } + } None } } @@ -762,7 +760,7 @@ where /// Prepares the invalid payload response for the given hash, checking the /// database for the parent hash and populating the payload status with the latest valid hash /// according to the engine api spec. - fn prepare_invalid_response(&self, mut parent_hash: B256) -> PayloadStatus { + fn prepare_invalid_response(&mut self, mut parent_hash: B256) -> PayloadStatus { // Edge case: the `latestValid` field is the zero hash if the parent block is the terminal // PoW block, which we need to identify by looking at the parent's block difficulty if let Ok(Some(parent)) = self.blockchain.header_by_hash_or_number(parent_hash.into()) { @@ -771,10 +769,12 @@ where } } + let valid_parent_hash = + self.latest_valid_hash_for_invalid_payload(parent_hash, None).unwrap_or_default(); PayloadStatus::from_status(PayloadStatusEnum::Invalid { validation_error: PayloadValidationError::LinksToRejectedPayload.to_string(), }) - .with_latest_valid_hash(parent_hash) + .with_latest_valid_hash(valid_parent_hash) } /// Checks if the given `check` hash points to an invalid header, inserting the given `head` @@ -841,7 +841,7 @@ where /// This also updates the safe and finalized blocks in the [CanonChainTracker], if they are /// consistent with the head block. fn ensure_consistent_forkchoice_state( - &mut self, + &self, state: ForkchoiceState, ) -> ProviderResult> { // Ensure that the finalized block, if not zero, is known and in the canonical chain @@ -966,7 +966,7 @@ where /// /// If the newest head is not invalid, then this will trigger a new pipeline run to sync the gap /// - /// See [Self::forkchoice_updated] and [BlockchainTreeEngine::make_canonical]. + /// See [Self::on_forkchoice_updated] and [BlockchainTreeEngine::make_canonical]. 
fn on_failed_canonical_forkchoice_update( &mut self, state: &ForkchoiceState, @@ -997,6 +997,10 @@ where // so we should not warn the user, since this will result in us attempting to sync // to a new target and is considered normal operation during sync } + CanonicalError::OptimisticTargetRevert(block_number) => { + self.sync.set_pipeline_sync_target(PipelineTarget::Unwind(*block_number)); + return PayloadStatus::from_status(PayloadStatusEnum::Syncing) + } _ => { warn!(target: "consensus::engine", %error, ?state, "Failed to canonicalize the head hash"); // TODO(mattsse) better error handling before attempting to sync (FCU could be @@ -1027,7 +1031,7 @@ where if self.pipeline_run_threshold == 0 { // use the pipeline to sync to the target trace!(target: "consensus::engine", %target, "Triggering pipeline run to sync missing ancestors of the new head"); - self.sync.set_pipeline_sync_target(target); + self.sync.set_pipeline_sync_target(target.into()); } else { // trigger a full block download for missing hash, or the parent of its lowest buffered // ancestor @@ -1069,13 +1073,17 @@ where &mut self, payload: ExecutionPayload, cancun_fields: Option, - ) -> Result { + tx: oneshot::Sender>, + ) { + self.metrics.new_payload_messages.increment(1); + let block = match self.ensure_well_formed_payload(payload, cancun_fields) { Ok(block) => block, - Err(status) => return Ok(status), + Err(status) => { + let _ = tx.send(Ok(status)); + return + } }; - let block_hash = block.hash(); - let block_num_hash = block.num_hash(); let mut lowest_buffered_ancestor = self.lowest_buffered_ancestor_or(block.hash()); if lowest_buffered_ancestor == block.hash() { @@ -1086,74 +1094,14 @@ where if let Some(status) = self.check_invalid_ancestor_with_head(lowest_buffered_ancestor, block.hash()) { - return Ok(status) + let _ = tx.send(Ok(status)); + return } - let res = if self.sync.is_pipeline_idle() { - // we can only insert new payloads if the pipeline is _not_ running, because it holds - // exclusive access to the database - self.try_insert_new_payload(block) - } else { - self.try_buffer_payload(block) - }; - - let status = match res { - Ok(status) => { - if status.is_valid() { - if let Some(target) = self.forkchoice_state_tracker.sync_target_state() { - // if we're currently syncing and the inserted block is the targeted FCU - // head block, we can try to make it canonical. - if block_hash == target.head_block_hash { - if let Err((_hash, error)) = - self.try_make_sync_target_canonical(block_num_hash) - { - return if error.is_fatal() { - error!(target: "consensus::engine", %error, "Encountered fatal error"); - Err(BeaconOnNewPayloadError::Internal(Box::new(error))) - } else { - // If we could not make the sync target block canonical, we - // should return the error as an invalid payload status. - Ok(PayloadStatus::new( - PayloadStatusEnum::Invalid { - validation_error: error.to_string(), - }, - // TODO: return a proper latest valid hash - // - // See: - self.forkchoice_state_tracker.last_valid_head(), - )) - } - } - } - } - // block was successfully inserted, so we can cancel the full block request, if - // any exists - self.sync.cancel_full_block_request(block_hash); - } - Ok(status) - } - Err(error) => { - warn!(target: "consensus::engine", %error, "Error while processing payload"); - - // If the error was due to an invalid payload, the payload is added to the invalid - // headers cache and `Ok` with [PayloadStatusEnum::Invalid] is returned. 
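The loop added to `latest_valid_hash_for_invalid_payload` above replaces the old TODO: it follows parent hashes through the invalid-header cache until it steps out of the cache, then checks whether that first non-invalid ancestor is actually a known block. A toy sketch with `u64` standing in for block hashes:

```rust
use std::collections::HashMap;

/// Walks child -> parent links through the known-invalid set; returns the
/// first ancestor that is not cached as invalid *and* is a known block.
fn latest_valid_hash(
    invalid: &HashMap<u64, u64>, // invalid header hash -> its parent hash
    known_blocks: &[u64],
    parent: u64,
) -> Option<u64> {
    if known_blocks.contains(&parent) {
        return Some(parent); // the parent itself is valid and known
    }
    let mut current = parent;
    while let Some(&next) = invalid.get(&current) {
        current = next;
        if !invalid.contains_key(&current) && known_blocks.contains(&current) {
            return Some(current);
        }
    }
    None
}

fn main() {
    // Chain 3 -> 2 -> 1, where 3 and 2 are invalid and 1 is canonical.
    let invalid = HashMap::from([(3, 2), (2, 1)]);
    assert_eq!(latest_valid_hash(&invalid, &[1], 3), Some(1));
}
```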
- let (block, error) = error.split(); - if error.is_invalid_block() { - warn!(target: "consensus::engine", invalid_hash=?block.hash(), invalid_number=?block.number, %error, "Invalid block error on new payload"); - let latest_valid_hash = - self.latest_valid_hash_for_invalid_payload(block.parent_hash, Some(&error)); - // keep track of the invalid header - self.invalid_headers.insert(block.header); - let status = PayloadStatusEnum::Invalid { validation_error: error.to_string() }; - Ok(PayloadStatus::new(status, latest_valid_hash)) - } else { - Err(BeaconOnNewPayloadError::Internal(Box::new(error))) - } - } - }; - - trace!(target: "consensus::engine", ?status, "Returning payload status"); - status + let previous_action = self + .blockchain_tree_action + .replace(BlockchainTreeAction::InsertNewPayload { block, tx }); + debug_assert!(previous_action.is_none(), "Pre-existing action found"); } /// Ensures that the given payload does not violate any consensus rules that concern the block's @@ -1182,7 +1130,7 @@ where /// /// This validation **MUST** be instantly run in all cases even during active sync process. fn ensure_well_formed_payload( - &self, + &mut self, payload: ExecutionPayload, cancun_fields: Option, ) -> Result { @@ -1433,7 +1381,7 @@ where ) { // we don't have the block yet and the distance exceeds the allowed // threshold - self.sync.set_pipeline_sync_target(target); + self.sync.set_pipeline_sync_target(target.into()); // we can exit early here because the pipeline will take care of syncing return } @@ -1517,6 +1465,8 @@ where // TODO: do not ignore this let _ = self.blockchain.make_canonical(*target_hash.as_ref()); } + } else if let Some(block_number) = err.optimistic_revert_block_number() { + self.sync.set_pipeline_sync_target(PipelineTarget::Unwind(block_number)); } Err((target.head_block_hash, err)) @@ -1530,17 +1480,17 @@ where fn on_sync_event( &mut self, event: EngineSyncEvent, - ) -> Result { + ) -> Result { let outcome = match event { EngineSyncEvent::FetchedFullBlock(block) => { self.on_downloaded_block(block); - SyncEventOutcome::Processed + EngineEventOutcome::Processed } EngineSyncEvent::PipelineStarted(target) => { trace!(target: "consensus::engine", ?target, continuous = target.is_none(), "Started the pipeline"); self.metrics.pipeline_runs.increment(1); self.sync_state_updater.update_sync_state(SyncState::Syncing); - SyncEventOutcome::Processed + EngineEventOutcome::Processed } EngineSyncEvent::PipelineFinished { result, reached_max_block } => { trace!(target: "consensus::engine", ?result, ?reached_max_block, "Pipeline finished"); @@ -1548,10 +1498,10 @@ where let ctrl = result?; if reached_max_block { // Terminate the sync early if it's reached the maximum user-configured block. - SyncEventOutcome::ReachedMaxBlock + EngineEventOutcome::ReachedMaxBlock } else { self.on_pipeline_outcome(ctrl)?; - SyncEventOutcome::Processed + EngineEventOutcome::Processed } } EngineSyncEvent::PipelineTaskDropped => { @@ -1578,13 +1528,7 @@ where // update the canon chain if continuous is enabled if self.sync.run_pipeline_continuously() { - let max_block = ctrl.block_number().unwrap_or_default(); - let max_header = self.blockchain.sealed_header(max_block) - .inspect_err(|error| { - error!(target: "consensus::engine", %error, "Error getting canonical header for continuous sync"); - })? 
- .ok_or_else(|| ProviderError::HeaderNotFound(max_block.into()))?; - self.blockchain.set_canonical_head(max_header); + self.set_canonical_head(ctrl.block_number().unwrap_or_default())?; } let sync_target_state = match self.forkchoice_state_tracker.sync_target_state() { @@ -1597,6 +1541,14 @@ where } }; + if sync_target_state.finalized_block_hash.is_zero() { + self.set_canonical_head(ctrl.block_number().unwrap_or_default())?; + self.blockchain.update_block_hashes_and_clear_buffered()?; + self.blockchain.connect_buffered_blocks_to_canonical_hashes()?; + // We are on an optimistic syncing process, better to wait for the next FCU to handle + return Ok(()) + } + // Next, we check if we need to schedule another pipeline run or transition // to live sync via tree. // This can arise if we buffer the forkchoice head, and if the head is an @@ -1652,7 +1604,7 @@ where // the tree update from executing too many blocks and blocking. if let Some(target) = pipeline_target { // run the pipeline to the target since the distance is sufficient - self.sync.set_pipeline_sync_target(target); + self.sync.set_pipeline_sync_target(target.into()); } else if let Some(number) = self.blockchain.block_number(sync_target_state.finalized_block_hash)? { @@ -1664,12 +1616,23 @@ where } else { // We don't have the finalized block in the database, so we need to // trigger another pipeline run. - self.sync.set_pipeline_sync_target(sync_target_state.finalized_block_hash); + self.sync.set_pipeline_sync_target(sync_target_state.finalized_block_hash.into()); } Ok(()) } + fn set_canonical_head(&self, max_block: BlockNumber) -> RethResult<()> { + let max_header = self.blockchain.sealed_header(max_block) + .inspect_err(|error| { + error!(target: "consensus::engine", %error, "Error getting canonical header for continuous sync"); + })? + .ok_or_else(|| ProviderError::HeaderNotFound(max_block.into()))?; + self.blockchain.set_canonical_head(max_header); + + Ok(()) + } + fn on_hook_result(&self, polled_hook: PolledHook) -> Result<(), BeaconConsensusEngineError> { if let EngineHookEvent::Finished(Err(error)) = &polled_hook.event { error!( @@ -1708,6 +1671,139 @@ where Ok(()) } + + /// Process the next set blockchain tree action. + /// The handler might set next blockchain tree action to perform, + /// so the state change should be handled accordingly. + fn on_blockchain_tree_action( + &mut self, + action: BlockchainTreeAction, + ) -> RethResult { + match action { + BlockchainTreeAction::MakeForkchoiceHeadCanonical { state, attrs, tx } => { + let start = Instant::now(); + let result = self.blockchain.make_canonical(state.head_block_hash); + let elapsed = self.record_make_canonical_latency(start, &result); + match self + .on_forkchoice_updated_make_canonical_result(state, attrs, result, elapsed) + { + Ok(on_updated) => { + trace!(target: "consensus::engine", status = ?on_updated, ?state, "Returning forkchoice status"); + let fcu_status = on_updated.forkchoice_status(); + self.on_forkchoice_updated_status(state, on_updated, tx); + + if fcu_status.is_valid() { + let tip_number = self.blockchain.canonical_tip().number; + if self.sync.has_reached_max_block(tip_number) { + // Terminate the sync early if it's reached + // the maximum user configured block. 
+ return Ok(EngineEventOutcome::ReachedMaxBlock) + } + } + } + Err(error) => { + let _ = tx.send(Err(RethError::Canonical(error.clone()))); + if error.is_fatal() { + return Err(RethError::Canonical(error)) + } + } + }; + } + BlockchainTreeAction::InsertNewPayload { block, tx } => { + let block_hash = block.hash(); + let block_num_hash = block.num_hash(); + let result = if self.sync.is_pipeline_idle() { + // we can only insert new payloads if the pipeline is _not_ running, because it + // holds exclusive access to the database + self.try_insert_new_payload(block) + } else { + self.try_buffer_payload(block) + }; + + let status = match result { + Ok(status) => status, + Err(error) => { + warn!(target: "consensus::engine", %error, "Error while processing payload"); + + let (block, error) = error.split(); + if !error.is_invalid_block() { + // TODO: revise if any error should be considered fatal at this point. + let _ = + tx.send(Err(BeaconOnNewPayloadError::Internal(Box::new(error)))); + return Ok(EngineEventOutcome::Processed) + } + + // If the error was due to an invalid payload, the payload is added to the + // invalid headers cache and `Ok` with [PayloadStatusEnum::Invalid] is + // returned. + warn!(target: "consensus::engine", invalid_hash=?block.hash(), invalid_number=?block.number, %error, "Invalid block error on new payload"); + let latest_valid_hash = self + .latest_valid_hash_for_invalid_payload(block.parent_hash, Some(&error)); + // keep track of the invalid header + self.invalid_headers.insert(block.header); + PayloadStatus::new( + PayloadStatusEnum::Invalid { validation_error: error.to_string() }, + latest_valid_hash, + ) + } + }; + + if status.is_valid() { + if let Some(target) = self.forkchoice_state_tracker.sync_target_state() { + // if we're currently syncing and the inserted block is the targeted + // FCU head block, we can try to make it canonical. + if block_hash == target.head_block_hash { + let previous_action = self.blockchain_tree_action.replace( + BlockchainTreeAction::MakeNewPayloadCanonical { + payload_num_hash: block_num_hash, + status, + tx, + }, + ); + debug_assert!(previous_action.is_none(), "Pre-existing action found"); + return Ok(EngineEventOutcome::Processed) + } + } + // block was successfully inserted, so we can cancel the full block + // request, if any exists + self.sync.cancel_full_block_request(block_hash); + } + + trace!(target: "consensus::engine", ?status, "Returning payload status"); + let _ = tx.send(Ok(status)); + } + BlockchainTreeAction::MakeNewPayloadCanonical { payload_num_hash, status, tx } => { + let status = match self.try_make_sync_target_canonical(payload_num_hash) { + Ok(()) => status, + Err((_hash, error)) => { + if error.is_fatal() { + let response = + Err(BeaconOnNewPayloadError::Internal(Box::new(error.clone()))); + let _ = tx.send(response); + return Err(RethError::Canonical(error)) + } else if error.optimistic_revert_block_number().is_some() { + // engine already set the pipeline unwind target on + // `try_make_sync_target_canonical` + PayloadStatus::from_status(PayloadStatusEnum::Syncing) + } else { + // If we could not make the sync target block canonical, + // we should return the error as an invalid payload status. 
+ PayloadStatus::new( + PayloadStatusEnum::Invalid { validation_error: error.to_string() }, + // TODO: return a proper latest valid hash + // See: + self.forkchoice_state_tracker.last_valid_head(), + ) + } + } + }; + + trace!(target: "consensus::engine", ?status, "Returning payload status"); + let _ = tx.send(Ok(status)); + } + }; + Ok(EngineEventOutcome::Processed) + } } /// On initialization, the consensus engine will poll the message receiver and return @@ -1750,30 +1846,34 @@ where continue } + // Process any blockchain tree action result as set forth during engine message + // processing. + if let Some(action) = this.blockchain_tree_action.take() { + match this.on_blockchain_tree_action(action) { + Ok(EngineEventOutcome::Processed) => {} + Ok(EngineEventOutcome::ReachedMaxBlock) => return Poll::Ready(Ok(())), + Err(error) => { + error!(target: "consensus::engine", %error, "Encountered fatal error"); + return Poll::Ready(Err(error.into())) + } + }; + + // Blockchain tree action handler might set next action to take. + continue + } + // Process one incoming message from the CL. We don't drain the messages right away, // because we want to sneak a polling of running hook in between them. // // These messages can affect the state of the SyncController and they're also time // sensitive, hence they are polled first. - if let Poll::Ready(Some(msg)) = this.engine_message_rx.poll_next_unpin(cx) { + if let Poll::Ready(Some(msg)) = this.engine_message_stream.poll_next_unpin(cx) { match msg { BeaconEngineMessage::ForkchoiceUpdated { state, payload_attrs, tx } => { - match this.on_forkchoice_updated(state, payload_attrs, tx) { - Ok(OnForkchoiceUpdateOutcome::Processed) => {} - Ok(OnForkchoiceUpdateOutcome::ReachedMaxBlock) => { - // reached the max block, we can terminate the future - return Poll::Ready(Ok(())) - } - Err(err) => { - // fatal error, we can terminate the future - return Poll::Ready(Err(RethError::Canonical(err).into())) - } - } + this.on_forkchoice_updated(state, payload_attrs, tx); } BeaconEngineMessage::NewPayload { payload, cancun_fields, tx } => { - this.metrics.new_payload_messages.increment(1); - let res = this.on_new_payload(payload, cancun_fields); - let _ = tx.send(res); + this.on_new_payload(payload, cancun_fields, tx); } BeaconEngineMessage::TransitionConfigurationExchanged => { this.blockchain.on_transition_configuration_exchanged(); @@ -1792,9 +1892,9 @@ where if let Poll::Ready(sync_event) = this.sync.poll(cx) { match this.on_sync_event(sync_event)? { // Sync event was successfully processed - SyncEventOutcome::Processed => (), + EngineEventOutcome::Processed => (), // Max block has been reached, exit the engine loop - SyncEventOutcome::ReachedMaxBlock => return Poll::Ready(Ok(())), + EngineEventOutcome::ReachedMaxBlock => return Poll::Ready(Ok(())), } // this could have taken a while, so we start the next cycle to handle any new @@ -1828,21 +1928,29 @@ where } } -/// Represents all outcomes of an applied fork choice update. -#[derive(Debug)] -enum OnForkchoiceUpdateOutcome { - /// FCU was processed successfully. - Processed, - /// FCU was processed successfully and reached max block. 
- ReachedMaxBlock, +enum BlockchainTreeAction<EngineT: EngineTypes> { + MakeForkchoiceHeadCanonical { + state: ForkchoiceState, + attrs: Option<EngineT::PayloadAttributes>, + tx: oneshot::Sender<RethResult<OnForkChoiceUpdated>>, + }, + InsertNewPayload { + block: SealedBlock, + tx: oneshot::Sender<Result<PayloadStatus, BeaconOnNewPayloadError>>, + }, + MakeNewPayloadCanonical { + payload_num_hash: BlockNumHash, + status: PayloadStatus, + tx: oneshot::Sender<Result<PayloadStatus, BeaconOnNewPayloadError>>, + }, } -/// Represents outcomes of processing a sync event +/// Represents outcomes of processing an engine event #[derive(Debug)] -enum SyncEventOutcome { - /// Sync event was processed successfully, engine should continue. +enum EngineEventOutcome { + /// Engine event was processed successfully, engine should continue. Processed, - /// Sync event was processed successfully and reached max block. + /// Engine event was processed successfully and reached max block. ReachedMaxBlock, } @@ -1858,7 +1966,7 @@ mod tests { use reth_primitives::{stage::StageCheckpoint, ChainSpecBuilder, MAINNET}; use reth_provider::{BlockWriter, ProviderFactory}; use reth_rpc_types::engine::{ForkchoiceState, ForkchoiceUpdated, PayloadStatus}; - use reth_rpc_types_compat::engine::payload::try_block_to_payload_v1; + use reth_rpc_types_compat::engine::payload::block_to_payload_v1; use reth_stages::{ExecOutput, PipelineError, StageError}; use std::{collections::VecDeque, sync::Arc}; use tokio::sync::oneshot::error::TryRecvError; @@ -1920,7 +2028,7 @@ mod tests { assert_matches!(rx.try_recv(), Err(TryRecvError::Empty)); // consensus engine is still idle because no FCUs were received - let _ = env.send_new_payload(try_block_to_payload_v1(SealedBlock::default()), None).await; + let _ = env.send_new_payload(block_to_payload_v1(SealedBlock::default()), None).await; assert_matches!(rx.try_recv(), Err(TryRecvError::Empty)); @@ -2350,11 +2458,9 @@ mod tests { use super::*; use reth_db::test_utils::create_test_static_files_dir; use reth_interfaces::test_utils::generators::random_block; - use reth_primitives::{ - genesis::{Genesis, GenesisAllocator}, - Hardfork, U256, - }; - use reth_provider::test_utils::blocks::BlockChainTestData; + use reth_primitives::{genesis::Genesis, Hardfork, U256}; + use reth_provider::test_utils::blocks::BlockchainTestData; + use reth_testing_utils::GenesisAllocator; #[tokio::test] async fn new_payload_before_forkchoice() { @@ -2379,7 +2485,7 @@ mod tests { // Send new payload let res = env .send_new_payload( - try_block_to_payload_v1(random_block(&mut rng, 0, None, None, Some(0))), + block_to_payload_v1(random_block(&mut rng, 0, None, None, Some(0))), None, ) .await; @@ -2390,7 +2496,7 @@ mod tests { // Send new payload let res = env .send_new_payload( - try_block_to_payload_v1(random_block(&mut rng, 1, None, None, Some(0))), + block_to_payload_v1(random_block(&mut rng, 1, None, None, Some(0))), None, ) .await; @@ -2446,7 +2552,7 @@ mod tests { // Send new payload let result = env - .send_new_payload_retry_on_syncing(try_block_to_payload_v1(block2.clone()), None) + .send_new_payload_retry_on_syncing(block_to_payload_v1(block2.clone()), None) .await .unwrap(); @@ -2560,7 +2666,7 @@ mod tests { // Send new payload let parent = rng.gen(); let block = random_block(&mut rng, 2, Some(parent), None, Some(0)); - let res = env.send_new_payload(try_block_to_payload_v1(block), None).await; + let res = env.send_new_payload(block_to_payload_v1(block), None).await; let expected_result = PayloadStatus::from_status(PayloadStatusEnum::Syncing); assert_matches!(res, Ok(result) => assert_eq!(result, expected_result)); @@ -2569,7 +2675,7 @@ mod tests { #[tokio::test] async fn
payload_pre_merge() { - let data = BlockChainTestData::default(); + let data = BlockchainTestData::default(); let mut block1 = data.blocks[0].0.block.clone(); block1 .header @@ -2627,7 +2733,7 @@ // Send new payload let result = env - .send_new_payload_retry_on_syncing(try_block_to_payload_v1(block2.clone()), None) + .send_new_payload_retry_on_syncing(block_to_payload_v1(block2.clone()), None) .await .unwrap(); diff --git a/crates/consensus/beacon/src/engine/sync.rs b/crates/consensus/beacon/src/engine/sync.rs index 96163e996..f73c4b54e 100644 --- a/crates/consensus/beacon/src/engine/sync.rs +++ b/crates/consensus/beacon/src/engine/sync.rs @@ -1,8 +1,8 @@ //! Sync management for the engine implementation. use crate::{ - engine::metrics::EngineSyncMetrics, BeaconConsensus, BeaconConsensusEngineEvent, - ConsensusEngineLiveSyncProgress, + engine::metrics::EngineSyncMetrics, BeaconConsensusEngineEvent, + ConsensusEngineLiveSyncProgress, EthBeaconConsensus, }; use futures::FutureExt; use reth_db::database::Database; @@ -11,7 +11,7 @@ use reth_interfaces::p2p::{ full_block::{FetchFullBlockFuture, FetchFullBlockRangeFuture, FullBlockClient}, headers::client::HeadersClient, }; -use reth_primitives::{BlockNumber, ChainSpec, SealedBlock, B256}; +use reth_primitives::{stage::PipelineTarget, BlockNumber, ChainSpec, SealedBlock, B256}; use reth_stages_api::{ControlFlow, Pipeline, PipelineError, PipelineWithResult}; use reth_tasks::TaskSpawner; use reth_tokio_util::EventListeners; @@ -44,7 +44,7 @@ where /// The pipeline is used for large ranges. pipeline_state: PipelineState<DB>, /// Pending target block for the pipeline to sync - pending_pipeline_target: Option<B256>, + pending_pipeline_target: Option<PipelineTarget>, /// In-flight full block requests in progress. inflight_full_block_requests: Vec<FetchFullBlockFuture<Client>>, /// In-flight full block _range_ requests in progress. @@ -81,7 +81,7 @@ where Self { full_block_client: FullBlockClient::new( client, - Arc::new(BeaconConsensus::new(chain_spec)), + Arc::new(EthBeaconConsensus::new(chain_spec)), ), pipeline_task_spawner, pipeline_state: PipelineState::Idle(Some(pipeline)), @@ -216,8 +216,12 @@ where /// Sets a new target to sync the pipeline to. /// /// But ensures the target is not the zero hash. - pub(crate) fn set_pipeline_sync_target(&mut self, target: B256) { - if target.is_zero() { + pub(crate) fn set_pipeline_sync_target(&mut self, target: PipelineTarget) { + if target.sync_target().is_some_and(|target| target.is_zero()) { + trace!( + target: "consensus::engine::sync", + "Pipeline target cannot be zero hash." + ); // precaution to never sync to the zero hash return } @@ -384,7 +388,7 @@ pub(crate) enum EngineSyncEvent { /// Pipeline started syncing /// /// This is none if the pipeline is triggered without a specific target. - PipelineStarted(Option<B256>), + PipelineStarted(Option<PipelineTarget>), /// Pipeline finished /// /// If this is returned, the pipeline is idle.
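`set_pipeline_sync_target` now takes a `PipelineTarget`, which is either a sync-to-hash or an unwind-to-block request, and the zero-hash precaution only applies to the former. A minimal sketch of the guard with a simplified stand-in for the real `PipelineTarget`:

```rust
#[derive(Debug, Clone, Copy)]
enum PipelineTarget {
    /// Sync the pipeline to this block hash.
    Sync([u8; 32]),
    /// Unwind the pipeline to this block number.
    Unwind(u64),
}

impl PipelineTarget {
    fn sync_target(&self) -> Option<[u8; 32]> {
        match self {
            PipelineTarget::Sync(hash) => Some(*hash),
            PipelineTarget::Unwind(_) => None,
        }
    }
}

fn set_pipeline_sync_target(pending: &mut Option<PipelineTarget>, target: PipelineTarget) {
    // Precaution to never sync to the zero hash; unwind targets pass through.
    if target.sync_target().is_some_and(|hash| hash == [0u8; 32]) {
        return;
    }
    *pending = Some(target);
}

fn main() {
    let mut pending = None;
    set_pipeline_sync_target(&mut pending, PipelineTarget::Sync([0u8; 32]));
    assert!(pending.is_none(), "zero hash must be ignored");
    set_pipeline_sync_target(&mut pending, PipelineTarget::Unwind(100));
    assert!(pending.is_some());
}
```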
@@ -436,8 +440,8 @@ mod tests { Header, PruneModes, SealedHeader, MAINNET, }; use reth_provider::{ - test_utils::{create_test_provider_factory_with_chain_spec, TestExecutorFactory}, - BundleStateWithReceipts, + test_utils::create_test_provider_factory_with_chain_spec, BundleStateWithReceipts, + StaticFileProviderFactory, }; use reth_stages::{test_utils::TestStages, ExecOutput, StageError}; use reth_static_file::StaticFileProducer; @@ -488,9 +492,6 @@ mod tests { fn build(self, chain_spec: Arc) -> Pipeline>> { reth_tracing::init_test_tracing(); - let executor_factory = TestExecutorFactory::default(); - executor_factory.extend(self.executor_results); - // Setup pipeline let (tip_tx, _tip_rx) = watch::channel(B256::default()); let mut pipeline = Pipeline::builder() @@ -590,7 +591,7 @@ mod tests { .build(pipeline, chain_spec); let tip = client.highest_block().expect("there should be blocks here"); - sync_controller.set_pipeline_sync_target(tip.hash()); + sync_controller.set_pipeline_sync_target(tip.hash().into()); let sync_future = poll_fn(|cx| sync_controller.poll(cx)); let next_event = poll!(sync_future); @@ -598,7 +599,7 @@ mod tests { // can assert that the first event here is PipelineStarted because we set the sync target, // and we should get Ready because the pipeline should be spawned immediately assert_matches!(next_event, Poll::Ready(EngineSyncEvent::PipelineStarted(Some(target))) => { - assert_eq!(target, tip.hash()); + assert_eq!(target.sync_target().unwrap(), tip.hash()); }); // the next event should be the pipeline finishing in a good state diff --git a/crates/consensus/beacon/src/engine/test_utils.rs b/crates/consensus/beacon/src/engine/test_utils.rs index 329ea6446..6cad1b471 100644 --- a/crates/consensus/beacon/src/engine/test_utils.rs +++ b/crates/consensus/beacon/src/engine/test_utils.rs @@ -1,36 +1,33 @@ use crate::{ - engine::hooks::PruneHook, hooks::EngineHooks, BeaconConsensus, BeaconConsensusEngine, + engine::hooks::PruneHook, hooks::EngineHooks, BeaconConsensusEngine, BeaconConsensusEngineError, BeaconConsensusEngineHandle, BeaconForkChoiceUpdateError, - BeaconOnNewPayloadError, MIN_BLOCKS_FOR_PIPELINE_RUN, + BeaconOnNewPayloadError, EthBeaconConsensus, MIN_BLOCKS_FOR_PIPELINE_RUN, }; use reth_blockchain_tree::{ config::BlockchainTreeConfig, externals::TreeExternals, BlockchainTree, ShareableBlockchainTree, }; use reth_config::config::EtlConfig; +use reth_consensus::{test_utils::TestConsensus, Consensus}; use reth_db::{test_utils::TempDatabase, DatabaseEnv as DE}; -use reth_ethereum_engine_primitives::EthEngineTypes; -use reth_evm_ethereum::EthEvmConfig; -type DatabaseEnv = TempDatabase; use reth_downloaders::{ bodies::bodies::BodiesDownloaderBuilder, headers::reverse_headers::ReverseHeadersDownloaderBuilder, }; +use reth_ethereum_engine_primitives::EthEngineTypes; +use reth_evm::{either::Either, test_utils::MockExecutorProvider}; +use reth_evm_ethereum::execute::EthExecutorProvider; use reth_interfaces::{ - consensus::Consensus, - executor::BlockExecutionError, p2p::{bodies::client::BodiesClient, either::EitherDownloader, headers::client::HeadersClient}, sync::NoopSyncStateUpdater, - test_utils::{NoopFullBlockClient, TestConsensus}, + test_utils::NoopFullBlockClient, }; use reth_payload_builder::test_utils::spawn_test_payload_service; use reth_primitives::{BlockNumber, ChainSpec, FinishedExExHeight, PruneModes, B256}; use reth_provider::{ - providers::BlockchainProvider, - test_utils::{create_test_provider_factory_with_chain_spec, TestExecutorFactory}, - 
BundleStateWithReceipts, ExecutorFactory, HeaderSyncMode, PrunableBlockExecutor, + providers::BlockchainProvider, test_utils::create_test_provider_factory_with_chain_spec, + BundleStateWithReceipts, HeaderSyncMode, StaticFileProviderFactory, }; use reth_prune::Pruner; -use reth_revm::EvmProcessorFactory; use reth_rpc_types::engine::{ CancunPayloadFields, ExecutionPayload, ForkchoiceState, ForkchoiceUpdated, PayloadStatus, }; @@ -40,15 +37,11 @@ use reth_tasks::TokioTaskExecutor; use std::{collections::VecDeque, sync::Arc}; use tokio::sync::{oneshot, watch}; +type DatabaseEnv = TempDatabase; + type TestBeaconConsensusEngine = BeaconConsensusEngine< Arc, - BlockchainProvider< - Arc, - ShareableBlockchainTree< - Arc, - EitherExecutorFactory>, - >, - >, + BlockchainProvider>, Arc>, EthEngineTypes, >; @@ -159,31 +152,6 @@ impl Default for TestExecutorConfig { } } -/// A type that represents one of two possible executor factories. -#[derive(Debug, Clone)] -pub enum EitherExecutorFactory { - /// The first factory variant - Left(A), - /// The second factory variant - Right(B), -} - -impl ExecutorFactory for EitherExecutorFactory -where - A: ExecutorFactory, - B: ExecutorFactory, -{ - fn with_state<'a, SP: reth_provider::StateProvider + 'a>( - &'a self, - sp: SP, - ) -> Box + 'a> { - match self { - EitherExecutorFactory::Left(a) => a.with_state::<'a, SP>(sp), - EitherExecutorFactory::Right(b) => b.with_state::<'a, SP>(sp), - } - } -} - /// The basic configuration for a `TestConsensusEngine`, without generics for the client or /// consensus engine. #[derive(Debug)] @@ -354,7 +322,7 @@ where let consensus: Arc = match self.base_config.consensus { TestConsensusConfig::Real => { - Arc::new(BeaconConsensus::new(Arc::clone(&self.base_config.chain_spec))) + Arc::new(EthBeaconConsensus::new(Arc::clone(&self.base_config.chain_spec))) } TestConsensusConfig::Test => Arc::new(TestConsensus::default()), }; @@ -370,14 +338,13 @@ where // use either test executor or real executor let executor_factory = match self.base_config.executor_config { TestExecutorConfig::Test(results) => { - let executor_factory = TestExecutorFactory::default(); + let executor_factory = MockExecutorProvider::default(); executor_factory.extend(results); - EitherExecutorFactory::Left(executor_factory) + Either::Left(executor_factory) + } + TestExecutorConfig::Real => { + Either::Right(EthExecutorProvider::ethereum(self.base_config.chain_spec.clone())) } - TestExecutorConfig::Real => EitherExecutorFactory::Right(EvmProcessorFactory::new( - self.base_config.chain_spec.clone(), - EthEvmConfig::default(), - )), }; let static_file_producer = StaticFileProducer::new( @@ -422,9 +389,9 @@ where // Setup blockchain tree let externals = TreeExternals::new(provider_factory.clone(), consensus, executor_factory); let config = BlockchainTreeConfig::new(1, 2, 3, 2); - let tree = ShareableBlockchainTree::new( + let tree = Arc::new(ShareableBlockchainTree::new( BlockchainTree::new(externals, config, None).expect("failed to create tree"), - ); + )); let latest = self.base_config.chain_spec.genesis_header().seal_slow(); let blockchain_provider = BlockchainProvider::with_latest(provider_factory.clone(), tree, latest); diff --git a/crates/consensus/beacon/src/lib.rs b/crates/consensus/beacon/src/lib.rs index 5a9e1da4a..f62a75f94 100644 --- a/crates/consensus/beacon/src/lib.rs +++ b/crates/consensus/beacon/src/lib.rs @@ -8,7 +8,7 @@ #![cfg_attr(not(test), warn(unused_crate_dependencies))] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] -pub use 
reth_beacon_consensus_core::BeaconConsensus; +pub use reth_ethereum_consensus::EthBeaconConsensus; mod engine; pub use engine::*; diff --git a/crates/consensus/common/Cargo.toml b/crates/consensus/common/Cargo.toml index 4659dd3e1..af93788ee 100644 --- a/crates/consensus/common/Cargo.toml +++ b/crates/consensus/common/Cargo.toml @@ -15,11 +15,9 @@ workspace = true reth-primitives.workspace = true reth-interfaces.workspace = true reth-provider.workspace = true +reth-consensus.workspace=true [dev-dependencies] reth-interfaces = { workspace = true, features = ["test-utils"] } reth-provider = { workspace = true, features = ["test-utils"] } mockall = "0.12" - -[features] -optimism = ["reth-primitives/optimism"] diff --git a/crates/consensus/common/src/validation.rs b/crates/consensus/common/src/validation.rs index 1ab466c77..b67d40e98 100644 --- a/crates/consensus/common/src/validation.rs +++ b/crates/consensus/common/src/validation.rs @@ -1,14 +1,15 @@ //! Collection of methods for block validation. -use reth_interfaces::{consensus::ConsensusError, RethResult}; +use reth_consensus::ConsensusError; +use reth_interfaces::RethResult; use reth_primitives::{ - constants::eip4844::{DATA_GAS_PER_BLOB, MAX_DATA_GAS_PER_BLOCK}, - BlockNumber, ChainSpec, GotExpected, Hardfork, Header, InvalidTransactionError, SealedBlock, - SealedHeader, Transaction, TransactionSignedEcRecovered, TxEip1559, TxEip2930, TxEip4844, - TxLegacy, + constants::{ + eip4844::{DATA_GAS_PER_BLOB, MAX_DATA_GAS_PER_BLOCK}, + MAXIMUM_EXTRA_DATA_SIZE, + }, + ChainSpec, GotExpected, Hardfork, Header, SealedBlock, SealedHeader, }; -use reth_provider::{AccountReader, HeaderProvider, WithdrawalsProvider}; -use std::collections::{hash_map::Entry, HashMap}; +use reth_provider::{HeaderProvider, WithdrawalsProvider}; /// Validate header standalone pub fn validate_header_standalone( @@ -55,148 +56,6 @@ pub fn validate_header_standalone( Ok(()) } -/// Validate a transaction with regard to a block header. -/// -/// The only parameter from the header that affects the transaction is `base_fee`. -pub fn validate_transaction_regarding_header( - transaction: &Transaction, - chain_spec: &ChainSpec, - at_block_number: BlockNumber, - at_timestamp: u64, - base_fee: Option, -) -> Result<(), ConsensusError> { - #[allow(unreachable_patterns)] - let chain_id = match transaction { - Transaction::Legacy(TxLegacy { chain_id, .. }) => { - // EIP-155: Simple replay attack protection: https://eips.ethereum.org/EIPS/eip-155 - if !chain_spec.fork(Hardfork::SpuriousDragon).active_at_block(at_block_number) && - chain_id.is_some() - { - return Err(InvalidTransactionError::OldLegacyChainId.into()) - } - *chain_id - } - Transaction::Eip2930(TxEip2930 { chain_id, .. }) => { - // EIP-2930: Optional access lists: https://eips.ethereum.org/EIPS/eip-2930 (New transaction type) - if !chain_spec.fork(Hardfork::Berlin).active_at_block(at_block_number) { - return Err(InvalidTransactionError::Eip2930Disabled.into()) - } - Some(*chain_id) - } - Transaction::Eip1559(TxEip1559 { - chain_id, - max_fee_per_gas, - max_priority_fee_per_gas, - .. 
- }) => { - // EIP-1559: Fee market change for ETH 1.0 chain https://eips.ethereum.org/EIPS/eip-1559 - if !chain_spec.fork(Hardfork::London).active_at_block(at_block_number) { - return Err(InvalidTransactionError::Eip1559Disabled.into()) - } - - // EIP-1559: add more constraints to the tx validation - // https://github.com/ethereum/EIPs/pull/3594 - if max_priority_fee_per_gas > max_fee_per_gas { - return Err(InvalidTransactionError::TipAboveFeeCap.into()) - } - - Some(*chain_id) - } - Transaction::Eip4844(TxEip4844 { - chain_id, - max_fee_per_gas, - max_priority_fee_per_gas, - .. - }) => { - // EIP-4844: Shard Blob Transactions https://eips.ethereum.org/EIPS/eip-4844 - if !chain_spec.is_cancun_active_at_timestamp(at_timestamp) { - return Err(InvalidTransactionError::Eip4844Disabled.into()) - } - - // EIP-1559: add more constraints to the tx validation - // https://github.com/ethereum/EIPs/pull/3594 - if max_priority_fee_per_gas > max_fee_per_gas { - return Err(InvalidTransactionError::TipAboveFeeCap.into()) - } - - Some(*chain_id) - } - _ => { - // Op Deposit - None - } - }; - if let Some(chain_id) = chain_id { - if chain_id != chain_spec.chain().id() { - return Err(InvalidTransactionError::ChainIdMismatch.into()) - } - } - // Check basefee and few checks that are related to that. - // https://github.com/ethereum/EIPs/pull/3594 - if let Some(base_fee_per_gas) = base_fee { - if transaction.max_fee_per_gas() < base_fee_per_gas as u128 { - return Err(InvalidTransactionError::FeeCapTooLow.into()) - } - } - - Ok(()) -} - -/// Iterate over all transactions, validate them against each other and against the block. -/// There is no gas check done as [REVM](https://github.com/bluealloy/revm/blob/fd0108381799662098b7ab2c429ea719d6dfbf28/crates/revm/src/evm_impl.rs#L113-L131) already checks that. -pub fn validate_all_transaction_regarding_block_and_nonces< - 'a, - Provider: HeaderProvider + AccountReader, ->( - transactions: impl Iterator, - header: &Header, - provider: Provider, - chain_spec: &ChainSpec, -) -> RethResult<()> { - let mut account_nonces = HashMap::new(); - - for transaction in transactions { - validate_transaction_regarding_header( - transaction, - chain_spec, - header.number, - header.timestamp, - header.base_fee_per_gas, - )?; - - // Get nonce, if there is previous transaction from same sender we need - // to take that nonce. - let nonce = match account_nonces.entry(transaction.signer()) { - Entry::Occupied(mut entry) => { - let nonce = *entry.get(); - *entry.get_mut() += 1; - nonce - } - Entry::Vacant(entry) => { - let account = provider.basic_account(transaction.signer())?.unwrap_or_default(); - // Signer account shouldn't have bytecode. Presence of bytecode means this is a - // smartcontract. - if account.has_bytecode() { - return Err(ConsensusError::from( - InvalidTransactionError::SignerAccountHasBytecode, - ) - .into()) - } - let nonce = account.nonce; - entry.insert(account.nonce + 1); - nonce - } - }; - - // check nonce - if transaction.nonce() != nonce { - return Err(ConsensusError::from(InvalidTransactionError::NonceNotConsistent).into()) - } - } - - Ok(()) -} - /// Validate a block without regard for state: /// /// - Compares the ommer hash in the block header to the block body @@ -320,6 +179,18 @@ pub fn validate_4844_header_standalone(header: &SealedHeader) -> Result<(), Cons Ok(()) } +/// Validates the header's extradata according to the beacon consensus rules. +/// +/// From yellow paper: extraData: An arbitrary byte array containing data relevant to this block. 
+/// This must be 32 bytes or fewer; formally Hx. +pub fn validate_header_extradata(header: &Header) -> Result<(), ConsensusError> { + if header.extra_data.len() > MAXIMUM_EXTRA_DATA_SIZE { + Err(ConsensusError::ExtraDataExceedsMax { len: header.extra_data.len() }) + } else { + Ok(()) + } +} + #[cfg(test)] mod tests { use super::*; @@ -329,10 +200,11 @@ mod tests { test_utils::generators::{self, Rng}, }; use reth_primitives::{ - hex_literal::hex, proofs, Account, Address, BlockBody, BlockHash, BlockHashOrNumber, Bytes, - ChainSpecBuilder, Signature, TransactionKind, TransactionSigned, Withdrawal, Withdrawals, - MAINNET, U256, + hex_literal::hex, proofs, Account, Address, BlockBody, BlockHash, BlockHashOrNumber, + BlockNumber, Bytes, ChainSpecBuilder, Signature, Transaction, TransactionSigned, TxEip4844, + Withdrawal, Withdrawals, U256, }; + use reth_provider::AccountReader; use std::ops::RangeBounds; mock! { @@ -366,15 +238,6 @@ mod tests { withdrawals_provider: MockWithdrawalsProvider::new(), } } - /// New provider where is_known is always true - fn new_known() -> Self { - Self { - is_known: true, - parent: None, - account: None, - withdrawals_provider: MockWithdrawalsProvider::new(), - } - } } impl AccountReader for Provider { @@ -441,25 +304,6 @@ mod tests { } } - fn mock_tx(nonce: u64) -> TransactionSignedEcRecovered { - let request = Transaction::Eip2930(TxEip2930 { - chain_id: 1u64, - nonce, - gas_price: 0x28f000fff, - gas_limit: 10, - to: TransactionKind::Call(Address::default()), - value: U256::from(3_u64), - input: Bytes::from(vec![1, 2]), - access_list: Default::default(), - }); - - let signature = Signature { odd_y_parity: true, r: U256::default(), s: U256::default() }; - - let tx = TransactionSigned::from_transaction_and_signature(request, signature); - let signer = Address::ZERO; - TransactionSignedEcRecovered::from_signed_transaction(tx, signer) - } - fn mock_blob_tx(nonce: u64, num_blobs: usize) -> TransactionSigned { let mut rng = generators::rng(); let request = Transaction::Eip4844(TxEip4844 { @@ -469,7 +313,7 @@ mod tests { max_priority_fee_per_gas: 0x28f000fff, max_fee_per_blob_gas: 0x7, gas_limit: 10, - to: TransactionKind::Call(Address::default()), + to: Address::default().into(), value: U256::from(3_u64), input: Bytes::from(vec![1, 2]), access_list: Default::default(), @@ -523,60 +367,6 @@ mod tests { (SealedBlock { header: header.seal_slow(), body, ommers, withdrawals: None }, parent) } - #[test] - fn sanity_tx_nonce_check() { - let (block, _) = mock_block(); - let tx1 = mock_tx(0); - let tx2 = mock_tx(1); - let provider = Provider::new_known(); - - let txs = vec![tx1, tx2]; - validate_all_transaction_regarding_block_and_nonces( - txs.iter(), - &block.header, - provider, - &MAINNET, - ) - .expect("To Pass"); - } - - #[test] - fn nonce_gap_in_first_transaction() { - let (block, _) = mock_block(); - let tx1 = mock_tx(1); - let provider = Provider::new_known(); - - let txs = vec![tx1]; - assert_eq!( - validate_all_transaction_regarding_block_and_nonces( - txs.iter(), - &block.header, - provider, - &MAINNET, - ), - Err(ConsensusError::from(InvalidTransactionError::NonceNotConsistent).into()) - ) - } - - #[test] - fn nonce_gap_on_second_tx_from_same_signer() { - let (block, _) = mock_block(); - let tx1 = mock_tx(0); - let tx2 = mock_tx(3); - let provider = Provider::new_known(); - - let txs = vec![tx1, tx2]; - assert_eq!( - validate_all_transaction_regarding_block_and_nonces( - txs.iter(), - &block.header, - provider, - &MAINNET, - ), - 
Err(ConsensusError::from(InvalidTransactionError::NonceNotConsistent).into()) - ); - } - #[test] fn valid_withdrawal_index() { let chain_spec = ChainSpecBuilder::mainnet().shanghai_activated().build(); diff --git a/crates/consensus/consensus/Cargo.toml b/crates/consensus/consensus/Cargo.toml new file mode 100644 index 000000000..308a16f20 --- /dev/null +++ b/crates/consensus/consensus/Cargo.toml @@ -0,0 +1,21 @@ +[package] +name = "reth-consensus" +version.workspace = true +edition.workspace = true +rust-version.workspace = true +license.workspace = true +homepage.workspace = true +repository.workspace = true + +[lints] +workspace = true + +[dependencies] +reth-primitives.workspace = true + +# misc +auto_impl.workspace = true +thiserror.workspace = true + +[features] +test-utils = [] \ No newline at end of file diff --git a/crates/interfaces/src/consensus.rs b/crates/consensus/consensus/src/lib.rs similarity index 93% rename from crates/interfaces/src/consensus.rs rename to crates/consensus/consensus/src/lib.rs index b7d03b72e..2dee6b124 100644 --- a/crates/interfaces/src/consensus.rs +++ b/crates/consensus/consensus/src/lib.rs @@ -1,11 +1,22 @@ +//! Consensus protocol functions + +#![doc( + html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png", + html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256", + issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" +)] +#![cfg_attr(not(test), warn(unused_crate_dependencies))] +#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] + use reth_primitives::{ BlockHash, BlockNumber, GotExpected, GotExpectedBoxed, Header, HeaderValidationError, InvalidTransactionError, SealedBlock, SealedHeader, B256, U256, }; use std::fmt::Debug; -/// Re-export fork choice state -pub use reth_rpc_types::engine::ForkchoiceState; +#[cfg(any(test, feature = "test-utils"))] +/// test helpers for mocking consensus +pub mod test_utils; /// Consensus is a protocol that chooses canonical chain. #[auto_impl::auto_impl(&, Arc)] @@ -248,6 +259,13 @@ pub enum ConsensusError { HeaderValidationError(#[from] HeaderValidationError), } +impl ConsensusError { + /// Returns `true` if the error is a state root error. + pub fn is_state_root_error(&self) -> bool { + matches!(self, ConsensusError::BodyStateRootDiff(_)) + } +} + /// `HeaderConsensusError` combines a `ConsensusError` with the `SealedHeader` it relates to. #[derive(thiserror::Error, Debug)] #[error("Consensus error: {0}, Invalid header: {1:?}")] diff --git a/crates/consensus/consensus/src/test_utils.rs b/crates/consensus/consensus/src/test_utils.rs new file mode 100644 index 000000000..a8655661b --- /dev/null +++ b/crates/consensus/consensus/src/test_utils.rs @@ -0,0 +1,70 @@ +use crate::{Consensus, ConsensusError}; +use reth_primitives::{Header, SealedBlock, SealedHeader, U256}; +use std::sync::atomic::{AtomicBool, Ordering}; + +/// Consensus engine implementation for testing +#[derive(Debug)] +pub struct TestConsensus { + /// Flag whether the header validation should purposefully fail + fail_validation: AtomicBool, +} + +impl Default for TestConsensus { + fn default() -> Self { + Self { fail_validation: AtomicBool::new(false) } + } +} + +impl TestConsensus { + /// Get the failed validation flag. + pub fn fail_validation(&self) -> bool { + self.fail_validation.load(Ordering::SeqCst) + } + + /// Update the validation flag. 
+ pub fn set_fail_validation(&self, val: bool) { + self.fail_validation.store(val, Ordering::SeqCst) + } +} + +impl Consensus for TestConsensus { + fn validate_header(&self, _header: &SealedHeader) -> Result<(), ConsensusError> { + if self.fail_validation() { + Err(ConsensusError::BaseFeeMissing) + } else { + Ok(()) + } + } + + fn validate_header_against_parent( + &self, + _header: &SealedHeader, + _parent: &SealedHeader, + ) -> Result<(), ConsensusError> { + if self.fail_validation() { + Err(ConsensusError::BaseFeeMissing) + } else { + Ok(()) + } + } + + fn validate_header_with_total_difficulty( + &self, + _header: &Header, + _total_difficulty: U256, + ) -> Result<(), ConsensusError> { + if self.fail_validation() { + Err(ConsensusError::BaseFeeMissing) + } else { + Ok(()) + } + } + + fn validate_block(&self, _block: &SealedBlock) -> Result<(), ConsensusError> { + if self.fail_validation() { + Err(ConsensusError::BaseFeeMissing) + } else { + Ok(()) + } + } +} diff --git a/crates/e2e-test-utils/Cargo.toml b/crates/e2e-test-utils/Cargo.toml index f32ff029c..03e0edb91 100644 --- a/crates/e2e-test-utils/Cargo.toml +++ b/crates/e2e-test-utils/Cargo.toml @@ -17,6 +17,8 @@ reth-tracing.workspace = true reth-db.workspace = true reth-rpc.workspace = true reth-payload-builder = { workspace = true, features = ["test-utils"] } +reth-provider.workspace = true +reth-node-builder.workspace = true jsonrpsee.workspace = true @@ -31,4 +33,5 @@ alloy-signer.workspace = true alloy-signer-wallet = { workspace = true, features = ["mnemonic"] } alloy-rpc-types.workspace = true alloy-network.workspace = true -alloy-consensus.workspace = true +alloy-consensus = { workspace = true, features = ["kzg"] } +tracing.workspace = true \ No newline at end of file diff --git a/crates/e2e-test-utils/src/engine_api.rs b/crates/e2e-test-utils/src/engine_api.rs index ec8b058a3..fefd7d6ff 100644 --- a/crates/e2e-test-utils/src/engine_api.rs +++ b/crates/e2e-test-utils/src/engine_api.rs @@ -1,22 +1,29 @@ use crate::traits::PayloadEnvelopeExt; -use jsonrpsee::http_client::HttpClient; +use jsonrpsee::{ + core::client::ClientT, + http_client::{transport::HttpBackend, HttpClient}, +}; use reth::{ api::{EngineTypes, PayloadBuilderAttributes}, providers::CanonStateNotificationStream, - rpc::{api::EngineApiClient, types::engine::ForkchoiceState}, + rpc::{ + api::EngineApiClient, + types::engine::{ForkchoiceState, PayloadStatusEnum}, + }, }; use reth_payload_builder::PayloadId; use reth_primitives::B256; +use reth_rpc::AuthClientService; use std::marker::PhantomData; /// Helper for engine api operations -pub struct EngineApiHelper<E> { +pub struct EngineApiTestContext<E> { pub canonical_stream: CanonStateNotificationStream, - pub engine_api_client: HttpClient, + pub engine_api_client: HttpClient<AuthClientService<HttpBackend>>, pub _marker: PhantomData<E>, } -impl<E: EngineTypes> EngineApiHelper<E> { +impl<E: EngineTypes> EngineApiTestContext<E> { /// Retrieves a v3 payload from the engine api pub async fn get_payload_v3( &self, @@ -25,11 +32,21 @@ impl<E: EngineTypes> EngineApiHelper<E> { Ok(EngineApiClient::<E>::get_payload_v3(&self.engine_api_client, payload_id).await?) } + /// Retrieves a v3 payload from the engine api as serde value + pub async fn get_payload_v3_value( + &self, + payload_id: PayloadId, + ) -> eyre::Result<serde_json::Value> { + Ok(self.engine_api_client.request("engine_getPayloadV3", (payload_id,)).await?)
+ } + /// Submits a payload to the engine api pub async fn submit_payload( &self, payload: E::BuiltPayload, payload_builder_attributes: E::PayloadBuilderAttributes, + expected_status: PayloadStatusEnum, + versioned_hashes: Vec<B256>, ) -> eyre::Result<B256> where E::ExecutionPayloadV3: From<E::BuiltPayload> + PayloadEnvelopeExt, { @@ -41,22 +58,39 @@ impl<E: EngineTypes> EngineApiHelper<E> { let submission = EngineApiClient::<E>::new_payload_v3( &self.engine_api_client, envelope_v3.execution_payload(), - vec![], + versioned_hashes, payload_builder_attributes.parent_beacon_block_root().unwrap(), ) .await?; - assert!(submission.is_valid(), "{}", submission); - Ok(submission.latest_valid_hash.unwrap()) + + assert_eq!(submission.status, expected_status); + + Ok(submission.latest_valid_hash.unwrap_or_default()) } /// Sends forkchoice update to the engine api - pub async fn update_forkchoice(&self, hash: B256) -> eyre::Result<()> { + pub async fn update_forkchoice(&self, current_head: B256, new_head: B256) -> eyre::Result<()> { + EngineApiClient::<E>::fork_choice_updated_v2( + &self.engine_api_client, + ForkchoiceState { + head_block_hash: new_head, + safe_block_hash: current_head, + finalized_block_hash: current_head, + }, + None, + ) + .await?; + Ok(()) + } + + /// Sends forkchoice update to the engine api with a zero finalized hash + pub async fn update_optimistic_forkchoice(&self, hash: B256) -> eyre::Result<()> { EngineApiClient::<E>::fork_choice_updated_v2( + &self.engine_api_client, ForkchoiceState { head_block_hash: hash, - safe_block_hash: hash, - finalized_block_hash: hash, + safe_block_hash: B256::ZERO, + finalized_block_hash: B256::ZERO, }, None, ) diff --git a/crates/e2e-test-utils/src/lib.rs b/crates/e2e-test-utils/src/lib.rs index 016fb4d3e..aa7d46428 100644 --- a/crates/e2e-test-utils/src/lib.rs +++ b/crates/e2e-test-utils/src/lib.rs @@ -1,6 +1,25 @@ +use node::NodeTestContext; +use reth::{ + args::{DiscoveryArgs, NetworkArgs, RpcServerArgs}, + builder::{NodeBuilder, NodeConfig, NodeHandle}, + tasks::TaskManager, +}; +use reth_db::{test_utils::TempDatabase, DatabaseEnv}; +use reth_node_builder::{ + components::NodeComponentsBuilder, FullNodeTypesAdapter, Node, NodeAdapter, RethFullAdapter, +}; +use reth_primitives::ChainSpec; +use reth_provider::providers::BlockchainProvider; +use std::sync::Arc; +use tracing::{span, Level}; +use wallet::Wallet; + /// Wrapper type to create test nodes pub mod node; +/// Helper for transaction operations +pub mod transaction; + /// Helper type to yield accounts from mnemonic pub mod wallet; @@ -12,6 +31,82 @@ mod network; /// Helper for engine api operations mod engine_api; +/// Helper for rpc operations +mod rpc; /// Helper traits mod traits; + +/// Creates the initial setup with `num_nodes` started and interconnected.
+pub async fn setup<N>( + num_nodes: usize, + chain_spec: Arc<ChainSpec>, + is_dev: bool, +) -> eyre::Result<(Vec<NodeHelperType<N>>, TaskManager, Wallet)> +where + N: Default + Node<TmpNodeAdapter<N>>, +{ + let tasks = TaskManager::current(); + let exec = tasks.executor(); + + let network_config = NetworkArgs { + discovery: DiscoveryArgs { disable_discovery: true, ..DiscoveryArgs::default() }, + ..NetworkArgs::default() + }; + + // Create nodes and peer them + let mut nodes: Vec<NodeHelperType<N>> = Vec::with_capacity(num_nodes); + + for idx in 0..num_nodes { + let mut node_config = NodeConfig::test() + .with_chain(chain_spec.clone()) + .with_network(network_config.clone()) + .with_unused_ports() + .with_rpc(RpcServerArgs::default().with_unused_ports().with_http()); + + if is_dev { + node_config = node_config.dev(); + } + + let span = span!(Level::INFO, "node", idx); + let _enter = span.enter(); + let NodeHandle { node, node_exit_future: _ } = NodeBuilder::new(node_config.clone()) + .testing_node(exec.clone()) + .node(Default::default()) + .launch() + .await?; + + let mut node = NodeTestContext::new(node).await?; + + // Connect each node in a chain. + if let Some(previous_node) = nodes.last_mut() { + previous_node.connect(&mut node).await; + } + + // Connect last node with the first if there are more than two + if idx + 1 == num_nodes && num_nodes > 2 { + if let Some(first_node) = nodes.first_mut() { + node.connect(first_node).await; + } + } + + nodes.push(node); + } + + Ok((nodes, tasks, Wallet::default().with_chain_id(chain_spec.chain().into()))) +} + +// Type aliases + +type TmpDB = Arc<TempDatabase<DatabaseEnv>>; +type TmpNodeAdapter<N> = FullNodeTypesAdapter<N, TmpDB, BlockchainProvider<TmpDB>>; + +type Adapter<N> = NodeAdapter< + RethFullAdapter<TmpDB, N>, + <<N as Node<RethFullAdapter<TmpDB, N>>>::ComponentsBuilder as NodeComponentsBuilder< + RethFullAdapter<TmpDB, N>, + >>::Components, +>; + +/// Type alias for a type of NodeHelper +pub type NodeHelperType<N> = NodeTestContext<Adapter<N>>;
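To make the setup helper's shape concrete, a hypothetical test entry point could look as follows; MyNode is a placeholder for any node type satisfying the Default + Node<TmpNodeAdapter<N>> bound above, and gen() comes from the wallet.rs hunk further below:

use std::sync::Arc;

// Sketch: two peered dev-mode nodes plus the default prefunded wallet.
async fn two_node_setup() -> eyre::Result<()> {
    let chain_spec = Arc::new(reth_primitives::ChainSpec::default());
    // Keep `_tasks` alive; dropping the TaskManager tears the nodes down.
    let (nodes, _tasks, wallet) = setup::<MyNode>(2, chain_spec, true).await?;
    assert_eq!(nodes.len(), 2);
    let _signers = wallet.gen();
    Ok(())
}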
diff --git a/crates/e2e-test-utils/src/network.rs b/crates/e2e-test-utils/src/network.rs index 341b0d7d0..92e9b316a 100644 --- a/crates/e2e-test-utils/src/network.rs +++ b/crates/e2e-test-utils/src/network.rs @@ -5,12 +5,12 @@ use reth_tracing::tracing::info; use tokio_stream::wrappers::UnboundedReceiverStream; /// Helper for network operations -pub struct NetworkHelper { +pub struct NetworkTestContext { network_events: UnboundedReceiverStream<NetworkEvent>, network: NetworkHandle, } -impl NetworkHelper { +impl NetworkTestContext { /// Creates a new network helper pub fn new(network: NetworkHandle) -> Self { let network_events = network.event_listener(); diff --git a/crates/e2e-test-utils/src/node.rs b/crates/e2e-test-utils/src/node.rs index d88a428f0..0ae20664a 100644 --- a/crates/e2e-test-utils/src/node.rs +++ b/crates/e2e-test-utils/src/node.rs @@ -1,38 +1,35 @@ use crate::{ - engine_api::EngineApiHelper, network::NetworkHelper, payload::PayloadHelper, - traits::PayloadEnvelopeExt, + engine_api::EngineApiTestContext, network::NetworkTestContext, payload::PayloadTestContext, + rpc::RpcTestContext, traits::PayloadEnvelopeExt, }; + use alloy_rpc_types::BlockNumberOrTag; use eyre::Ok; use futures_util::Future; use reth::{ api::{BuiltPayload, EngineTypes, FullNodeComponents, PayloadBuilderAttributes}, builder::FullNode, - providers::{BlockReaderIdExt, CanonStateSubscriptions}, - rpc::{ - eth::{error::EthResult, EthTransactions}, - types::engine::PayloadAttributes, - }, -}; -use reth_payload_builder::EthPayloadBuilderAttributes; -use reth_primitives::{Address, BlockNumber, Bytes, B256}; -use std::{ - marker::PhantomData, - time::{SystemTime, UNIX_EPOCH}, + providers::{BlockReader, BlockReaderIdExt, CanonStateSubscriptions, StageCheckpointReader}, + rpc::types::engine::PayloadStatusEnum, }; +use reth_node_builder::NodeTypes; +use reth_primitives::{stage::StageId, BlockHash, BlockNumber, Bytes, B256}; +use std::{marker::PhantomData, pin::Pin}; use tokio_stream::StreamExt; /// A helper struct to handle node actions -pub struct NodeHelper<Node> +pub struct NodeTestContext<Node> where Node: FullNodeComponents, { pub inner: FullNode<Node>, - payload: PayloadHelper<Node::Engine>, - pub network: NetworkHelper, - pub engine_api: EngineApiHelper<Node::Engine>, + pub payload: PayloadTestContext<Node::Engine>, + pub network: NetworkTestContext, + pub engine_api: EngineApiTestContext<Node::Engine>, + pub rpc: RpcTestContext<Node>, } -impl<Node> NodeHelper<Node> +impl<Node> NodeTestContext<Node> where Node: FullNodeComponents, { @@ -42,64 +39,164 @@ where Ok(Self { inner: node.clone(), - network: NetworkHelper::new(node.network.clone()), - payload: PayloadHelper::new(builder).await?, - engine_api: EngineApiHelper { + payload: PayloadTestContext::new(builder).await?, + network: NetworkTestContext::new(node.network.clone()), + engine_api: EngineApiTestContext { engine_api_client: node.auth_server_handle().http_client(), canonical_stream: node.provider.canonical_state_stream(), _marker: PhantomData::<Node::Engine>, }, + rpc: RpcTestContext { inner: node.rpc_registry }, }) } - /// Advances the node forward + /// Establishes a peer session with the given node in both directions. + pub async fn connect(&mut self, node: &mut NodeTestContext<Node>) { + self.network.add_peer(node.network.record()).await; + node.network.add_peer(self.network.record()).await; + node.network.expect_session().await; + self.network.expect_session().await; + } + + /// Advances the chain `length` blocks. + /// + /// Returns the added chain as a Vec of (built payload, attributes) pairs. pub async fn advance( &mut self, - raw_tx: Bytes, - attributes_generator: impl Fn(u64) -> <Node::Engine as EngineTypes>::PayloadBuilderAttributes, + length: u64, + tx_generator: impl Fn(u64) -> Pin<Box<dyn Future<Output = Bytes>>>, + attributes_generator: impl Fn(u64) -> <Node::Engine as EngineTypes>::PayloadBuilderAttributes + + Copy, + ) -> eyre::Result< + Vec<( + <Node::Engine as EngineTypes>::BuiltPayload, + <Node::Engine as EngineTypes>::PayloadBuilderAttributes, + )>, + > where <Node::Engine as EngineTypes>::ExecutionPayloadV3: From<<Node::Engine as EngineTypes>::BuiltPayload> + PayloadEnvelopeExt, { - // push tx into pool via RPC server - let tx_hash = self.inject_tx(raw_tx).await?; + let mut chain = Vec::with_capacity(length as usize); + for i in 0..length { + let raw_tx = tx_generator(i).await; + let tx_hash = self.rpc.inject_tx(raw_tx).await?; + let (payload, eth_attr) = self.advance_block(vec![], attributes_generator).await?; + let block_hash = payload.block().hash(); + let block_number = payload.block().number; + self.assert_new_block(tx_hash, block_hash, block_number).await?; + chain.push((payload, eth_attr)); + } + Ok(chain) + } + /// Creates a new payload from the given attributes generator; expects a payload attribute event and waits until the payload is built. + /// + /// It triggers the resolve payload via engine api and expects the built payload event.
+ pub async fn new_payload( + &mut self, + attributes_generator: impl Fn(u64) -> <Node::Engine as EngineTypes>::PayloadBuilderAttributes, + ) -> eyre::Result<( + <<Node as NodeTypes>::Engine as EngineTypes>::BuiltPayload, + <<Node as NodeTypes>::Engine as EngineTypes>::PayloadBuilderAttributes, + )> + where + <Node::Engine as EngineTypes>::ExecutionPayloadV3: + From<<Node::Engine as EngineTypes>::BuiltPayload> + PayloadEnvelopeExt, + { // trigger new payload building draining the pool let eth_attr = self.payload.new_payload(attributes_generator).await.unwrap(); - // first event is the payload attributes self.payload.expect_attr_event(eth_attr.clone()).await?; - // wait for the payload builder to have finished building self.payload.wait_for_built_payload(eth_attr.payload_id()).await; - // trigger resolve payload via engine api - self.engine_api.get_payload_v3(eth_attr.payload_id()).await?; - + self.engine_api.get_payload_v3_value(eth_attr.payload_id()).await?; // ensure we're also receiving the built payload as event - let payload = self.payload.expect_built_payload().await?; + Ok((self.payload.expect_built_payload().await?, eth_attr)) + } - // submit payload via engine api - let block_number = payload.block().number; - let block_hash = self.engine_api.submit_payload(payload, eth_attr.clone()).await?; + /// Advances the node forward one block + pub async fn advance_block( + &mut self, + versioned_hashes: Vec<B256>, + attributes_generator: impl Fn(u64) -> <Node::Engine as EngineTypes>::PayloadBuilderAttributes, + ) -> eyre::Result<( + <Node::Engine as EngineTypes>::BuiltPayload, + <<Node as NodeTypes>::Engine as EngineTypes>::PayloadBuilderAttributes, + )> + where + <Node::Engine as EngineTypes>::ExecutionPayloadV3: + From<<Node::Engine as EngineTypes>::BuiltPayload> + PayloadEnvelopeExt, + { + let (payload, eth_attr) = self.new_payload(attributes_generator).await?; + + let block_hash = self + .engine_api + .submit_payload( + payload.clone(), + eth_attr.clone(), + PayloadStatusEnum::Valid, + versioned_hashes, + ) + .await?; // trigger forkchoice update via engine api to commit the block to the blockchain - self.engine_api.update_forkchoice(block_hash).await?; - // assert the block has been committed to the blockchain - self.assert_new_block(tx_hash, block_hash, block_number).await?; - Ok((block_hash, tx_hash)) + self.engine_api.update_forkchoice(block_hash, block_hash).await?; + + Ok((payload, eth_attr)) } - /// Injects a raw transaction into the node tx pool via RPC server - async fn inject_tx(&mut self, raw_tx: Bytes) -> EthResult<B256> { - let eth_api = self.inner.rpc_registry.eth_api(); - eth_api.send_raw_transaction(raw_tx).await + /// Waits for block to be available on node. + pub async fn wait_block( + &self, + number: BlockNumber, + expected_block_hash: BlockHash, + wait_finish_checkpoint: bool, + ) -> eyre::Result<()> { + let mut check = !wait_finish_checkpoint; + loop { + tokio::time::sleep(std::time::Duration::from_millis(20)).await; + + if !check && wait_finish_checkpoint { + if let Some(checkpoint) = + self.inner.provider.get_stage_checkpoint(StageId::Finish)? + { + if checkpoint.block_number >= number { + check = true + } + } + } + + if check { + if let Some(latest_block) = self.inner.provider.block_by_number(number)? { + assert_eq!(latest_block.hash_slow(), expected_block_hash); + break + } + if wait_finish_checkpoint { + panic!("Finish checkpoint matches, but could not fetch block."); + } + } + } + Ok(()) + } + + /// Waits until the Headers stage checkpoint reports that the node has unwound to the given block number. + pub async fn wait_unwind(&self, number: BlockNumber) -> eyre::Result<()> { + loop { + tokio::time::sleep(std::time::Duration::from_millis(10)).await; + if let Some(checkpoint) = self.inner.provider.get_stage_checkpoint(StageId::Headers)?
{ + if checkpoint.block_number == number { + break + } + } + } + Ok(()) } /// Asserts that a new block has been added to the blockchain - /// and the tx has been included in the block + /// and the tx has been included in the block. + /// + /// Does NOT work for pipeline since there's no stream notification! pub async fn assert_new_block( &mut self, tip_tx_hash: B256, @@ -129,17 +226,3 @@ where Ok(()) } } - -/// Helper function to create a new eth payload attributes -pub fn eth_payload_attributes() -> EthPayloadBuilderAttributes { - let timestamp = SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_secs(); - - let attributes = PayloadAttributes { - timestamp, - prev_randao: B256::ZERO, - suggested_fee_recipient: Address::ZERO, - withdrawals: Some(vec![]), - parent_beacon_block_root: Some(B256::ZERO), - }; - EthPayloadBuilderAttributes::new(B256::ZERO, attributes) -} diff --git a/crates/e2e-test-utils/src/payload.rs b/crates/e2e-test-utils/src/payload.rs index 37138cdd3..47f4134d7 100644 --- a/crates/e2e-test-utils/src/payload.rs +++ b/crates/e2e-test-utils/src/payload.rs @@ -4,13 +4,13 @@ use reth_payload_builder::{Events, PayloadBuilderHandle, PayloadId}; use tokio_stream::wrappers::BroadcastStream; /// Helper for payload operations -pub struct PayloadHelper<E: EngineTypes> { +pub struct PayloadTestContext<E: EngineTypes> { pub payload_event_stream: BroadcastStream<Events<E>>, payload_builder: PayloadBuilderHandle<E>, - timestamp: u64, + pub timestamp: u64, } -impl<E: EngineTypes> PayloadHelper<E> { +impl<E: EngineTypes> PayloadTestContext<E> { /// Creates a new payload helper pub async fn new(payload_builder: PayloadBuilderHandle<E>) -> eyre::Result<Self> { let payload_events = payload_builder.subscribe().await?; diff --git a/crates/e2e-test-utils/src/rpc.rs b/crates/e2e-test-utils/src/rpc.rs new file mode 100644 index 000000000..09f161a91 --- /dev/null +++ b/crates/e2e-test-utils/src/rpc.rs @@ -0,0 +1,24 @@ +use alloy_consensus::TxEnvelope; +use alloy_network::eip2718::Decodable2718; +use reth::{api::FullNodeComponents, builder::rpc::RpcRegistry, rpc::api::DebugApiServer}; +use reth_primitives::{Bytes, B256}; +use reth_rpc::eth::{error::EthResult, EthTransactions}; + +/// Helper for rpc operations +pub struct RpcTestContext<Node: FullNodeComponents> { + pub inner: RpcRegistry<Node>, +} + +impl<Node: FullNodeComponents> RpcTestContext<Node> { + /// Injects a raw transaction into the node tx pool via RPC server + pub async fn inject_tx(&mut self, raw_tx: Bytes) -> EthResult<B256> { + let eth_api = self.inner.eth_api(); + eth_api.send_raw_transaction(raw_tx).await + } + + /// Retrieves a transaction envelope by its hash + pub async fn envelope_by_hash(&mut self, hash: B256) -> eyre::Result<TxEnvelope> { + let tx = self.inner.debug_api().raw_transaction(hash).await?.unwrap(); + let tx = tx.to_vec(); + Ok(TxEnvelope::decode_2718(&mut tx.as_ref()).unwrap()) + } +}
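A sketch of how these rpc helpers compose in a test; the function is hypothetical, and validate_sidecar comes from the transaction.rs hunk just below:

use reth::api::FullNodeComponents;
use reth_primitives::{Bytes, B256};

// Submit a signed blob transaction, read it back decoded, then collect the
// versioned hashes from its validated sidecar.
async fn inject_and_inspect<Node: FullNodeComponents>(
    rpc: &mut RpcTestContext<Node>,
    raw_tx: Bytes,
) -> eyre::Result<Vec<B256>> {
    let hash = rpc.inject_tx(raw_tx).await?;
    let envelope = rpc.envelope_by_hash(hash).await?;
    Ok(TransactionTestContext::validate_sidecar(envelope))
}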
diff --git a/crates/e2e-test-utils/src/transaction.rs b/crates/e2e-test-utils/src/transaction.rs new file mode 100644 index 000000000..ea066304b --- /dev/null +++ b/crates/e2e-test-utils/src/transaction.rs @@ -0,0 +1,80 @@ +use alloy_consensus::{ + BlobTransactionSidecar, SidecarBuilder, SimpleCoder, TxEip4844Variant, TxEnvelope, +}; +use alloy_network::{eip2718::Encodable2718, EthereumSigner, TransactionBuilder}; +use alloy_rpc_types::{TransactionInput, TransactionRequest}; +use alloy_signer_wallet::LocalWallet; +use eyre::Ok; +use reth_primitives::{hex, Address, Bytes, U256}; + +use reth_primitives::{constants::eip4844::MAINNET_KZG_TRUSTED_SETUP, B256}; + +/// Helper for transaction operations +pub struct TransactionTestContext; + +impl TransactionTestContext { + /// Creates a static transfer and signs it + pub async fn transfer_tx(chain_id: u64, wallet: LocalWallet) -> Bytes { + let tx = tx(chain_id, None, 0); + let signer = EthereumSigner::from(wallet); + tx.build(&signer).await.unwrap().encoded_2718().into() + } + + /// Creates a tx with a blob sidecar and signs it + pub async fn tx_with_blobs(chain_id: u64, wallet: LocalWallet) -> eyre::Result<Bytes> { + let mut tx = tx(chain_id, None, 0); + + let mut builder = SidecarBuilder::<SimpleCoder>::new(); + builder.ingest(b"dummy blob"); + let sidecar: BlobTransactionSidecar = builder.build()?; + + tx.set_blob_sidecar(sidecar); + tx.set_max_fee_per_blob_gas(15e9 as u128); + + let signer = EthereumSigner::from(wallet); + let signed = tx.clone().build(&signer).await.unwrap(); + + Ok(signed.encoded_2718().into()) + } + + pub async fn optimism_l1_block_info_tx( + chain_id: u64, + wallet: LocalWallet, + nonce: u64, + ) -> Bytes { + let l1_block_info = Bytes::from_static(&hex!("7ef9015aa044bae9d41b8380d781187b426c6fe43df5fb2fb57bd4466ef6a701e1f01e015694deaddeaddeaddeaddeaddeaddeaddeaddead000194420000000000000000000000000000000000001580808408f0d18001b90104015d8eb900000000000000000000000000000000000000000000000000000000008057650000000000000000000000000000000000000000000000000000000063d96d10000000000000000000000000000000000000000000000000000000000009f35273d89754a1e0387b89520d989d3be9c37c1f32495a88faf1ea05c61121ab0d1900000000000000000000000000000000000000000000000000000000000000010000000000000000000000002d679b567db6187c0c8323fa982cfb88b74dbcc7000000000000000000000000000000000000000000000000000000000000083400000000000000000000000000000000000000000000000000000000000f4240")); + let tx = tx(chain_id, Some(l1_block_info), nonce); + let signer = EthereumSigner::from(wallet); + tx.build(&signer).await.unwrap().encoded_2718().into() + } + + /// Validates the sidecar of a given tx envelope and returns the versioned hashes + pub fn validate_sidecar(tx: TxEnvelope) -> Vec<B256> { + let proof_setting = MAINNET_KZG_TRUSTED_SETUP.clone(); + + match tx { + TxEnvelope::Eip4844(signed) => match signed.tx() { + TxEip4844Variant::TxEip4844WithSidecar(tx) => { + tx.validate_blob(&proof_setting).unwrap(); + tx.sidecar.versioned_hashes().collect() + } + _ => panic!("Expected Eip4844 transaction with sidecar"), + }, + _ => panic!("Expected Eip4844 transaction"), + } + } +} + +/// Creates a type 2 (EIP-1559) transaction +fn tx(chain_id: u64, data: Option<Bytes>, nonce: u64) -> TransactionRequest { + TransactionRequest { + nonce: Some(nonce), + value: Some(U256::from(100)), + to: Some(reth_primitives::TxKind::Call(Address::random())), + gas: Some(210000), + max_fee_per_gas: Some(20e9 as u128), + max_priority_fee_per_gas: Some(20e9 as u128), + chain_id: Some(chain_id), + input: TransactionInput { input: None, data }, + ..Default::default() + } +}
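A sketch exercising the blob helpers end to end without a running node; the function name is hypothetical, and the single-blob expectation follows from the one ingest call in tx_with_blobs above:

use alloy_consensus::TxEnvelope;
use alloy_network::eip2718::Decodable2718;
use alloy_signer_wallet::LocalWallet;

// Build a signed blob tx, decode it, and validate its sidecar proofs.
async fn blob_sidecar_roundtrip(signer: LocalWallet) -> eyre::Result<()> {
    let raw = TransactionTestContext::tx_with_blobs(1, signer).await?;
    let envelope = TxEnvelope::decode_2718(&mut raw.as_ref()).unwrap();
    let versioned_hashes = TransactionTestContext::validate_sidecar(envelope);
    // SimpleCoder packed the single ingested chunk into one blob.
    assert_eq!(versioned_hashes.len(), 1);
    Ok(())
}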
diff --git a/crates/e2e-test-utils/src/wallet.rs b/crates/e2e-test-utils/src/wallet.rs index 43fe7555d..e841e7cd7 100644 --- a/crates/e2e-test-utils/src/wallet.rs +++ b/crates/e2e-test-utils/src/wallet.rs @@ -1,19 +1,20 @@ -use alloy_network::{eip2718::Encodable2718, EthereumSigner, TransactionBuilder}; -use alloy_rpc_types::{TransactionInput, TransactionRequest}; +use alloy_signer::Signer; use alloy_signer_wallet::{coins_bip39::English, LocalWallet, MnemonicBuilder}; -use reth_primitives::{Address, Bytes, U256}; + /// One of the accounts of the genesis allocations. pub struct Wallet { - inner: LocalWallet, - nonce: u64, - chain_id: u64, + pub inner: LocalWallet, + pub inner_nonce: u64, + pub chain_id: u64, + amount: usize, + derivation_path: Option<String>, } impl Wallet { /// Creates a new account from one of the secret/pubkeys of the genesis allocations (test.json) - pub(crate) fn new(phrase: &str) -> Self { - let inner = MnemonicBuilder::<English>::default().phrase(phrase).build().unwrap(); - Self { inner, chain_id: 1, nonce: 0 } + pub fn new(amount: usize) -> Self { + let inner = MnemonicBuilder::<English>::default().phrase(TEST_MNEMONIC).build().unwrap(); + Self { inner, chain_id: 1, amount, derivation_path: None, inner_nonce: 0 } } /// Sets chain id @@ -22,26 +23,24 @@ impl Wallet { self } - /// Creates a static transfer and signs it - pub async fn transfer_tx(&mut self) -> Bytes { - self.tx(None).await + fn get_derivation_path(&self) -> &str { + self.derivation_path.as_deref().unwrap_or("m/44'/60'/0'/0/") } - /// Creates a transaction with data and signs it - pub async fn tx(&mut self, data: Option<Bytes>) -> Bytes { - let tx = TransactionRequest { - nonce: Some(self.nonce), - value: Some(U256::from(100)), - to: Some(Address::random()), - gas_price: Some(20e9 as u128), - gas: Some(210000), - chain_id: Some(self.chain_id), - input: TransactionInput { input: None, data }, - ..Default::default() - }; - self.nonce += 1; - let signer = EthereumSigner::from(self.inner.clone()); - tx.build(&signer).await.unwrap().encoded_2718().into() + /// Generates `amount` wallets derived from the test mnemonic. + pub fn gen(&self) -> Vec<LocalWallet> { + let builder = MnemonicBuilder::<English>::default().phrase(TEST_MNEMONIC); + + // use the derivation path + let derivation_path = self.get_derivation_path(); + + let mut wallets = Vec::with_capacity(self.amount); + for idx in 0..self.amount { + let builder = + builder.clone().derivation_path(&format!("{derivation_path}{idx}")).unwrap(); + let wallet = builder.build().unwrap().with_chain_id(Some(self.chain_id)); + wallets.push(wallet) + } + wallets } } @@ -49,6 +48,6 @@ const TEST_MNEMONIC: &str = "test test test test test test test test test test t impl Default for Wallet { fn default() -> Self { - Wallet::new(TEST_MNEMONIC) + Wallet::new(1) } } diff --git a/crates/engine-primitives/src/error.rs b/crates/engine-primitives/src/error.rs index d6549a516..f6dd3a8b7 100644 --- a/crates/engine-primitives/src/error.rs +++ b/crates/engine-primitives/src/error.rs @@ -8,11 +8,13 @@ use thiserror::Error; /// both execution payloads and forkchoice update attributes with respect to a method version. #[derive(Error, Debug)] pub enum EngineObjectValidationError { - /// Thrown when the underlying validation error occured while validating an `ExecutionPayload`. + /// Thrown when the underlying validation error occurred while validating an + /// `ExecutionPayload`. #[error("Payload validation error: {0}")] Payload(VersionSpecificValidationError), - /// Thrown when the underlying validation error occured while validating a `PayloadAttributes`. + /// Thrown when the underlying validation error occurred while validating a + /// `PayloadAttributes`. #[error("Payload attributes validation error: {0}")] PayloadAttributes(VersionSpecificValidationError), diff --git a/crates/engine-primitives/src/lib.rs b/crates/engine-primitives/src/lib.rs index e144d0fcd..99edf521c 100644 --- a/crates/engine-primitives/src/lib.rs +++ b/crates/engine-primitives/src/lib.rs @@ -115,6 +115,29 @@ pub fn validate_payload_timestamp( // the payload does not fall within the time frame of the Cancun fork.
return Err(EngineObjectValidationError::UnsupportedFork) } + + let is_prague = chain_spec.is_prague_active_at_timestamp(timestamp); + if version == EngineApiMessageVersion::V4 && !is_prague { + // From the Engine API spec: + // + // + // For `engine_getPayloadV4`: + // + // 1. Client software **MUST** return `-38005: Unsupported fork` error if the `timestamp` of + // the built payload does not fall within the time frame of the Prague fork. + // + // For `engine_forkchoiceUpdatedV4`: + // + // 2. Client software **MUST** return `-38005: Unsupported fork` error if the + // `payloadAttributes` is set and the `payloadAttributes.timestamp` does not fall within + // the time frame of the Prague fork. + // + // For `engine_newPayloadV4`: + // + // 2. Client software **MUST** return `-38005: Unsupported fork` error if the `timestamp` of + // the payload does not fall within the time frame of the Prague fork. + return Err(EngineObjectValidationError::UnsupportedFork) + } Ok(()) } @@ -128,7 +151,7 @@ pub fn validate_withdrawals_presence( timestamp: u64, has_withdrawals: bool, ) -> Result<(), EngineObjectValidationError> { - let is_shanghai = chain_spec.is_shanghai_active_at_timestamp(timestamp); + let is_shanghai_active = chain_spec.is_shanghai_active_at_timestamp(timestamp); match version { EngineApiMessageVersion::V1 => { @@ -136,17 +159,17 @@ pub fn validate_withdrawals_presence( return Err(message_validation_kind .to_error(VersionSpecificValidationError::WithdrawalsNotSupportedInV1)) } - if is_shanghai { + if is_shanghai_active { return Err(message_validation_kind .to_error(VersionSpecificValidationError::NoWithdrawalsPostShanghai)) } } - EngineApiMessageVersion::V2 | EngineApiMessageVersion::V3 => { - if is_shanghai && !has_withdrawals { + EngineApiMessageVersion::V2 | EngineApiMessageVersion::V3 | EngineApiMessageVersion::V4 => { + if is_shanghai_active && !has_withdrawals { return Err(message_validation_kind .to_error(VersionSpecificValidationError::NoWithdrawalsPostShanghai)) } - if !is_shanghai && has_withdrawals { + if !is_shanghai_active && has_withdrawals { return Err(message_validation_kind .to_error(VersionSpecificValidationError::HasWithdrawalsPreShanghai)) } @@ -237,7 +260,7 @@ pub fn validate_parent_beacon_block_root_presence( )) } } - EngineApiMessageVersion::V3 => { + EngineApiMessageVersion::V3 | EngineApiMessageVersion::V4 => { if !has_parent_beacon_block_root { return Err(validation_kind .to_error(VersionSpecificValidationError::NoParentBeaconBlockRootPostCancun)) @@ -321,10 +344,14 @@ pub enum EngineApiMessageVersion { V1, /// Version 2 /// - /// Added for shanghai hardfork. + /// Added in the Shanghai hardfork. V2, /// Version 3 /// - /// Added for cancun hardfork. + /// Added in the Cancun hardfork. V3, + /// Version 4 + /// + /// Added in the Prague hardfork. + V4, } diff --git a/crates/ethereum-forks/src/forkid.rs b/crates/ethereum-forks/src/forkid.rs index bb163c86e..ee4edb8bd 100644 --- a/crates/ethereum-forks/src/forkid.rs +++ b/crates/ethereum-forks/src/forkid.rs @@ -4,7 +4,7 @@ use crate::Head; use alloy_primitives::{hex, BlockNumber, B256}; -use alloy_rlp::*; +use alloy_rlp::{Error as RlpError, *}; #[cfg(any(test, feature = "arbitrary"))] use arbitrary::Arbitrary; use crc::*; @@ -115,6 +115,64 @@ pub struct ForkId { pub next: u64, } +/// Represents a forward-compatible ENR entry for including the forkid in a node record via +/// EIP-868. Forward compatibility is achieved via EIP-8. 
+/// +/// See: +/// +/// for how geth implements ForkId values and forward compatibility. +#[derive(Debug, Clone, PartialEq, Eq, RlpEncodable)] +pub struct EnrForkIdEntry { + /// The inner forkid + pub fork_id: ForkId, +} + +impl Decodable for EnrForkIdEntry { + // NOTE(onbjerg): Manual implementation to satisfy EIP-8. + // + // See https://eips.ethereum.org/EIPS/eip-8 + fn decode(buf: &mut &[u8]) -> alloy_rlp::Result<Self> { + let b = &mut &**buf; + let rlp_head = Header::decode(b)?; + if !rlp_head.list { + return Err(RlpError::UnexpectedString) + } + let started_len = b.len(); + + let this = Self { fork_id: Decodable::decode(b)? }; + + // NOTE(onbjerg): Because of EIP-8, we only check that we did not consume *more* than the + // payload length, i.e. it is ok if payload length is greater than what we consumed, as we + // just discard the remaining list items + let consumed = started_len - b.len(); + if consumed > rlp_head.payload_length { + return Err(RlpError::ListLengthMismatch { + expected: rlp_head.payload_length, + got: consumed, + }) + } + + let rem = rlp_head.payload_length - consumed; + b.advance(rem); + *buf = *b; + + Ok(this) + } +} + +impl From<ForkId> for EnrForkIdEntry { + fn from(fork_id: ForkId) -> Self { + Self { fork_id } + } +} + +impl From<EnrForkIdEntry> for ForkId { + fn from(entry: EnrForkIdEntry) -> Self { + entry.fork_id + } +} + /// Reason for rejecting provided `ForkId`. #[derive(Clone, Copy, Debug, Error, PartialEq, Eq, Hash)] pub enum ValidationError { @@ -626,4 +684,39 @@ mod tests { assert!(fork_filter.set_head_priv(Head { number: b2, ..Default::default() }).is_some()); assert_eq!(fork_filter.current(), h2); } + + mod eip8 { + use super::*; + + fn junk_enr_fork_id_entry() -> Vec<u8> { + let mut buf = Vec::new(); + // the forkid entry to encode + let fork_id = ForkId { hash: ForkHash(hex!("deadbeef")), next: 0xBADDCAFE }; + + // add some junk + let junk: u64 = 112233; + + // rlp header encoding + let payload_length = fork_id.length() + junk.length(); + alloy_rlp::Header { list: true, payload_length }.encode(&mut buf); + + // fields + fork_id.encode(&mut buf); + junk.encode(&mut buf); + + buf + } + + #[test] + fn eip8_decode_enr_fork_id_entry() { + let enr_fork_id_entry_with_junk = junk_enr_fork_id_entry(); + + let mut buf = enr_fork_id_entry_with_junk.as_slice(); + let decoded = EnrForkIdEntry::decode(&mut buf).unwrap(); + assert_eq!( + decoded.fork_id, + ForkId { hash: ForkHash(hex!("deadbeef")), next: 0xBADDCAFE } + ); + } + } } diff --git a/crates/ethereum-forks/src/hardfork.rs b/crates/ethereum-forks/src/hardfork.rs index 6ccb30697..41d1f1302 100644 --- a/crates/ethereum-forks/src/hardfork.rs +++ b/crates/ethereum-forks/src/hardfork.rs @@ -73,6 +73,9 @@ pub enum Hardfork { // Upcoming /// Prague: Prague, + /// Fjord: + #[cfg(feature = "optimism")] + Fjord, } impl Hardfork { diff --git a/crates/ethereum-forks/src/lib.rs b/crates/ethereum-forks/src/lib.rs index e781fe3a5..6dbec7c38 100644 --- a/crates/ethereum-forks/src/lib.rs +++ b/crates/ethereum-forks/src/lib.rs @@ -20,7 +20,9 @@ mod forkid; mod hardfork; mod head; -pub use forkid::{ForkFilter, ForkFilterKey, ForkHash, ForkId, ForkTransition, ValidationError}; +pub use forkid::{ + EnrForkIdEntry, ForkFilter, ForkFilterKey, ForkHash, ForkId, ForkTransition, ValidationError, +}; pub use hardfork::Hardfork; pub use head::Head; diff --git a/crates/consensus/beacon-core/Cargo.toml b/crates/ethereum/consensus/Cargo.toml similarity index 83% rename from crates/consensus/beacon-core/Cargo.toml rename to
crates/ethereum/consensus/Cargo.toml index 232631f73..f3ff5d4d3 100644 --- a/crates/consensus/beacon-core/Cargo.toml +++ b/crates/ethereum/consensus/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "reth-beacon-consensus-core" +name = "reth-ethereum-consensus" version.workspace = true edition.workspace = true rust-version.workspace = true @@ -14,7 +14,7 @@ workspace = true # reth reth-consensus-common.workspace = true reth-primitives.workspace = true -reth-interfaces.workspace = true +reth-consensus.workspace = true [features] optimism = ["reth-primitives/optimism"] \ No newline at end of file diff --git a/crates/consensus/beacon-core/src/lib.rs b/crates/ethereum/consensus/src/lib.rs similarity index 82% rename from crates/consensus/beacon-core/src/lib.rs rename to crates/ethereum/consensus/src/lib.rs index 599e01009..ed283f026 100644 --- a/crates/consensus/beacon-core/src/lib.rs +++ b/crates/ethereum/consensus/src/lib.rs @@ -8,30 +8,30 @@ #![cfg_attr(not(test), warn(unused_crate_dependencies))] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] +use reth_consensus::{Consensus, ConsensusError}; use reth_consensus_common::validation; -use reth_interfaces::consensus::{Consensus, ConsensusError}; use reth_primitives::{ - constants::MAXIMUM_EXTRA_DATA_SIZE, Chain, ChainSpec, Hardfork, Header, SealedBlock, - SealedHeader, EMPTY_OMMER_ROOT_HASH, U256, + Chain, ChainSpec, Hardfork, Header, SealedBlock, SealedHeader, EMPTY_OMMER_ROOT_HASH, U256, }; use std::{sync::Arc, time::SystemTime}; + /// Ethereum beacon consensus /// /// This consensus engine does basic checks as outlined in the execution specs. #[derive(Debug)] -pub struct BeaconConsensus { +pub struct EthBeaconConsensus { /// Configuration chain_spec: Arc, } -impl BeaconConsensus { - /// Create a new instance of [BeaconConsensus] +impl EthBeaconConsensus { + /// Create a new instance of [EthBeaconConsensus] pub fn new(chain_spec: Arc) -> Self { Self { chain_spec } } } -impl Consensus for BeaconConsensus { +impl Consensus for EthBeaconConsensus { fn validate_header(&self, header: &SealedHeader) -> Result<(), ConsensusError> { validation::validate_header_standalone(header, &self.chain_spec)?; Ok(()) @@ -87,7 +87,7 @@ impl Consensus for BeaconConsensus { // is greater than its parent timestamp. // validate header extradata for all networks post merge - validate_header_extradata(header)?; + validation::validate_header_extradata(header)?; // mixHash is used instead of difficulty inside EVM // https://eips.ethereum.org/EIPS/eip-4399#using-mixhash-field-instead-of-difficulty @@ -111,7 +111,7 @@ impl Consensus for BeaconConsensus { // * If the network is goerli pre-merge, ignore the extradata check, since we do not // support clique. Same goes for OP blocks below Bedrock. if self.chain_spec.chain != Chain::goerli() && !self.chain_spec.is_optimism() { - validate_header_extradata(header)?; + validation::validate_header_extradata(header)?; } } @@ -122,15 +122,3 @@ impl Consensus for BeaconConsensus { validation::validate_block_standalone(block, &self.chain_spec) } } - -/// Validates the header's extradata according to the beacon consensus rules. -/// -/// From yellow paper: extraData: An arbitrary byte array containing data relevant to this block. -/// This must be 32 bytes or fewer; formally Hx. 
-fn validate_header_extradata(header: &Header) -> Result<(), ConsensusError> { - if header.extra_data.len() > MAXIMUM_EXTRA_DATA_SIZE { - Err(ConsensusError::ExtraDataExceedsMax { len: header.extra_data.len() }) - } else { - Ok(()) - } -} diff --git a/crates/ethereum/engine-primitives/src/lib.rs b/crates/ethereum/engine-primitives/src/lib.rs index 6b030a9c5..cb6d0231e 100644 --- a/crates/ethereum/engine-primitives/src/lib.rs +++ b/crates/ethereum/engine-primitives/src/lib.rs @@ -1,4 +1,4 @@ -//! Ethereum specifc +//! Ethereum specific #![doc( html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png", diff --git a/crates/ethereum/engine-primitives/src/payload.rs b/crates/ethereum/engine-primitives/src/payload.rs index a6c47ebde..55a97c96d 100644 --- a/crates/ethereum/engine-primitives/src/payload.rs +++ b/crates/ethereum/engine-primitives/src/payload.rs @@ -11,8 +11,7 @@ use reth_rpc_types::engine::{ PayloadId, }; use reth_rpc_types_compat::engine::payload::{ - block_to_payload_v3, convert_block_to_payload_field_v2, - convert_standalone_withdraw_to_withdrawal, try_block_to_payload_v1, + block_to_payload_v1, block_to_payload_v3, convert_block_to_payload_field_v2, }; use revm_primitives::{BlobExcessGasAndPrice, BlockEnv, CfgEnv, CfgEnvWithHandlerCfg, SpecId}; use std::convert::Infallible; @@ -58,6 +57,11 @@ impl EthBuiltPayload { self.fees } + /// Returns the blob sidecars. + pub fn sidecars(&self) -> &[BlobTransactionSidecar] { + &self.sidecars + } + /// Adds sidecars to the payload. pub fn extend_sidecars(&mut self, sidecars: Vec) { self.sidecars.extend(sidecars) @@ -87,7 +91,7 @@ impl<'a> BuiltPayload for &'a EthBuiltPayload { // V1 engine_getPayloadV1 response impl From for ExecutionPayloadV1 { fn from(value: EthBuiltPayload) -> Self { - try_block_to_payload_v1(value.block) + block_to_payload_v1(value.block) } } @@ -159,22 +163,13 @@ impl EthPayloadBuilderAttributes { pub fn new(parent: B256, attributes: PayloadAttributes) -> Self { let id = payload_id(&parent, &attributes); - let withdraw = attributes.withdrawals.map(|withdrawals| { - Withdrawals::new( - withdrawals - .into_iter() - .map(convert_standalone_withdraw_to_withdrawal) // Removed the parentheses here - .collect(), - ) - }); - Self { id, parent, timestamp: attributes.timestamp, suggested_fee_recipient: attributes.suggested_fee_recipient, prev_randao: attributes.prev_randao, - withdrawals: withdraw.unwrap_or_default(), + withdrawals: attributes.withdrawals.unwrap_or_default().into(), parent_beacon_block_root: attributes.parent_beacon_block_root, } } diff --git a/crates/ethereum/evm/Cargo.toml b/crates/ethereum/evm/Cargo.toml index ea7cfab8c..6fa61e34f 100644 --- a/crates/ethereum/evm/Cargo.toml +++ b/crates/ethereum/evm/Cargo.toml @@ -16,7 +16,6 @@ reth-evm.workspace = true reth-primitives.workspace = true reth-revm.workspace = true reth-interfaces.workspace = true -reth-provider.workspace = true # Ethereum revm-primitives.workspace = true diff --git a/crates/revm/src/eth_dao_fork.rs b/crates/ethereum/evm/src/dao_fork.rs similarity index 100% rename from crates/revm/src/eth_dao_fork.rs rename to crates/ethereum/evm/src/dao_fork.rs diff --git a/crates/ethereum/evm/src/execute.rs b/crates/ethereum/evm/src/execute.rs index b23c35cfd..c80e476bc 100644 --- a/crates/ethereum/evm/src/execute.rs +++ b/crates/ethereum/evm/src/execute.rs @@ -1,28 +1,28 @@ //! Ethereum block executor. 
-use crate::EthEvmConfig; +use crate::{ + dao_fork::{DAO_HARDFORK_BENEFICIARY, DAO_HARDKFORK_ACCOUNTS}, + verify::verify_receipts, + EthEvmConfig, +}; use reth_evm::{ execute::{ - BatchBlockOutput, BatchExecutor, EthBlockExecutionInput, EthBlockOutput, Executor, - ExecutorProvider, + BatchBlockExecutionOutput, BatchExecutor, BlockExecutionInput, BlockExecutionOutput, + BlockExecutorProvider, Executor, }, - ConfigureEvm, ConfigureEvmEnv, + ConfigureEvm, }; use reth_interfaces::{ executor::{BlockExecutionError, BlockValidationError}, provider::ProviderError, }; use reth_primitives::{ - BlockWithSenders, ChainSpec, GotExpected, Hardfork, Header, PruneModes, Receipt, Receipts, - Withdrawals, U256, + BlockNumber, BlockWithSenders, ChainSpec, GotExpected, Hardfork, Header, PruneModes, Receipt, + Receipts, Withdrawals, MAINNET, U256, }; -use reth_provider::BundleStateWithReceipts; use reth_revm::{ batch::{BlockBatchRecord, BlockExecutorStats}, db::states::bundle_state::BundleRetention, - eth_dao_fork::{DAO_HARDFORK_BENEFICIARY, DAO_HARDKFORK_ACCOUNTS}, - processor::verify_receipt, - stack::InspectorStack, state_change::{apply_beacon_root_contract_call, post_block_balance_increments}, Evm, State, }; @@ -35,43 +35,33 @@ use tracing::debug; /// Provides executors to execute regular ethereum blocks #[derive(Debug, Clone)] -pub struct EthExecutorProvider { +pub struct EthExecutorProvider { chain_spec: Arc, evm_config: EvmConfig, - inspector: Option, - prune_modes: PruneModes, } -impl EthExecutorProvider { +impl EthExecutorProvider { /// Creates a new default ethereum executor provider. pub fn ethereum(chain_spec: Arc) -> Self { Self::new(chain_spec, Default::default()) } + + /// Returns a new provider for the mainnet. + pub fn mainnet() -> Self { + Self::ethereum(MAINNET.clone()) + } } impl EthExecutorProvider { /// Creates a new executor provider. pub fn new(chain_spec: Arc, evm_config: EvmConfig) -> Self { - Self { chain_spec, evm_config, inspector: None, prune_modes: PruneModes::none() } - } - - /// Configures an optional inspector stack for debugging. - pub fn with_inspector(mut self, inspector: InspectorStack) -> Self { - self.inspector = Some(inspector); - self - } - - /// Configures the prune modes for the executor. 
- pub fn with_prune_modes(mut self, prune_modes: PruneModes) -> Self { - self.prune_modes = prune_modes; - self + Self { chain_spec, evm_config } } } impl EthExecutorProvider where EvmConfig: ConfigureEvm, - EvmConfig: ConfigureEvmEnv, { fn eth_executor(&self, db: DB) -> EthBlockExecutor where @@ -82,14 +72,12 @@ where self.evm_config.clone(), State::builder().with_database(db).with_bundle_update().without_state_clear().build(), ) - .with_inspector(self.inspector.clone()) } } -impl ExecutorProvider for EthExecutorProvider +impl BlockExecutorProvider for EthExecutorProvider where EvmConfig: ConfigureEvm, - EvmConfig: ConfigureEvmEnv, { type Executor> = EthBlockExecutor; @@ -102,14 +90,14 @@ where self.eth_executor(db) } - fn batch_executor(&self, db: DB) -> Self::BatchExecutor + fn batch_executor(&self, db: DB, prune_modes: PruneModes) -> Self::BatchExecutor where DB: Database, { let executor = self.eth_executor(db); EthBatchExecutor { executor, - batch_record: BlockBatchRecord::new(self.prune_modes.clone()), + batch_record: BlockBatchRecord::new(prune_modes), stats: BlockExecutorStats::default(), } } @@ -127,7 +115,6 @@ struct EthEvmExecutor { impl EthEvmExecutor where EvmConfig: ConfigureEvm, - EvmConfig: ConfigureEvmEnv, { /// Executes the transactions in the block and returns the receipts. /// @@ -137,7 +124,7 @@ where /// /// It does __not__ apply post-execution changes. fn execute_pre_and_transactions( - &mut self, + &self, block: &BlockWithSenders, mut evm: Evm<'_, Ext, &mut State>, ) -> Result<(Vec, u64), BlockExecutionError> @@ -168,7 +155,7 @@ where .into()) } - EvmConfig::fill_tx_env(evm.tx_mut(), transaction, *sender, ()); + EvmConfig::fill_tx_env(evm.tx_mut(), transaction, *sender); // Execute transaction. let ResultAndState { result, state } = evm.transact().map_err(move |err| { @@ -225,20 +212,12 @@ pub struct EthBlockExecutor { executor: EthEvmExecutor, /// The state to use for execution state: State, - /// Optional inspector stack for debugging - inspector: Option, } impl EthBlockExecutor { /// Creates a new Ethereum block executor. pub fn new(chain_spec: Arc, evm_config: EvmConfig, state: State) -> Self { - Self { executor: EthEvmExecutor { chain_spec, evm_config }, state, inspector: None } - } - - /// Sets the inspector stack for debugging. - pub fn with_inspector(mut self, inspector: Option) -> Self { - self.inspector = inspector; - self + Self { executor: EthEvmExecutor { chain_spec, evm_config }, state } } #[inline] @@ -256,8 +235,6 @@ impl EthBlockExecutor { impl EthBlockExecutor where EvmConfig: ConfigureEvm, - // TODO(mattsse): get rid of this - EvmConfig: ConfigureEvmEnv, DB: Database, { /// Configures a new evm configuration and block environment for the given block. @@ -296,19 +273,9 @@ where let env = self.evm_env_for_block(&block.header, total_difficulty); let (receipts, gas_used) = { - if let Some(inspector) = self.inspector.as_mut() { - let evm = self.executor.evm_config.evm_with_env_and_inspector( - &mut self.state, - env, - inspector, - ); - self.executor.execute_pre_and_transactions(block, evm)? - } else { - let evm = self.executor.evm_config.evm_with_env(&mut self.state, env); - - self.executor.execute_pre_and_transactions(block, evm)? - } - }; + let evm = self.executor.evm_config.evm_with_env(&mut self.state, env); + self.executor.execute_pre_and_transactions(block, evm) + }?; // 3. apply post execution changes self.post_execution(block, total_difficulty)?; @@ -318,9 +285,11 @@ where // transaction This was replaced with is_success flag. 
// See more about EIP here: https://eips.ethereum.org/EIPS/eip-658 if self.chain_spec().is_byzantium_active_at_block(block.header.number) { - if let Err(error) = - verify_receipt(block.header.receipts_root, block.header.logs_bloom, receipts.iter()) - { + if let Err(error) = verify_receipts( + block.header.receipts_root, + block.header.logs_bloom, + receipts.iter(), + ) { debug!(target: "evm", %error, ?receipts, "receipts verification failed"); return Err(error) }; @@ -379,11 +348,10 @@ where impl<EvmConfig, DB> Executor<DB> for EthBlockExecutor<EvmConfig, DB> where EvmConfig: ConfigureEvm, - EvmConfig: ConfigureEvmEnv, DB: Database<Error = ProviderError>, { - type Input<'a> = EthBlockExecutionInput<'a, BlockWithSenders>; - type Output = EthBlockOutput; + type Input<'a> = BlockExecutionInput<'a, BlockWithSenders>; + type Output = BlockExecutionOutput; type Error = BlockExecutionError; /// Executes the block and commits the state changes. @@ -394,13 +362,13 @@ where /// /// State changes are committed to the database. fn execute(mut self, input: Self::Input<'_>) -> Result<Self::Output, Self::Error> { - let EthBlockExecutionInput { block, total_difficulty } = input; + let BlockExecutionInput { block, total_difficulty } = input; let (receipts, gas_used) = self.execute_and_verify(block, total_difficulty)?; - // prepare the state for extraction - self.state.merge_transitions(BundleRetention::PlainState); + // NOTE: we need to keep the reverts when merging transitions, hence the Reverts retention + self.state.merge_transitions(BundleRetention::Reverts); - Ok(EthBlockOutput { state: self.state.take_bundle(), receipts, gas_used }) + Ok(BlockExecutionOutput { state: self.state.take_bundle(), receipts, gas_used }) } } @@ -429,16 +397,14 @@ impl EthBatchExecutor { impl<EvmConfig, DB> BatchExecutor<DB> for EthBatchExecutor<EvmConfig, DB> where EvmConfig: ConfigureEvm, - // TODO(mattsse): get rid of this - EvmConfig: ConfigureEvmEnv, DB: Database<Error = ProviderError>, { - type Input<'a> = EthBlockExecutionInput<'a, BlockWithSenders>; - type Output = BundleStateWithReceipts; + type Input<'a> = BlockExecutionInput<'a, BlockWithSenders>; + type Output = BatchBlockExecutionOutput; type Error = BlockExecutionError; - fn execute_one(&mut self, input: Self::Input<'_>) -> Result<BatchBlockOutput, Self::Error> { - let EthBlockExecutionInput { block, total_difficulty } = input; + fn execute_one(&mut self, input: Self::Input<'_>) -> Result<(), Self::Error> { + let BlockExecutionInput { block, total_difficulty } = input; let (receipts, _gas_used) = self.executor.execute_and_verify(block, total_difficulty)?; // prepare the state according to the prune mode @@ -448,28 +414,39 @@ where // store receipts in the set self.batch_record.save_receipts(receipts)?; - Ok(BatchBlockOutput { size_hint: Some(self.executor.state.bundle_size_hint()) }) + if self.batch_record.first_block().is_none() { + self.batch_record.set_first_block(block.number); + } + + Ok(()) } fn finalize(mut self) -> Self::Output { self.stats.log_debug(); - BundleStateWithReceipts::new( + BatchBlockExecutionOutput::new( self.executor.state.take_bundle(), self.batch_record.take_receipts(), self.batch_record.first_block().unwrap_or_default(), ) } + + fn set_tip(&mut self, tip: BlockNumber) { + self.batch_record.set_tip(tip); + } + + fn size_hint(&self) -> Option<usize> { + Some(self.executor.state.bundle_state.size_hint()) + } } #[cfg(test)] mod tests { use super::*; - use crate::EthEvmConfig; use reth_primitives::{ bytes, constants::{BEACON_ROOTS_ADDRESS, SYSTEM_ADDRESS}, - keccak256, Account, Block, Bytes, ChainSpecBuilder, ForkCondition, B256, MAINNET, + keccak256, Account, Block, Bytes, ChainSpecBuilder, ForkCondition, B256, }; use reth_revm::{
database::StateProviderDatabase, test_utils::StateProviderTest, TransitionState, @@ -498,12 +475,7 @@ mod tests { } fn executor_provider(chain_spec: Arc<ChainSpec>) -> EthExecutorProvider { - EthExecutorProvider { - chain_spec, - evm_config: Default::default(), - inspector: None, - prune_modes: Default::default(), - } + EthExecutorProvider { chain_spec, evm_config: Default::default() } } #[test] @@ -543,9 +515,10 @@ mod tests { .expect_err( "Executing cancun block without parent beacon block root field should fail", ); + assert_eq!( - err, - BlockExecutionError::Validation(BlockValidationError::MissingParentBeaconBlockRoot) + err.as_validation().unwrap().clone(), + BlockValidationError::MissingParentBeaconBlockRoot ); // fix header, set a gas limit @@ -698,7 +671,8 @@ mod tests { let provider = executor_provider(chain_spec); - let mut executor = provider.batch_executor(StateProviderDatabase::new(&db)); + let mut executor = + provider.batch_executor(StateProviderDatabase::new(&db), PruneModes::none()); // attempt to execute the genesis block with non-zero parent beacon block root, expect err header.parent_beacon_block_root = Some(B256::with_last_byte(0x69)); @@ -778,7 +752,8 @@ mod tests { let provider = executor_provider(chain_spec); // execute header - let mut executor = provider.batch_executor(StateProviderDatabase::new(&db)); + let mut executor = + provider.batch_executor(StateProviderDatabase::new(&db), PruneModes::none()); // Now execute a block with the fixed header, ensure that it does not fail executor diff --git a/crates/ethereum/evm/src/lib.rs b/crates/ethereum/evm/src/lib.rs index a320a2b3c..7799cf410 100644 --- a/crates/ethereum/evm/src/lib.rs +++ b/crates/ethereum/evm/src/lib.rs @@ -12,9 +12,14 @@ use reth_evm::{ConfigureEvm, ConfigureEvmEnv}; use reth_primitives::{ revm::{config::revm_spec, env::fill_tx_env}, revm_primitives::{AnalysisKind, CfgEnvWithHandlerCfg, TxEnv}, - Address, ChainSpec, Head, Header, Transaction, U256, + Address, ChainSpec, Head, Header, TransactionSigned, U256, }; +use reth_revm::{Database, EvmBuilder}; pub mod execute; +pub mod verify; + +/// Ethereum DAO hardfork state change data. +pub mod dao_fork; /// Ethereum-related EVM configuration. #[derive(Debug, Clone, Copy, Default)] @@ -22,12 +27,7 @@ pub mod execute; pub struct EthEvmConfig; impl ConfigureEvmEnv for EthEvmConfig { - type TxMeta = (); - - fn fill_tx_env(tx_env: &mut TxEnv, transaction: T, sender: Address, _meta: ()) - where - T: AsRef<Transaction>, - { + fn fill_tx_env(tx_env: &mut TxEnv, transaction: &TransactionSigned, sender: Address) { fill_tx_env(tx_env, transaction, sender) } @@ -55,7 +55,16 @@ impl ConfigureEvmEnv for EthEvmConfig { } } -impl ConfigureEvm for EthEvmConfig {} +impl ConfigureEvm for EthEvmConfig { + type DefaultExternalContext<'a> = (); + + fn evm<'a, DB: Database + 'a>( + &self, + db: DB, + ) -> reth_revm::Evm<'a, Self::DefaultExternalContext<'a>, DB> { + EvmBuilder::default().with_db(db).build() + } +} #[cfg(test)] mod tests { diff --git a/crates/ethereum/evm/src/verify.rs b/crates/ethereum/evm/src/verify.rs new file mode 100644 index 000000000..6f552fe42 --- /dev/null +++ b/crates/ethereum/evm/src/verify.rs @@ -0,0 +1,53 @@ +//! Helpers for verifying the receipts. + +use reth_interfaces::executor::{BlockExecutionError, BlockValidationError}; +use reth_primitives::{Bloom, GotExpected, Receipt, ReceiptWithBloom, B256}; + +/// Calculate the receipts root, and compare it against the expected receipts root and logs +/// bloom.
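Before the verify helpers' implementation below, the refactored single-block API from the hunks above is worth seeing end to end. A hedged sketch, where the state provider, the recovered block, and its total difficulty are assumed inputs rather than prescribed ones:

```rust
use reth_evm::execute::{BlockExecutorProvider, Executor};
use reth_evm_ethereum::execute::EthExecutorProvider;
use reth_primitives::{BlockWithSenders, U256};
use reth_revm::database::StateProviderDatabase;

// Sketch: build the new mainnet provider, take a one-shot executor, and feed
// it a (block, total_difficulty) input via the From impl on the input type.
fn run_block<SP: reth_provider::StateProvider>(
    state_provider: SP,
    block: &BlockWithSenders,
    total_difficulty: U256,
) -> eyre::Result<u64> {
    let provider = EthExecutorProvider::mainnet();
    let executor = provider.executor(StateProviderDatabase::new(state_provider));
    let output = executor.execute((block, total_difficulty).into())?;
    Ok(output.gas_used)
}
```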
+pub fn verify_receipts<'a>( + expected_receipts_root: B256, + expected_logs_bloom: Bloom, + receipts: impl Iterator + Clone, +) -> Result<(), BlockExecutionError> { + // Calculate receipts root. + let receipts_with_bloom = receipts.map(|r| r.clone().into()).collect::>(); + let receipts_root = reth_primitives::proofs::calculate_receipt_root(&receipts_with_bloom); + + // Create header log bloom. + let logs_bloom = receipts_with_bloom.iter().fold(Bloom::ZERO, |bloom, r| bloom | r.bloom); + + compare_receipts_root_and_logs_bloom( + receipts_root, + logs_bloom, + expected_receipts_root, + expected_logs_bloom, + )?; + + Ok(()) +} + +/// Compare the calculated receipts root with the expected receipts root, also compare +/// the calculated logs bloom with the expected logs bloom. +pub fn compare_receipts_root_and_logs_bloom( + calculated_receipts_root: B256, + calculated_logs_bloom: Bloom, + expected_receipts_root: B256, + expected_logs_bloom: Bloom, +) -> Result<(), BlockExecutionError> { + if calculated_receipts_root != expected_receipts_root { + return Err(BlockValidationError::ReceiptRootDiff( + GotExpected { got: calculated_receipts_root, expected: expected_receipts_root }.into(), + ) + .into()) + } + + if calculated_logs_bloom != expected_logs_bloom { + return Err(BlockValidationError::BloomLogDiff( + GotExpected { got: calculated_logs_bloom, expected: expected_logs_bloom }.into(), + ) + .into()) + } + + Ok(()) +} diff --git a/crates/node-ethereum/Cargo.toml b/crates/ethereum/node/Cargo.toml similarity index 100% rename from crates/node-ethereum/Cargo.toml rename to crates/ethereum/node/Cargo.toml diff --git a/crates/node-ethereum/src/evm.rs b/crates/ethereum/node/src/evm.rs similarity index 53% rename from crates/node-ethereum/src/evm.rs rename to crates/ethereum/node/src/evm.rs index a5528d74a..d710d8d8d 100644 --- a/crates/node-ethereum/src/evm.rs +++ b/crates/ethereum/node/src/evm.rs @@ -1,4 +1,6 @@ //! 
Ethereum EVM support +#[doc(inline)] +pub use reth_evm_ethereum::execute::EthExecutorProvider; #[doc(inline)] pub use reth_evm_ethereum::EthEvmConfig; diff --git a/crates/node-ethereum/src/lib.rs b/crates/ethereum/node/src/lib.rs similarity index 92% rename from crates/node-ethereum/src/lib.rs rename to crates/ethereum/node/src/lib.rs index cea2e7be0..44ec6836c 100644 --- a/crates/node-ethereum/src/lib.rs +++ b/crates/ethereum/node/src/lib.rs @@ -11,7 +11,7 @@ pub use reth_ethereum_engine_primitives::EthEngineTypes; pub mod evm; -pub use evm::EthEvmConfig; +pub use evm::{EthEvmConfig, EthExecutorProvider}; pub mod node; pub use node::EthereumNode; diff --git a/crates/node-ethereum/src/node.rs b/crates/ethereum/node/src/node.rs similarity index 78% rename from crates/node-ethereum/src/node.rs rename to crates/ethereum/node/src/node.rs index e7caa927a..87bc54d15 100644 --- a/crates/node-ethereum/src/node.rs +++ b/crates/ethereum/node/src/node.rs @@ -2,11 +2,14 @@ use crate::{EthEngineTypes, EthEvmConfig}; use reth_basic_payload_builder::{BasicPayloadJobGenerator, BasicPayloadJobGeneratorConfig}; +use reth_evm_ethereum::execute::EthExecutorProvider; use reth_network::NetworkHandle; use reth_node_builder::{ - components::{ComponentsBuilder, NetworkBuilder, PayloadServiceBuilder, PoolBuilder}, - node::{FullNodeTypes, Node, NodeTypes}, - BuilderContext, PayloadBuilderConfig, + components::{ + ComponentsBuilder, ExecutorBuilder, NetworkBuilder, PayloadServiceBuilder, PoolBuilder, + }, + node::{FullNodeTypes, NodeTypes}, + BuilderContext, Node, PayloadBuilderConfig, }; use reth_payload_builder::{PayloadBuilderHandle, PayloadBuilderService}; use reth_provider::CanonStateSubscriptions; @@ -23,8 +26,13 @@ pub struct EthereumNode; impl EthereumNode { /// Returns a [ComponentsBuilder] configured for a regular Ethereum node. - pub fn components( - ) -> ComponentsBuilder + pub fn components() -> ComponentsBuilder< + Node, + EthereumPoolBuilder, + EthereumPayloadBuilder, + EthereumNetworkBuilder, + EthereumExecutorBuilder, + > where Node: FullNodeTypes, { @@ -33,35 +41,53 @@ impl EthereumNode { .pool(EthereumPoolBuilder::default()) .payload(EthereumPayloadBuilder::default()) .network(EthereumNetworkBuilder::default()) + .executor(EthereumExecutorBuilder::default()) } } impl NodeTypes for EthereumNode { type Primitives = (); type Engine = EthEngineTypes; - type Evm = EthEvmConfig; - - fn evm_config(&self) -> Self::Evm { - EthEvmConfig::default() - } } impl Node for EthereumNode where N: FullNodeTypes, { - type PoolBuilder = EthereumPoolBuilder; - type NetworkBuilder = EthereumNetworkBuilder; - type PayloadBuilder = EthereumPayloadBuilder; + type ComponentsBuilder = ComponentsBuilder< + N, + EthereumPoolBuilder, + EthereumPayloadBuilder, + EthereumNetworkBuilder, + EthereumExecutorBuilder, + >; + + fn components_builder(self) -> Self::ComponentsBuilder { + Self::components() + } +} - fn components( +/// A regular ethereum evm and executor builder. 
+#[derive(Debug, Default, Clone, Copy)] +#[non_exhaustive] +pub struct EthereumExecutorBuilder; + +impl ExecutorBuilder for EthereumExecutorBuilder +where + Node: FullNodeTypes, +{ + type EVM = EthEvmConfig; + type Executor = EthExecutorProvider; + + async fn build_evm( self, - ) -> ComponentsBuilder { - ComponentsBuilder::default() - .node_types::() - .pool(EthereumPoolBuilder::default()) - .payload(EthereumPayloadBuilder::default()) - .network(EthereumNetworkBuilder::default()) + ctx: &BuilderContext, + ) -> eyre::Result<(Self::EVM, Self::Executor)> { + let chain_spec = ctx.chain_spec(); + let evm_config = EthEvmConfig::default(); + let executor = EthExecutorProvider::new(chain_spec, evm_config); + + Ok((evm_config, executor)) } } @@ -83,7 +109,7 @@ where async fn build_pool(self, ctx: &BuilderContext) -> eyre::Result { let data_dir = ctx.data_dir(); - let blob_store = DiskFileBlobStore::open(data_dir.blobstore_path(), Default::default())?; + let blob_store = DiskFileBlobStore::open(data_dir.blobstore(), Default::default())?; let validator = TransactionValidationTaskExecutor::eth_builder(ctx.chain_spec()) .with_head_timestamp(ctx.head().timestamp) .kzg_settings(ctx.kzg_settings()?) @@ -97,7 +123,7 @@ where let transaction_pool = reth_transaction_pool::Pool::eth_pool(validator, blob_store, ctx.pool_config()); info!(target: "reth::cli", "Transaction pool initialized"); - let transactions_path = data_dir.txpool_transactions_path(); + let transactions_path = data_dir.txpool_transactions(); // spawn txpool maintenance task { @@ -158,8 +184,7 @@ where .interval(conf.interval()) .deadline(conf.deadline()) .max_payload_tasks(conf.max_payload_tasks()) - .extradata(conf.extradata_bytes()) - .max_gas_limit(conf.max_gas_limit()); + .extradata(conf.extradata_bytes()); let payload_generator = BasicPayloadJobGenerator::with_builder( ctx.provider().clone(), diff --git a/crates/node-ethereum/tests/assets/genesis.json b/crates/ethereum/node/tests/assets/genesis.json similarity index 100% rename from crates/node-ethereum/tests/assets/genesis.json rename to crates/ethereum/node/tests/assets/genesis.json diff --git a/crates/ethereum/node/tests/e2e/blobs.rs b/crates/ethereum/node/tests/e2e/blobs.rs new file mode 100644 index 000000000..d8fca42d6 --- /dev/null +++ b/crates/ethereum/node/tests/e2e/blobs.rs @@ -0,0 +1,96 @@ +use std::sync::Arc; + +use reth::{ + args::RpcServerArgs, + builder::{NodeBuilder, NodeConfig, NodeHandle}, + rpc::types::engine::PayloadStatusEnum, + tasks::TaskManager, +}; +use reth_e2e_test_utils::{ + node::NodeTestContext, transaction::TransactionTestContext, wallet::Wallet, +}; +use reth_node_ethereum::EthereumNode; +use reth_primitives::{b256, ChainSpecBuilder, Genesis, MAINNET}; +use reth_transaction_pool::TransactionPool; + +use crate::utils::eth_payload_attributes; + +#[tokio::test] +async fn can_handle_blobs() -> eyre::Result<()> { + reth_tracing::init_test_tracing(); + let tasks = TaskManager::current(); + let exec = tasks.executor(); + + let genesis: Genesis = serde_json::from_str(include_str!("../assets/genesis.json")).unwrap(); + let chain_spec = Arc::new( + ChainSpecBuilder::default() + .chain(MAINNET.chain) + .genesis(genesis) + .cancun_activated() + .build(), + ); + let node_config = NodeConfig::test() + .with_chain(chain_spec) + .with_unused_ports() + .with_rpc(RpcServerArgs::default().with_unused_ports().with_http()); + let NodeHandle { node, node_exit_future: _ } = NodeBuilder::new(node_config.clone()) + .testing_node(exec.clone()) + .node(EthereumNode::default()) + 
.launch() + .await?; + + let mut node = NodeTestContext::new(node).await?; + + let wallets = Wallet::new(2).gen(); + let blob_wallet = wallets.first().unwrap(); + let second_wallet = wallets.last().unwrap(); + + // inject normal tx + let raw_tx = TransactionTestContext::transfer_tx(1, second_wallet.clone()).await; + let tx_hash = node.rpc.inject_tx(raw_tx).await?; + // build payload with normal tx + let (payload, attributes) = node.new_payload(eth_payload_attributes).await?; + + // clean the pool + node.inner.pool.remove_transactions(vec![tx_hash]); + + // build blob tx + let blob_tx = TransactionTestContext::tx_with_blobs(1, blob_wallet.clone()).await?; + + // inject blob tx to the pool + let blob_tx_hash = node.rpc.inject_tx(blob_tx).await?; + // fetch it from rpc + let envelope = node.rpc.envelope_by_hash(blob_tx_hash).await?; + // validate sidecar + let versioned_hashes = TransactionTestContext::validate_sidecar(envelope); + + // build a payload + let (blob_payload, blob_attr) = node.new_payload(eth_payload_attributes).await?; + + // submit the blob payload + let blob_block_hash = node + .engine_api + .submit_payload(blob_payload, blob_attr, PayloadStatusEnum::Valid, versioned_hashes.clone()) + .await?; + + let genesis_hash = b256!("d4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3"); + + let (_, _) = tokio::join!( + // send fcu with blob hash + node.engine_api.update_forkchoice(genesis_hash, blob_block_hash), + // send fcu with normal hash + node.engine_api.update_forkchoice(genesis_hash, payload.block().hash()) + ); + + // submit normal payload + node.engine_api.submit_payload(payload, attributes, PayloadStatusEnum::Valid, vec![]).await?; + + tokio::time::sleep(std::time::Duration::from_secs(3)).await; + + // expects the blob tx to be back in the pool + let envelope = node.rpc.envelope_by_hash(blob_tx_hash).await?; + // make sure the sidecar is present + TransactionTestContext::validate_sidecar(envelope); + + Ok(()) +} diff --git a/crates/node-ethereum/tests/e2e/dev.rs b/crates/ethereum/node/tests/e2e/dev.rs similarity index 67% rename from crates/node-ethereum/tests/e2e/dev.rs rename to crates/ethereum/node/tests/e2e/dev.rs index b096bda5a..4570a8c0e 100644 --- a/crates/node-ethereum/tests/e2e/dev.rs +++ b/crates/ethereum/node/tests/e2e/dev.rs @@ -1,43 +1,27 @@ -use futures_util::StreamExt; -use reth::{ - api::FullNodeComponents, - builder::{FullNode, NodeBuilder, NodeHandle}, - providers::CanonStateSubscriptions, - rpc::eth::EthTransactions, - tasks::TaskManager, -}; -use reth_node_core::{args::RpcServerArgs, node_config::NodeConfig}; -use reth_node_ethereum::EthereumNode; +use crate::utils::EthNode; +use futures::StreamExt; +use reth::rpc::eth::EthTransactions; +use reth_e2e_test_utils::setup; use reth_primitives::{b256, hex, ChainSpec, Genesis}; +use reth_provider::CanonStateSubscriptions; use std::sync::Arc; #[tokio::test] async fn can_run_dev_node() -> eyre::Result<()> { - let tasks = TaskManager::current(); + reth_tracing::init_test_tracing(); + let (mut nodes, _tasks, _) = setup(1, custom_chain(), true).await?; - // create node config - let node_config = NodeConfig::test() - .dev() - .with_rpc(RpcServerArgs::default().with_http().with_unused_ports()) - .with_chain(custom_chain()); - - let NodeHandle { node, node_exit_future: _ } = NodeBuilder::new(node_config) - .testing_node(tasks.executor()) - .node(EthereumNode::default()) - .launch() - .await?; - - assert_chain_advances(node).await; + assert_chain_advances(nodes.pop().unwrap()).await; Ok(()) } -async fn 
assert_chain_advances(mut node: FullNode) { - let mut notifications = node.provider.canonical_state_stream(); +async fn assert_chain_advances(mut node: EthNode) { + let mut notifications = node.inner.provider.canonical_state_stream(); // submit tx through rpc let raw_tx = hex!("02f876820a28808477359400847735940082520894ab0840c0e43688012c1adb0f5e3fc665188f83d28a029d394a5d630544000080c080a0a044076b7e67b5deecc63f61a8d7913fab86ca365b344b5759d1fe3563b4c39ea019eab979dd000da04dfc72bb0377c092d30fd9e1cab5ae487de49586cc8b0090"); - let eth_api = node.rpc_registry.eth_api(); + let eth_api = node.inner.rpc_registry.eth_api(); let hash = eth_api.send_raw_transaction(raw_tx.into()).await.unwrap(); diff --git a/crates/node-ethereum/tests/e2e/eth.rs b/crates/ethereum/node/tests/e2e/eth.rs similarity index 60% rename from crates/node-ethereum/tests/e2e/eth.rs rename to crates/ethereum/node/tests/e2e/eth.rs index 6f9eeb999..4f566e7c8 100644 --- a/crates/node-ethereum/tests/e2e/eth.rs +++ b/crates/ethereum/node/tests/e2e/eth.rs @@ -4,7 +4,9 @@ use reth::{ builder::{NodeBuilder, NodeConfig, NodeHandle}, tasks::TaskManager, }; -use reth_e2e_test_utils::{node::NodeHelper, wallet::Wallet}; +use reth_e2e_test_utils::{ + node::NodeTestContext, setup, transaction::TransactionTestContext, wallet::Wallet, +}; use reth_node_ethereum::EthereumNode; use reth_primitives::{ChainSpecBuilder, Genesis, MAINNET}; use std::sync::Arc; @@ -13,38 +15,34 @@ use std::sync::Arc; async fn can_run_eth_node() -> eyre::Result<()> { reth_tracing::init_test_tracing(); - let exec = TaskManager::current(); - let exec = exec.executor(); + let (mut nodes, _tasks, _wallet) = setup::( + 1, + Arc::new( + ChainSpecBuilder::default() + .chain(MAINNET.chain) + .genesis(serde_json::from_str(include_str!("../assets/genesis.json")).unwrap()) + .cancun_activated() + .build(), + ), + false, + ) + .await?; + + let mut node = nodes.pop().unwrap(); + let wallet = Wallet::default(); + let raw_tx = TransactionTestContext::transfer_tx(1, wallet.inner).await; - // Chain spec with test allocs - let genesis: Genesis = serde_json::from_str(include_str!("../assets/genesis.json")).unwrap(); - let chain_spec = Arc::new( - ChainSpecBuilder::default() - .chain(MAINNET.chain) - .genesis(genesis) - .cancun_activated() - .build(), - ); - - // Node setup - let node_config = NodeConfig::test() - .with_chain(chain_spec) - .with_unused_ports() - .with_rpc(RpcServerArgs::default().with_unused_ports().with_http()); + // make the node advance + let tx_hash = node.rpc.inject_tx(raw_tx).await?; - let NodeHandle { node, node_exit_future: _ } = NodeBuilder::new(node_config) - .testing_node(exec) - .node(EthereumNode::default()) - .launch() - .await?; - let mut node = NodeHelper::new(node).await?; + // make the node advance + let (payload, _) = node.advance_block(vec![], eth_payload_attributes).await?; - // Configure wallet from test mnemonic and create dummy transfer tx - let mut wallet = Wallet::default(); - let raw_tx = wallet.transfer_tx().await; + let block_hash = payload.block().hash(); + let block_number = payload.block().number; - // make the node advance - node.advance(raw_tx, eth_payload_attributes).await?; + // assert the block has been committed to the blockchain + node.assert_new_block(tx_hash, block_hash, block_number).await?; Ok(()) } @@ -76,14 +74,23 @@ async fn can_run_eth_node_with_auth_engine_api_over_ipc() -> eyre::Result<()> { .node(EthereumNode::default()) .launch() .await?; - let mut node = NodeHelper::new(node).await?; + let mut node = 
NodeTestContext::new(node).await?; // Configure wallet from test mnemonic and create dummy transfer tx - let mut wallet = Wallet::default(); - let raw_tx = wallet.transfer_tx().await; + let wallet = Wallet::default(); + let raw_tx = TransactionTestContext::transfer_tx(1, wallet.inner).await; // make the node advance - node.advance(raw_tx, crate::utils::eth_payload_attributes).await?; + let tx_hash = node.rpc.inject_tx(raw_tx).await?; + + // make the node advance + let (payload, _) = node.advance_block(vec![], eth_payload_attributes).await?; + + let block_hash = payload.block().hash(); + let block_number = payload.block().number; + + // assert the block has been committed to the blockchain + node.assert_new_block(tx_hash, block_hash, block_number).await?; Ok(()) } @@ -113,7 +120,7 @@ async fn test_failed_run_eth_node_with_no_auth_engine_api_over_ipc_opts() -> eyr .launch() .await?; - let node = NodeHelper::new(node).await?; + let node = NodeTestContext::new(node).await?; // Ensure that the engine api client is not available let client = node.inner.engine_ipc_client().await; diff --git a/crates/node-ethereum/tests/e2e/main.rs b/crates/ethereum/node/tests/e2e/main.rs similarity index 82% rename from crates/node-ethereum/tests/e2e/main.rs rename to crates/ethereum/node/tests/e2e/main.rs index 6a8a01064..1d0d6db8c 100644 --- a/crates/node-ethereum/tests/e2e/main.rs +++ b/crates/ethereum/node/tests/e2e/main.rs @@ -1,3 +1,4 @@ +mod blobs; mod dev; mod eth; mod p2p; diff --git a/crates/ethereum/node/tests/e2e/p2p.rs b/crates/ethereum/node/tests/e2e/p2p.rs new file mode 100644 index 000000000..768d1ac5a --- /dev/null +++ b/crates/ethereum/node/tests/e2e/p2p.rs @@ -0,0 +1,47 @@ +use crate::utils::eth_payload_attributes; +use reth_e2e_test_utils::{setup, transaction::TransactionTestContext}; +use reth_node_ethereum::EthereumNode; +use reth_primitives::{ChainSpecBuilder, MAINNET}; +use std::sync::Arc; + +#[tokio::test] +async fn can_sync() -> eyre::Result<()> { + reth_tracing::init_test_tracing(); + + let (mut nodes, _tasks, wallet) = setup::( + 2, + Arc::new( + ChainSpecBuilder::default() + .chain(MAINNET.chain) + .genesis(serde_json::from_str(include_str!("../assets/genesis.json")).unwrap()) + .cancun_activated() + .build(), + ), + false, + ) + .await?; + + let raw_tx = TransactionTestContext::transfer_tx(1, wallet.inner).await; + let mut second_node = nodes.pop().unwrap(); + let mut first_node = nodes.pop().unwrap(); + + // Make the first node advance + let tx_hash = first_node.rpc.inject_tx(raw_tx).await?; + + // make the node advance + let (payload, _) = first_node.advance_block(vec![], eth_payload_attributes).await?; + + let block_hash = payload.block().hash(); + let block_number = payload.block().number; + + // assert the block has been committed to the blockchain + first_node.assert_new_block(tx_hash, block_hash, block_number).await?; + + // only send forkchoice update to second node + second_node.engine_api.update_forkchoice(block_hash, block_hash).await?; + + // expect second node advanced via p2p gossip + second_node.assert_new_block(tx_hash, block_hash, 1).await?; + + Ok(()) +} diff --git a/crates/node-ethereum/tests/e2e/utils.rs b/crates/ethereum/node/tests/e2e/utils.rs similarity index 77% rename from crates/node-ethereum/tests/e2e/utils.rs rename to crates/ethereum/node/tests/e2e/utils.rs index 52526c45f..2c1dc373b 100644 --- a/crates/node-ethereum/tests/e2e/utils.rs +++ b/crates/ethereum/node/tests/e2e/utils.rs @@ -1,7 +1,12 @@ use reth::rpc::types::engine::PayloadAttributes; +use 
reth_e2e_test_utils::NodeHelperType; +use reth_node_ethereum::EthereumNode; use reth_payload_builder::EthPayloadBuilderAttributes; use reth_primitives::{Address, B256}; +/// Ethereum Node Helper type +pub(crate) type EthNode = NodeHelperType; + /// Helper function to create a new eth payload attributes pub(crate) fn eth_payload_attributes(timestamp: u64) -> EthPayloadBuilderAttributes { let attributes = PayloadAttributes { diff --git a/crates/node-ethereum/tests/it/builder.rs b/crates/ethereum/node/tests/it/builder.rs similarity index 79% rename from crates/node-ethereum/tests/it/builder.rs rename to crates/ethereum/node/tests/it/builder.rs index 7cfc0d705..b48e58679 100644 --- a/crates/node-ethereum/tests/it/builder.rs +++ b/crates/ethereum/node/tests/it/builder.rs @@ -13,7 +13,7 @@ fn test_basic_setup() { let msg = "On components".to_string(); let _builder = NodeBuilder::new(config) .with_database(db) - .with_types(EthereumNode::default()) + .with_types::() .with_components(EthereumNode::components()) .on_component_initialized(move |ctx| { let _provider = ctx.provider(); @@ -33,3 +33,11 @@ fn test_basic_setup() { }) .check_launch(); } + +#[test] +fn test_node_setup() { + let config = NodeConfig::test(); + let db = create_test_rw_db(); + let _builder = + NodeBuilder::new(config).with_database(db).node(EthereumNode::default()).check_launch(); +} diff --git a/crates/node-ethereum/tests/it/exex.rs b/crates/ethereum/node/tests/it/exex.rs similarity index 95% rename from crates/node-ethereum/tests/it/exex.rs rename to crates/ethereum/node/tests/it/exex.rs index bbab6d9dc..80366ba23 100644 --- a/crates/node-ethereum/tests/it/exex.rs +++ b/crates/ethereum/node/tests/it/exex.rs @@ -31,7 +31,7 @@ fn basic_exex() { let db = create_test_rw_db(); let _builder = NodeBuilder::new(config) .with_database(db) - .with_types(EthereumNode::default()) + .with_types::() .with_components(EthereumNode::components()) .install_exex("dummy", move |ctx| future::ok(DummyExEx { _ctx: ctx })) .check_launch(); diff --git a/crates/node-ethereum/tests/it/main.rs b/crates/ethereum/node/tests/it/main.rs similarity index 100% rename from crates/node-ethereum/tests/it/main.rs rename to crates/ethereum/node/tests/it/main.rs diff --git a/crates/evm/Cargo.toml b/crates/evm/Cargo.toml index f13c471a7..854dcd95a 100644 --- a/crates/evm/Cargo.toml +++ b/crates/evm/Cargo.toml @@ -17,3 +17,11 @@ revm-primitives.workspace = true revm.workspace = true reth-interfaces.workspace = true +futures-util.workspace = true +parking_lot = { workspace = true, optional = true } + +[dev-dependencies] +parking_lot.workspace = true + +[features] +test-utils = ["dep:parking_lot"] \ No newline at end of file diff --git a/crates/evm/src/either.rs b/crates/evm/src/either.rs new file mode 100644 index 000000000..d1ae4ed78 --- /dev/null +++ b/crates/evm/src/either.rs @@ -0,0 +1,119 @@ +//! 
Helper type that represents one of two possible executor types + +use crate::execute::{ + BatchBlockExecutionOutput, BatchExecutor, BlockExecutionInput, BlockExecutionOutput, + BlockExecutorProvider, Executor, +}; +use reth_interfaces::{executor::BlockExecutionError, provider::ProviderError}; +use reth_primitives::{BlockNumber, BlockWithSenders, PruneModes, Receipt}; +use revm_primitives::db::Database; + +// re-export Either +pub use futures_util::future::Either; + +impl BlockExecutorProvider for Either +where + A: BlockExecutorProvider, + B: BlockExecutorProvider, +{ + type Executor> = Either, B::Executor>; + type BatchExecutor> = + Either, B::BatchExecutor>; + + fn executor(&self, db: DB) -> Self::Executor + where + DB: Database, + { + match self { + Either::Left(a) => Either::Left(a.executor(db)), + Either::Right(b) => Either::Right(b.executor(db)), + } + } + + fn batch_executor(&self, db: DB, prune_modes: PruneModes) -> Self::BatchExecutor + where + DB: Database, + { + match self { + Either::Left(a) => Either::Left(a.batch_executor(db, prune_modes)), + Either::Right(b) => Either::Right(b.batch_executor(db, prune_modes)), + } + } +} + +impl Executor for Either +where + A: for<'a> Executor< + DB, + Input<'a> = BlockExecutionInput<'a, BlockWithSenders>, + Output = BlockExecutionOutput, + Error = BlockExecutionError, + >, + B: for<'a> Executor< + DB, + Input<'a> = BlockExecutionInput<'a, BlockWithSenders>, + Output = BlockExecutionOutput, + Error = BlockExecutionError, + >, + DB: Database, +{ + type Input<'a> = BlockExecutionInput<'a, BlockWithSenders>; + type Output = BlockExecutionOutput; + type Error = BlockExecutionError; + + fn execute(self, input: Self::Input<'_>) -> Result { + match self { + Either::Left(a) => a.execute(input), + Either::Right(b) => b.execute(input), + } + } +} + +impl BatchExecutor for Either +where + A: for<'a> BatchExecutor< + DB, + Input<'a> = BlockExecutionInput<'a, BlockWithSenders>, + Output = BatchBlockExecutionOutput, + Error = BlockExecutionError, + >, + B: for<'a> BatchExecutor< + DB, + Input<'a> = BlockExecutionInput<'a, BlockWithSenders>, + Output = BatchBlockExecutionOutput, + Error = BlockExecutionError, + >, + DB: Database, +{ + type Input<'a> = BlockExecutionInput<'a, BlockWithSenders>; + type Output = BatchBlockExecutionOutput; + type Error = BlockExecutionError; + + fn execute_one(&mut self, input: Self::Input<'_>) -> Result<(), Self::Error> { + match self { + Either::Left(a) => a.execute_one(input), + Either::Right(b) => b.execute_one(input), + } + } + + fn finalize(self) -> Self::Output { + match self { + Either::Left(a) => a.finalize(), + Either::Right(b) => b.finalize(), + } + } + + fn set_tip(&mut self, tip: BlockNumber) { + match self { + Either::Left(a) => a.set_tip(tip), + Either::Right(b) => b.set_tip(tip), + } + } + + fn size_hint(&self) -> Option { + match self { + Either::Left(a) => a.size_hint(), + Either::Right(b) => b.size_hint(), + } + } +} diff --git a/crates/evm/src/execute.rs b/crates/evm/src/execute.rs index b8c153602..e7ce09e79 100644 --- a/crates/evm/src/execute.rs +++ b/crates/evm/src/execute.rs @@ -1,7 +1,7 @@ //! Traits for execution. 
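Before the trait changes in `execute.rs` below, a quick usage note on `either`: the re-exported `Either` lets a call site choose between two executor providers at runtime while still presenting a single `BlockExecutorProvider` to callers. A hedged sketch:

```rust
use reth_evm::either::Either;
use reth_evm::execute::BlockExecutorProvider;

// Sketch: wrap two provider implementations behind one type. `eth` and
// `custom` stand for any two BlockExecutorProvider implementations.
fn select_provider<A, B>(use_eth: bool, eth: A, custom: B) -> Either<A, B>
where
    A: BlockExecutorProvider,
    B: BlockExecutorProvider,
{
    if use_eth {
        Either::Left(eth)
    } else {
        Either::Right(custom)
    }
}
```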
-use reth_interfaces::provider::ProviderError; -use reth_primitives::U256; +use reth_interfaces::{executor::BlockExecutionError, provider::ProviderError}; +use reth_primitives::{BlockNumber, BlockWithSenders, PruneModes, Receipt, Receipts, U256}; use revm::db::BundleState; use revm_primitives::db::Database; @@ -21,8 +21,8 @@ pub trait Executor { fn execute(self, input: Self::Input<'_>) -> Result<Self::Output, Self::Error>; } -/// An executor that can execute multiple blocks in a row and keep track of the state over the -/// entire batch. +/// A general purpose executor that can execute multiple inputs in sequence and keep track of the +/// state over the entire batch. pub trait BatchExecutor<DB> { /// The input type for the executor. type Input<'a>; @@ -32,17 +32,41 @@ pub trait BatchExecutor<DB> { type Error; /// Executes the next block in the batch and updates the state internally. - fn execute_one(&mut self, input: Self::Input<'_>) -> Result<BatchBlockOutput, Self::Error>; + fn execute_one(&mut self, input: Self::Input<'_>) -> Result<(), Self::Error>; + + /// Executes multiple inputs in the batch and updates the state internally. + fn execute_many<'a, I>(&mut self, inputs: I) -> Result<(), Self::Error> + where + I: IntoIterator<Item = Self::Input<'a>>, + { + for input in inputs { + self.execute_one(input)?; + } + Ok(()) + } + + /// Executes the entire batch and returns the final state. + fn execute_batch<'a, I>(mut self, batch: I) -> Result<Self::Output, Self::Error> + where + I: IntoIterator<Item = Self::Input<'a>>, + Self: Sized, + { + self.execute_many(batch)?; + Ok(self.finalize()) + } /// Finishes the batch and returns the final state. fn finalize(self) -> Self::Output; -} -/// The output of an executed block in a batch. -#[derive(Debug, Clone, Copy)] -pub struct BatchBlockOutput { - /// The size hint of the batch's tracked state. - pub size_hint: Option<usize>, + /// Set the expected tip of the batch. + /// + /// This can be used to optimize state pruning during execution. + fn set_tip(&mut self, tip: BlockNumber); + + /// The size hint of the batch's tracked state size. + /// + /// This is used to optimize DB commits depending on the size of the state. + fn size_hint(&self) -> Option<usize>; } /// The output of an ethereum block. @@ -51,7 +75,7 @@ pub struct BatchBlockOutput { /// /// TODO(mattsse): combine with BundleStateWithReceipts #[derive(Debug)] -pub struct EthBlockOutput { +pub struct BlockExecutionOutput { /// The changed state of the block after execution. pub state: BundleState, /// All the receipts of the transactions in the block. @@ -60,42 +84,94 @@ pub struct EthBlockOutput { pub gas_used: u64, } +/// The output of a batch of ethereum blocks. +#[derive(Debug)] +pub struct BatchBlockExecutionOutput { + /// Bundle state with reverts. + pub bundle: BundleState, + /// The collection of receipts. + /// Outer vector stores receipts for each block sequentially. + /// The inner vector stores receipts ordered by transaction number. + /// + /// If a receipt is `None`, it has been pruned. + pub receipts: Receipts, + /// First block of bundle state. + pub first_block: BlockNumber, +} + +impl BatchBlockExecutionOutput { + /// Creates a new batch execution output. + pub fn new(bundle: BundleState, receipts: Receipts, first_block: BlockNumber) -> Self { + Self { bundle, receipts, first_block } + } +} + /// A helper type for ethereum block inputs that consists of a block and the total difficulty. #[derive(Debug)] -pub struct EthBlockExecutionInput<'a, Block> { +pub struct BlockExecutionInput<'a, Block> { /// The block to execute. pub block: &'a Block, /// The total difficulty of the block.
pub total_difficulty: U256, } -impl<'a, Block> EthBlockExecutionInput<'a, Block> { +impl<'a, Block> BlockExecutionInput<'a, Block> { /// Creates a new input. pub fn new(block: &'a Block, total_difficulty: U256) -> Self { Self { block, total_difficulty } } } -impl<'a, Block> From<(&'a Block, U256)> for EthBlockExecutionInput<'a, Block> { +impl<'a, Block> From<(&'a Block, U256)> for BlockExecutionInput<'a, Block> { fn from((block, total_difficulty): (&'a Block, U256)) -> Self { Self::new(block, total_difficulty) } } -/// A type that can create a new executor. -pub trait ExecutorProvider: Send + Sync + Clone { +/// A type that can create a new executor for block execution. +pub trait BlockExecutorProvider: Send + Sync + Clone + Unpin + 'static { /// An executor that can execute a single block given a database. - type Executor<DB: Database<Error = ProviderError>>: Executor<DB>; + /// + /// # Verification + /// + /// On [Executor::execute], the executor is expected to validate the execution output of the + /// input; this includes: + /// - Cumulative gas used must match the input's gas used. + /// - Receipts must match the input's receipts root. + /// + /// It is not expected to validate the state trie root; this must be done by the caller using + /// the returned state. + type Executor<DB: Database<Error = ProviderError>>: for<'a> Executor< + DB, + Input<'a> = BlockExecutionInput<'a, BlockWithSenders>, + Output = BlockExecutionOutput, + Error = BlockExecutionError, + >; + /// An executor that can execute a batch of blocks given a database. + type BatchExecutor<DB: Database<Error = ProviderError>>: for<'a> BatchExecutor< + DB, + Input<'a> = BlockExecutionInput<'a, BlockWithSenders>, + // TODO: change to bundle state with receipts + Output = BatchBlockExecutionOutput, + Error = BlockExecutionError, + >; - type BatchExecutor<DB: Database<Error = ProviderError>>: BatchExecutor<DB>; /// Creates a new executor for single block execution. + /// + /// This is used to execute a single block and get the changed state. fn executor<DB>(&self, db: DB) -> Self::Executor<DB> where DB: Database<Error = ProviderError>; - /// Creates a new batch executor - fn batch_executor<DB>(&self, db: DB) -> Self::BatchExecutor<DB> + /// Creates a new batch executor with the given database and pruning modes. + /// + /// A batch executor is used to execute multiple blocks in sequence during historical sync, + /// keeping track of the state over the entire batch. + /// + /// The pruning modes are used to determine which parts of the state should be kept during + /// execution.
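The new signature follows immediately below; in use, a sync stage might drive a batch executor roughly like this sketch, where `provider`, `db`, and the block list are assumed bindings rather than the stage's actual code:

```rust
use reth_evm::execute::{BatchBlockExecutionOutput, BatchExecutor, BlockExecutorProvider};
use reth_interfaces::{executor::BlockExecutionError, provider::ProviderError};
use reth_primitives::{BlockWithSenders, PruneModes, U256};
use revm_primitives::db::Database;

// Sketch: execute a range of recovered blocks in one batch, hinting the tip
// so pruning can kick in, then take the accumulated bundle and receipts.
fn execute_range<P, DB>(
    provider: &P,
    db: DB,
    blocks: &[(BlockWithSenders, U256)],
    tip: u64,
) -> Result<BatchBlockExecutionOutput, BlockExecutionError>
where
    P: BlockExecutorProvider,
    DB: Database<Error = ProviderError>,
{
    let mut batch = provider.batch_executor(db, PruneModes::none());
    // Optional: the expected tip lets the executor prune more aggressively.
    batch.set_tip(tip);
    for (block, td) in blocks {
        batch.execute_one((block, *td).into())?;
    }
    Ok(batch.finalize())
}
```

The default `execute_many` and `execute_batch` helpers shown above collapse the loop and the `finalize` call when that is more convenient.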
+ fn batch_executor<DB>(&self, db: DB, prune_modes: PruneModes) -> Self::BatchExecutor<DB> where DB: Database<Error = ProviderError>; } @@ -103,13 +179,14 @@ pub trait ExecutorProvider: Send + Sync + Clone { #[cfg(test)] mod tests { use super::*; + use reth_primitives::Block; use revm::db::{CacheDB, EmptyDBTyped}; use std::marker::PhantomData; #[derive(Clone, Default)] struct TestExecutorProvider; - impl ExecutorProvider for TestExecutorProvider { + impl BlockExecutorProvider for TestExecutorProvider { type Executor<DB: Database<Error = ProviderError>> = TestExecutor<DB>; type BatchExecutor<DB: Database<Error = ProviderError>> = TestExecutor<DB>; @@ -120,7 +197,7 @@ mod tests { TestExecutor(PhantomData) } - fn batch_executor<DB>(&self, _db: DB) -> Self::BatchExecutor<DB> + fn batch_executor<DB>(&self, _db: DB, _prune_modes: PruneModes) -> Self::BatchExecutor<DB> where DB: Database<Error = ProviderError>, { @@ -131,28 +208,35 @@ mod tests { struct TestExecutor<DB>(PhantomData<DB>); impl<DB> Executor<DB> for TestExecutor<DB> { - type Input<'a> = &'static str; - type Output = (); - type Error = String; + type Input<'a> = BlockExecutionInput<'a, BlockWithSenders>; + type Output = BlockExecutionOutput; + type Error = BlockExecutionError; fn execute(self, _input: Self::Input<'_>) -> Result<Self::Output, Self::Error> { - Ok(()) + Err(BlockExecutionError::UnavailableForTest) } } impl<DB> BatchExecutor<DB> for TestExecutor<DB> { - type Input<'a> = &'static str; - type Output = (); - type Error = String; - - fn execute_one( - &mut self, - _input: Self::Input<'_>, - ) -> Result<BatchBlockOutput, Self::Error> { - Ok(BatchBlockOutput { size_hint: None }) + type Input<'a> = BlockExecutionInput<'a, BlockWithSenders>; + type Output = BatchBlockExecutionOutput; + type Error = BlockExecutionError; + + fn execute_one(&mut self, _input: Self::Input<'_>) -> Result<(), Self::Error> { + Ok(()) + } + + fn finalize(self) -> Self::Output { + todo!() + } + + fn set_tip(&mut self, _tip: BlockNumber) { + todo!() } - fn finalize(self) -> Self::Output {} + fn size_hint(&self) -> Option<usize> { + None + } } #[test] @@ -160,6 +244,9 @@ let provider = TestExecutorProvider; let db = CacheDB::<EmptyDBTyped<ProviderError>>::default(); let executor = provider.executor(db); - executor.execute("test").unwrap(); + let block = + Block { header: Default::default(), body: vec![], ommers: vec![], withdrawals: None }; + let block = BlockWithSenders::new(block, Default::default()).unwrap(); + let _ = executor.execute(BlockExecutionInput::new(&block, U256::ZERO)); } } diff --git a/crates/evm/src/lib.rs b/crates/evm/src/lib.rs index 78a76e54c..94cac8bcc 100644 --- a/crates/evm/src/lib.rs +++ b/crates/evm/src/lib.rs @@ -8,32 +8,43 @@ #![cfg_attr(not(test), warn(unused_crate_dependencies))] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] -use reth_primitives::{revm::env::fill_block_env, Address, ChainSpec, Header, Transaction, U256}; +use reth_primitives::{ + revm::env::fill_block_env, Address, ChainSpec, Header, TransactionSigned, U256, +}; use revm::{inspector_handle_register, Database, Evm, EvmBuilder, GetInspector}; use revm_primitives::{BlockEnv, CfgEnvWithHandlerCfg, EnvWithHandlerCfg, SpecId, TxEnv}; +pub mod either; pub mod execute; +#[cfg(any(test, feature = "test-utils"))] +/// Test helpers for mocking the executor +pub mod test_utils; + /// Trait for configuring the EVM for executing full blocks. pub trait ConfigureEvm: ConfigureEvmEnv { + /// Associated type for the default external context that should be configured for the EVM. + type DefaultExternalContext<'a>; + /// Returns new EVM with the given database /// /// This does not automatically configure the EVM with [ConfigureEvmEnv] methods.
It is up to /// the caller to call an appropriate method to fill the transaction and block environment /// before executing any transactions using the provided EVM. - fn evm<'a, DB: Database + 'a>(&self, db: DB) -> Evm<'a, (), DB> { - EvmBuilder::default().with_db(db).build() - } + fn evm<'a, DB: Database + 'a>( + &'a self, + db: DB, + ) -> Evm<'a, Self::DefaultExternalContext<'a>, DB>; /// Returns a new EVM with the given database configured with the given environment settings, /// including the spec id. /// /// This will preserve any handler modifications fn evm_with_env<'a, DB: Database + 'a>( - &self, + &'a self, db: DB, env: EnvWithHandlerCfg, - ) -> Evm<'a, (), DB> { + ) -> Evm<'a, Self::DefaultExternalContext<'a>, DB> { let mut evm = self.evm(db); evm.modify_spec_id(env.spec_id()); evm.context.evm.env = env.env; @@ -43,9 +54,11 @@ pub trait ConfigureEvm: ConfigureEvmEnv { /// Returns a new EVM with the given database configured with the given environment settings, /// including the spec id. /// + /// This will use the given external inspector as the EVM external context. + /// /// This will preserve any handler modifications fn evm_with_env_and_inspector<'a, DB, I>( - &self, + &'a self, db: DB, env: EnvWithHandlerCfg, inspector: I, @@ -65,7 +78,7 @@ pub trait ConfigureEvm: ConfigureEvmEnv { /// Caution: This does not automatically configure the EVM with [ConfigureEvmEnv] methods. It is /// up to the caller to call an appropriate method to fill the transaction and block /// environment before executing any transactions using the provided EVM. - fn evm_with_inspector<'a, DB, I>(&self, db: DB, inspector: I) -> Evm<'a, I, DB> + fn evm_with_inspector<'a, DB, I>(&'a self, db: DB, inspector: I) -> Evm<'a, I, DB> where DB: Database + 'a, I: GetInspector, @@ -80,18 +93,9 @@ pub trait ConfigureEvm: ConfigureEvmEnv { /// This represents the set of methods used to configure the EVM's environment before block /// execution. -pub trait ConfigureEvmEnv: Send + Sync + Unpin + Clone { - /// The type of the transaction metadata that should be used to fill fields in the transaction - /// environment. - /// - /// On ethereum mainnet, this is `()`, and on optimism these are the L1 fee fields and - /// additional L1 block info. - type TxMeta; - - /// Fill transaction environment from a [Transaction] and the given sender address. - fn fill_tx_env(tx_env: &mut TxEnv, transaction: T, sender: Address, meta: Self::TxMeta) - where - T: AsRef; +pub trait ConfigureEvmEnv: Send + Sync + Unpin + Clone + 'static { + /// Fill transaction environment from a [TransactionSigned] and the given sender address. + fn fill_tx_env(tx_env: &mut TxEnv, transaction: &TransactionSigned, sender: Address); /// Fill [CfgEnvWithHandlerCfg] fields according to the chain spec and given header fn fill_cfg_env( diff --git a/crates/evm/src/test_utils.rs b/crates/evm/src/test_utils.rs new file mode 100644 index 000000000..e0ee46917 --- /dev/null +++ b/crates/evm/src/test_utils.rs @@ -0,0 +1,80 @@ +//! Helpers for testing. + +use crate::execute::{ + BatchBlockExecutionOutput, BatchExecutor, BlockExecutionInput, BlockExecutionOutput, + BlockExecutorProvider, Executor, +}; +use parking_lot::Mutex; +use reth_interfaces::{executor::BlockExecutionError, provider::ProviderError}; +use reth_primitives::{BlockNumber, BlockWithSenders, PruneModes, Receipt}; +use revm_primitives::db::Database; +use std::sync::Arc; + +/// A [BlockExecutorProvider] that returns mocked execution results. 
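In tests, the mock defined just below can be preloaded with canned outputs before being handed to code that expects a `BlockExecutorProvider`. A sketch, assuming the `Default` impls that `BundleState` and `Receipts` provide in reth:

```rust
use reth_evm::execute::BatchBlockExecutionOutput;
use reth_evm::test_utils::MockExecutorProvider;

// Sketch: seed the mock with one canned result; the Default values stand in
// for a real bundle state and real receipts.
fn seeded_mock() -> MockExecutorProvider {
    let provider = MockExecutorProvider::default();
    provider.extend([BatchBlockExecutionOutput::new(
        Default::default(), // bundle state
        Default::default(), // receipts
        0,                  // first block of the bundle
    )]);
    provider
}
```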
+#[derive(Clone, Debug, Default)] +pub struct MockExecutorProvider { + exec_results: Arc>>, +} + +impl MockExecutorProvider { + /// Extend the mocked execution results + pub fn extend(&self, results: impl IntoIterator>) { + self.exec_results.lock().extend(results.into_iter().map(Into::into)); + } +} + +impl BlockExecutorProvider for MockExecutorProvider { + type Executor> = Self; + + type BatchExecutor> = Self; + + fn executor(&self, _: DB) -> Self::Executor + where + DB: Database, + { + self.clone() + } + + fn batch_executor(&self, _: DB, _: PruneModes) -> Self::BatchExecutor + where + DB: Database, + { + self.clone() + } +} + +impl Executor for MockExecutorProvider { + type Input<'a> = BlockExecutionInput<'a, BlockWithSenders>; + type Output = BlockExecutionOutput; + type Error = BlockExecutionError; + + fn execute(self, _: Self::Input<'_>) -> Result { + let BatchBlockExecutionOutput { bundle, receipts, .. } = + self.exec_results.lock().pop().unwrap(); + Ok(BlockExecutionOutput { + state: bundle, + receipts: receipts.into_iter().flatten().flatten().collect(), + gas_used: 0, + }) + } +} + +impl BatchExecutor for MockExecutorProvider { + type Input<'a> = BlockExecutionInput<'a, BlockWithSenders>; + type Output = BatchBlockExecutionOutput; + type Error = BlockExecutionError; + + fn execute_one(&mut self, _: Self::Input<'_>) -> Result<(), Self::Error> { + Ok(()) + } + + fn finalize(self) -> Self::Output { + self.exec_results.lock().pop().unwrap() + } + + fn set_tip(&mut self, _: BlockNumber) {} + + fn size_hint(&self) -> Option { + None + } +} diff --git a/crates/exex/Cargo.toml b/crates/exex/Cargo.toml index 71f9c8bde..d16cb53f7 100644 --- a/crates/exex/Cargo.toml +++ b/crates/exex/Cargo.toml @@ -21,6 +21,8 @@ reth-primitives.workspace = true reth-provider.workspace = true reth-tasks.workspace = true reth-tracing.workspace = true +reth-network.workspace = true +reth-payload-builder.workspace = true ## async tokio.workspace = true diff --git a/crates/exex/src/context.rs b/crates/exex/src/context.rs index 619679e85..7cedb4977 100644 --- a/crates/exex/src/context.rs +++ b/crates/exex/src/context.rs @@ -1,32 +1,25 @@ -use reth_node_api::FullNodeComponents; +use reth_node_api::{FullNodeComponents, FullNodeTypes, NodeTypes}; use reth_node_core::{ dirs::{ChainPath, DataDirPath}, node_config::NodeConfig, }; use reth_primitives::Head; -use reth_provider::CanonStateNotification; use reth_tasks::TaskExecutor; use tokio::sync::mpsc::{Receiver, UnboundedSender}; -use crate::ExExEvent; +use crate::{ExExEvent, ExExNotification}; /// Captures the context that an ExEx has access to. #[derive(Debug)] pub struct ExExContext { /// The current head of the blockchain at launch. pub head: Head, - /// The configured provider to interact with the blockchain. - pub provider: Node::Provider, - /// The task executor of the node. - pub task_executor: TaskExecutor, /// The data dir of the node. pub data_dir: ChainPath, /// The config of the node pub config: NodeConfig, /// The loaded node config pub reth_config: reth_config::Config, - /// The transaction pool of the node. - pub pool: Node::Pool, /// Channel used to send [`ExExEvent`]s to the rest of the node. /// /// # Important @@ -35,12 +28,58 @@ pub struct ExExContext { /// Additionally, the exex can pre-emptively emit a `FinishedHeight` event to specify what /// blocks to receive notifications for. pub events: UnboundedSender, - /// Channel to receive [`CanonStateNotification`]s on state transitions. + /// Channel to receive [`ExExNotification`]s. 
/// /// # Important /// - /// Once a `CanonStateNotification` is sent over the channel, it is considered delivered by the + /// Once an [`ExExNotification`] is sent over the channel, it is considered delivered by the /// node. - pub notifications: Receiver<CanonStateNotification>, - // TODO(alexey): add pool, payload builder, anything else? + pub notifications: Receiver<ExExNotification>, + + /// Node components + pub components: Node, +} + +impl<Node: FullNodeComponents> NodeTypes for ExExContext<Node> { + type Primitives = Node::Primitives; + type Engine = Node::Engine; +} + +impl<Node: FullNodeComponents> FullNodeTypes for ExExContext<Node> { + type DB = Node::DB; + type Provider = Node::Provider; +} + +impl<Node: FullNodeComponents> FullNodeComponents for ExExContext<Node> { + type Pool = Node::Pool; + type Evm = Node::Evm; + type Executor = Node::Executor; + + fn pool(&self) -> &Self::Pool { + self.components.pool() + } + + fn evm_config(&self) -> &Self::Evm { + self.components.evm_config() + } + + fn block_executor(&self) -> &Self::Executor { + self.components.block_executor() + } + + fn provider(&self) -> &Self::Provider { + self.components.provider() + } + + fn network(&self) -> &reth_network::NetworkHandle { + self.components.network() + } + + fn payload_builder(&self) -> &reth_payload_builder::PayloadBuilderHandle<Self::Engine> { + self.components.payload_builder() + } + + fn task_executor(&self) -> &TaskExecutor { + self.components.task_executor() + } } diff --git a/crates/exex/src/lib.rs b/crates/exex/src/lib.rs index 638d8af79..4e2d0dd85 100644 --- a/crates/exex/src/lib.rs +++ b/crates/exex/src/lib.rs @@ -42,3 +42,6 @@ pub use event::*; mod manager; pub use manager::*; + +mod notification; +pub use notification::*; diff --git a/crates/exex/src/manager.rs b/crates/exex/src/manager.rs index 59f2bde58..1de8c102e 100644 --- a/crates/exex/src/manager.rs +++ b/crates/exex/src/manager.rs @@ -1,8 +1,7 @@ -use crate::ExExEvent; +use crate::{ExExEvent, ExExNotification}; use metrics::Gauge; use reth_metrics::{metrics::Counter, Metrics}; use reth_primitives::{BlockNumber, FinishedExExHeight}; -use reth_provider::CanonStateNotification; use reth_tracing::tracing::debug; use std::{ collections::VecDeque, @@ -24,7 +23,7 @@ use tokio_util::sync::{PollSendError, PollSender, ReusableBoxFuture}; #[derive(Metrics)] #[metrics(scope = "exex")] struct ExExMetrics { - /// The total number of canonical state notifications sent to an ExEx. + /// The total number of notifications sent to an ExEx. notifications_sent_total: Counter, /// The total number of events an ExEx has sent to the manager. events_sent_total: Counter, @@ -42,8 +41,8 @@ pub struct ExExHandle { /// Metrics for an ExEx. metrics: ExExMetrics, - /// Channel to send [`CanonStateNotification`]s to the ExEx. - sender: PollSender<CanonStateNotification>, + /// Channel to send [`ExExNotification`]s to the ExEx. + sender: PollSender<ExExNotification>, /// Channel to receive [`ExExEvent`]s from the ExEx. receiver: UnboundedReceiver<ExExEvent>, /// The ID of the next notification to send to this ExEx. @@ -59,22 +58,22 @@ impl ExExHandle { /// Create a new handle for the given ExEx. /// /// Returns the handle, as well as a [`UnboundedSender`] for [`ExExEvent`]s and a - /// [`Receiver`] for [`CanonStateNotification`]s that should be given to the ExEx. + /// [`Receiver`] for [`ExExNotification`]s that should be given to the ExEx.
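The constructor is shown right below; the three-way split it produces looks like this sketch, where the ID string is only a logging and metrics label:

```rust
use reth_exex::{ExExEvent, ExExHandle, ExExNotification};
use tokio::sync::mpsc::{Receiver, UnboundedSender};

// Sketch: the manager keeps the handle, while the event sender and the
// notification receiver are handed to the ExEx itself.
fn wire_exex() -> (ExExHandle, UnboundedSender<ExExEvent>, Receiver<ExExNotification>) {
    ExExHandle::new("my-exex".to_string())
}
```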
+    pub fn new(id: String) -> (Self, UnboundedSender<ExExEvent>, Receiver<ExExNotification>) {
+        let (notification_tx, notification_rx) = mpsc::channel(1);
         let (event_tx, event_rx) = mpsc::unbounded_channel();
 
         (
             Self {
                 id: id.clone(),
                 metrics: ExExMetrics::new_with_labels(&[("exex", id)]),
-                sender: PollSender::new(canon_tx),
+                sender: PollSender::new(notification_tx),
                 receiver: event_rx,
                 next_notification_id: 0,
                 finished_height: None,
             },
             event_tx,
-            canon_rx,
+            notification_rx,
         )
     }
 
@@ -85,25 +84,53 @@ impl ExExHandle {
     fn send(
         &mut self,
         cx: &mut Context<'_>,
-        (event_id, notification): &(usize, CanonStateNotification),
-    ) -> Poll<Result<(), PollSendError<CanonStateNotification>>> {
-        // check that this notification is above the finished height of the exex if the exex has set
-        // one
+        (notification_id, notification): &(usize, ExExNotification),
+    ) -> Poll<Result<(), PollSendError<ExExNotification>>> {
         if let Some(finished_height) = self.finished_height {
-            if finished_height >= notification.tip().number {
-                self.next_notification_id = event_id + 1;
-                return Poll::Ready(Ok(()))
+            match notification {
+                ExExNotification::ChainCommitted { new } => {
+                    // Skip the chain commit notification if the finished height of the ExEx is
+                    // higher than or equal to the tip of the new notification.
+                    // I.e., the ExEx has already processed the notification.
+                    if finished_height >= new.tip().number {
+                        debug!(
+                            exex_id = %self.id,
+                            %notification_id,
+                            %finished_height,
+                            new_tip = %new.tip().number,
+                            "Skipping notification"
+                        );
+
+                        self.next_notification_id = notification_id + 1;
+                        return Poll::Ready(Ok(()))
+                    }
+                }
+                // Always send [ExExNotification::ChainReorged] and
+                // [ExExNotification::ChainReverted] notifications, because the ExEx
+                // should be aware of reorgs and reverts even below its finished height
+                ExExNotification::ChainReorged { .. } | ExExNotification::ChainReverted { .. } => {}
             }
         }
 
+        debug!(
+            exex_id = %self.id,
+            %notification_id,
+            "Reserving slot for notification"
+        );
         match self.sender.poll_reserve(cx) {
             Poll::Ready(Ok(())) => (),
             other => return other,
         }
 
+        debug!(
+            exex_id = %self.id,
+            %notification_id,
+            "Sending notification"
+        );
         match self.sender.send_item(notification.clone()) {
             Ok(()) => {
-                self.next_notification_id = event_id + 1;
+                self.next_notification_id = notification_id + 1;
                 self.metrics.notifications_sent_total.increment(1);
                 Poll::Ready(Ok(()))
             }
@@ -142,18 +169,18 @@ pub struct ExExManager {
     /// Handles to communicate with the ExEx's.
     exex_handles: Vec<ExExHandle>,
 
-    /// [`CanonStateNotification`] channel from the [`ExExManagerHandle`]s.
-    handle_rx: UnboundedReceiver<CanonStateNotification>,
+    /// [`ExExNotification`] channel from the [`ExExManagerHandle`]s.
+    handle_rx: UnboundedReceiver<ExExNotification>,
 
     /// The minimum notification ID currently present in the buffer.
     min_id: usize,
-    /// Monotonically increasing ID for [`CanonStateNotification`]s.
+    /// Monotonically increasing ID for [`ExExNotification`]s.
     next_id: usize,
-    /// Internal buffer of [`CanonStateNotification`]s.
+    /// Internal buffer of [`ExExNotification`]s.
     ///
     /// The first element of the tuple is a monotonically increasing ID unique to the notification
     /// (the second element of the tuple).
-    buffer: VecDeque<(usize, CanonStateNotification)>,
+    buffer: VecDeque<(usize, ExExNotification)>,
     /// Max size of the internal state notifications buffer.
     max_capacity: usize,
     /// Current state notifications buffer capacity.
@@ -231,7 +258,7 @@ impl ExExManager {
     /// Updates the current buffer capacity and notifies all `is_ready` watchers of the manager's
     /// readiness to receive notifications.
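
The skip rule implemented in `ExExHandle::send` above can be restated as a small predicate; this helper is purely illustrative and not part of the change:

    // Hypothetical condensation of the rule in `ExExHandle::send`: only a pure
    // commit whose tip is at or below the acked `finished_height` is skipped;
    // reorg and revert notifications are always delivered.
    fn should_skip(finished_height: Option<u64>, notification: &ExExNotification) -> bool {
        match (finished_height, notification) {
            (Some(height), ExExNotification::ChainCommitted { new }) => height >= new.tip().number,
            _ => false,
        }
    }
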
-    fn update_capacity(&mut self) {
+    fn update_capacity(&self) {
         let capacity = self.max_capacity.saturating_sub(self.buffer.len());
         self.current_capacity.store(capacity, Ordering::Relaxed);
         self.metrics.current_capacity.set(capacity as f64);
@@ -244,7 +271,7 @@ impl ExExManager {
 
     /// Pushes a new notification into the manager's internal buffer, assigning the notification a
     /// unique ID.
-    fn push_notification(&mut self, notification: CanonStateNotification) {
+    fn push_notification(&mut self, notification: ExExNotification) {
         let next_id = self.next_id;
         self.buffer.push_back((next_id, notification));
         self.next_id += 1;
@@ -258,7 +285,11 @@ impl Future for ExExManager {
         // drain handle notifications
         while self.buffer.len() < self.max_capacity {
             if let Poll::Ready(Some(notification)) = self.handle_rx.poll_recv(cx) {
-                debug!("received new notification");
+                debug!(
+                    committed_tip = ?notification.committed_chain().map(|chain| chain.tip().number),
+                    reverted_tip = ?notification.reverted_chain().map(|chain| chain.tip().number),
+                    "Received new notification"
+                );
                 self.push_notification(notification);
                 continue
             }
@@ -275,12 +306,11 @@ impl Future for ExExManager {
 
             // it is a logic error for this to ever underflow since the manager manages the
             // notification IDs
-            let notification_id = exex
+            let notification_index = exex
                 .next_notification_id
                 .checked_sub(self.min_id)
                 .expect("exex expected notification ID outside the manager's range");
-            if let Some(notification) = self.buffer.get(notification_id) {
-                debug!(exex.id, notification_id, "sent notification to exex");
+            if let Some(notification) = self.buffer.get(notification_index) {
                 if let Poll::Ready(Err(err)) = exex.send(cx, notification) {
                     // the channel was closed, which is irrecoverable for the manager
                     return Poll::Ready(Err(err.into()))
@@ -291,9 +321,9 @@ impl Future for ExExManager {
         }
 
         // remove processed buffered notifications
+        debug!(%min_id, "Updating lowest notification id in buffer");
         self.buffer.retain(|&(id, _)| id >= min_id);
         self.min_id = min_id;
-        debug!(min_id, "lowest notification id in buffer updated");
 
         // update capacity
         self.update_capacity();
@@ -301,7 +331,7 @@ impl Future for ExExManager {
         // handle incoming exex events
         for exex in self.exex_handles.iter_mut() {
             while let Poll::Ready(Some(event)) = exex.receiver.poll_recv(cx) {
-                debug!(?event, id = exex.id, "received event from exex");
+                debug!(exex_id = %exex.id, ?event, "Received event from exex");
                 exex.metrics.events_sent_total.increment(1);
                 match event {
                     ExExEvent::FinishedHeight(height) => exex.finished_height = Some(height),
@@ -334,7 +364,7 @@ impl Future for ExExManager {
 #[derive(Debug)]
 pub struct ExExManagerHandle {
     /// Channel to send notifications to the ExEx manager.
-    exex_tx: UnboundedSender<CanonStateNotification>,
+    exex_tx: UnboundedSender<ExExNotification>,
     /// The number of ExEx's running on the node.
     num_exexs: usize,
     /// A watch channel denoting whether the manager is ready for new notifications or not.
@@ -376,10 +406,7 @@ impl ExExManagerHandle {
     /// Synchronously send a notification over the channel to all execution extensions.
     ///
     /// Senders should call [`Self::has_capacity`] first.
-    pub fn send(
-        &self,
-        notification: CanonStateNotification,
-    ) -> Result<(), SendError<CanonStateNotification>> {
+    pub fn send(&self, notification: ExExNotification) -> Result<(), SendError<ExExNotification>> {
         self.exex_tx.send(notification)
     }
 
@@ -389,8 +416,8 @@ impl ExExManagerHandle {
     /// capacity in the channel, the future will wait.
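
As the docs above note, synchronous senders are expected to check capacity first. A sketch of that calling convention follows; the `forward` function is a made-up example, and `has_capacity` is assumed to be the `&self -> bool` accessor the doc comment refers to:

    // Hypothetical caller of ExExManagerHandle, following the documented contract.
    fn forward(handle: &ExExManagerHandle, notification: ExExNotification) {
        if handle.has_capacity() {
            // Within the buffer budget: fan the notification out to all ExExes.
            let _ = handle.send(notification);
        }
        // Otherwise callers should fall back to `send_async`, which waits for
        // the manager to signal readiness (shown next in the diff).
    }
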
     pub async fn send_async(
         &mut self,
-        notification: CanonStateNotification,
-    ) -> Result<(), SendError<CanonStateNotification>> {
+        notification: ExExNotification,
+    ) -> Result<(), SendError<ExExNotification>> {
         self.ready().await;
         self.exex_tx.send(notification)
     }
diff --git a/crates/exex/src/notification.rs b/crates/exex/src/notification.rs
new file mode 100644
index 000000000..ae8091e0c
--- /dev/null
+++ b/crates/exex/src/notification.rs
@@ -0,0 +1,54 @@
+use std::sync::Arc;
+
+use reth_provider::{CanonStateNotification, Chain};
+
+/// Notifications sent to an ExEx.
+#[derive(Debug, Clone, PartialEq, Eq)]
+pub enum ExExNotification {
+    /// Chain got committed without a reorg, and only the new chain is returned.
+    ChainCommitted {
+        /// The new chain after commit.
+        new: Arc<Chain>,
+    },
+    /// Chain got reorged, and both the old and the new chains are returned.
+    ChainReorged {
+        /// The old chain before reorg.
+        old: Arc<Chain>,
+        /// The new chain after reorg.
+        new: Arc<Chain>,
+    },
+    /// Chain got reverted, and only the old chain is returned.
+    ChainReverted {
+        /// The old chain before reversion.
+        old: Arc<Chain>,
+    },
+}
+
+impl ExExNotification {
+    /// Returns the committed chain from the [Self::ChainCommitted] and [Self::ChainReorged]
+    /// variants, if any.
+    pub fn committed_chain(&self) -> Option<Arc<Chain>> {
+        match self {
+            Self::ChainCommitted { new } | Self::ChainReorged { old: _, new } => Some(new.clone()),
+            Self::ChainReverted { .. } => None,
+        }
+    }
+
+    /// Returns the reverted chain from the [Self::ChainReorged] and [Self::ChainReverted]
+    /// variants, if any.
+    pub fn reverted_chain(&self) -> Option<Arc<Chain>> {
+        match self {
+            Self::ChainReorged { old, new: _ } | Self::ChainReverted { old } => Some(old.clone()),
+            Self::ChainCommitted { .. } => None,
+        }
+    }
+}
+
+impl From<CanonStateNotification> for ExExNotification {
+    fn from(notification: CanonStateNotification) -> Self {
+        match notification {
+            CanonStateNotification::Commit { new } => Self::ChainCommitted { new },
+            CanonStateNotification::Reorg { old, new } => Self::ChainReorged { old, new },
+        }
+    }
+}
diff --git a/crates/interfaces/Cargo.toml b/crates/interfaces/Cargo.toml
index 8f4aa494a..c2e276a33 100644
--- a/crates/interfaces/Cargo.toml
+++ b/crates/interfaces/Cargo.toml
@@ -12,9 +12,10 @@ workspace = true
 [dependencies]
 reth-primitives.workspace = true
-reth-rpc-types.workspace = true
 reth-network-api.workspace = true
 reth-eth-wire-types.workspace = true
+reth-consensus.workspace = true
+reth-network-types.workspace = true
 
 # async
 futures.workspace = true
@@ -34,12 +35,14 @@ parking_lot = { workspace = true, optional = true }
 rand = { workspace = true, optional = true }
 
 [dev-dependencies]
+reth-consensus = { workspace = true, features = ["test-utils"] }
+
 parking_lot.workspace = true
 rand.workspace = true
 tokio = { workspace = true, features = ["full"] }
 secp256k1 = { workspace = true, features = ["alloc", "recovery", "rand"] }
 
 [features]
-test-utils = ["secp256k1", "rand", "parking_lot"]
+test-utils = ["reth-consensus/test-utils", "secp256k1", "rand", "parking_lot"]
 cli = ["clap"]
 optimism = ["reth-eth-wire-types/optimism"]
diff --git a/crates/interfaces/src/blockchain_tree/error.rs b/crates/interfaces/src/blockchain_tree/error.rs
index e08211a4f..a98d76501 100644
--- a/crates/interfaces/src/blockchain_tree/error.rs
+++ b/crates/interfaces/src/blockchain_tree/error.rs
@@ -1,11 +1,11 @@
 //! Error handling for the blockchain tree
 
 use crate::{
-    consensus::ConsensusError,
     executor::{BlockExecutionError, BlockValidationError},
     provider::ProviderError,
     RethError,
 };
+use reth_consensus::ConsensusError;
 use reth_primitives::{BlockHash, BlockNumber, SealedBlock};
 
 /// Various error cases that can occur when a block violates tree assumptions.
@@ -18,7 +18,7 @@ pub enum BlockchainTreeError {
         last_finalized: BlockNumber,
     },
     /// Thrown if no side chain could be found for the block.
-    #[error("blockChainId can't be found in BlockchainTree with internal index {chain_id}")]
+    #[error("chainId can't be found in BlockchainTree with internal index {chain_id}")]
     BlockSideChainIdConsistency {
         /// The internal identifier for the side chain.
         chain_id: u64,
@@ -47,6 +47,9 @@ pub enum BlockchainTreeError {
         /// The block hash of the block that failed to buffer.
         block_hash: BlockHash,
     },
+    /// Thrown when trying to access the genesis parent.
+    #[error("genesis block has no parent")]
+    GenesisBlockHasNoParent,
 }
 
 /// Canonical Errors
@@ -67,6 +70,9 @@ pub enum CanonicalError {
     /// Error indicating a transaction failed to commit during execution.
     #[error("transaction error on commit: {0}")]
     CanonicalCommit(String),
+    /// Error indicating that a previous optimistic sync target was re-orged
+    #[error("transaction error on revert: {0}")]
+    OptimisticTargetRevert(BlockNumber),
 }
 
 impl CanonicalError {
@@ -83,6 +89,15 @@ impl CanonicalError {
             CanonicalError::BlockchainTree(BlockchainTreeError::BlockHashNotFoundInChain { .. })
         )
     }
+
+    /// Returns `Some(BlockNumber)` if the underlying error matches
+    /// [CanonicalError::OptimisticTargetRevert].
+    pub fn optimistic_revert_block_number(&self) -> Option<BlockNumber> {
+        match self {
+            CanonicalError::OptimisticTargetRevert(block_number) => Some(*block_number),
+            _ => None,
+        }
+    }
 }
 
 /// Error thrown when inserting a block failed because the block is considered invalid.
@@ -243,6 +258,36 @@ impl InsertBlockErrorKind {
         matches!(self, InsertBlockErrorKind::Consensus(_))
     }
 
+    /// Returns true if this error is a state root error
+    pub fn is_state_root_error(&self) -> bool {
+        // we need to get the state root errors inside of the different variant branches
+        match self {
+            InsertBlockErrorKind::Execution(err) => {
+                matches!(
+                    err,
+                    BlockExecutionError::Validation(BlockValidationError::StateRoot { .. })
+                )
+            }
+            InsertBlockErrorKind::Canonical(err) => {
+                matches!(
+                    err,
+                    CanonicalError::Validation(BlockValidationError::StateRoot { .. }) |
+                        CanonicalError::Provider(
+                            ProviderError::StateRootMismatch(_) |
+                                ProviderError::UnwindStateRootMismatch(_)
+                        )
+                )
+            }
+            InsertBlockErrorKind::Provider(err) => {
+                matches!(
+                    err,
+                    ProviderError::StateRootMismatch(_) | ProviderError::UnwindStateRootMismatch(_)
+                )
+            }
+            _ => false,
+        }
+    }
+
     /// Returns true if the error is caused by an invalid block
     ///
     /// This is intended to be used to determine if the block should be marked as invalid.
@@ -263,8 +308,7 @@ impl InsertBlockErrorKind {
                     BlockExecutionError::CanonicalCommit { .. } |
                     BlockExecutionError::AppendChainDoesntConnect { .. } |
                     BlockExecutionError::UnavailableForTest => false,
-                    #[cfg(feature = "optimism")]
-                    BlockExecutionError::OptimismBlockExecution(_) => false,
+                    BlockExecutionError::Other(_) => false,
                 }
             }
             InsertBlockErrorKind::Tree(err) => {
@@ -277,7 +321,8 @@ impl InsertBlockErrorKind {
                     BlockchainTreeError::CanonicalChain { .. } |
                     BlockchainTreeError::BlockNumberNotFoundInChain { .. } |
                     BlockchainTreeError::BlockHashNotFoundInChain { .. } |
-                    BlockchainTreeError::BlockBufferingFailed { .. } => false,
+                    BlockchainTreeError::BlockBufferingFailed { .. } |
+                    BlockchainTreeError::GenesisBlockHasNoParent => false,
                 }
             }
             InsertBlockErrorKind::Provider(_) | InsertBlockErrorKind::Internal(_) => {
@@ -287,7 +332,8 @@ impl InsertBlockErrorKind {
             InsertBlockErrorKind::Canonical(err) => match err {
                 CanonicalError::BlockchainTree(_) |
                 CanonicalError::CanonicalCommit(_) |
-                CanonicalError::CanonicalRevert(_) => false,
+                CanonicalError::CanonicalRevert(_) |
+                CanonicalError::OptimisticTargetRevert(_) => false,
                 CanonicalError::Validation(_) => true,
                 CanonicalError::Provider(_) => false,
             },
diff --git a/crates/interfaces/src/blockchain_tree/mod.rs b/crates/interfaces/src/blockchain_tree/mod.rs
index d8ad667fc..7d2b50e41 100644
--- a/crates/interfaces/src/blockchain_tree/mod.rs
+++ b/crates/interfaces/src/blockchain_tree/mod.rs
@@ -78,6 +78,13 @@ pub trait BlockchainTreeEngine: BlockchainTreeViewer + Send + Sync {
         last_finalized_block: BlockNumber,
     ) -> RethResult<()>;
 
+    /// Updates all block hashes: iterates over the present and new lists of canonical hashes,
+    /// compares them, removes any mismatches and disconnects them, removes all chains, and
+    /// clears all buffered blocks before the tip.
+    fn update_block_hashes_and_clear_buffered(
+        &self,
+    ) -> RethResult<BTreeMap<BlockNumber, BlockHash>>;
+
     /// Reads the last `N` canonical hashes from the database and updates the block indices of the
     /// tree by attempting to connect the buffered blocks to canonical hashes.
     ///
diff --git a/crates/interfaces/src/error.rs b/crates/interfaces/src/error.rs
index c49323595..38498c312 100644
--- a/crates/interfaces/src/error.rs
+++ b/crates/interfaces/src/error.rs
@@ -1,10 +1,10 @@
 use crate::{
     blockchain_tree::error::{BlockchainTreeError, CanonicalError},
-    consensus::ConsensusError,
     db::DatabaseError,
     executor::BlockExecutionError,
     provider::ProviderError,
 };
+use reth_consensus::ConsensusError;
 use reth_network_api::NetworkError;
 use reth_primitives::fs::FsPathError;
 
@@ -16,7 +16,7 @@ pub type RethResult<T> = Result<T, RethError>;
 /// This enum encapsulates various error types that can occur during blockchain interactions.
 ///
 /// It allows for structured error handling based on the nature of the encountered issue.
-#[derive(Debug, thiserror::Error, Clone, PartialEq, Eq)]
+#[derive(Debug, thiserror::Error)]
 pub enum RethError {
     /// Error encountered during block execution.
     #[error(transparent)]
diff --git a/crates/interfaces/src/executor.rs b/crates/interfaces/src/executor.rs
index 25e2f5710..04b9832f0 100644
--- a/crates/interfaces/src/executor.rs
+++ b/crates/interfaces/src/executor.rs
@@ -80,7 +80,7 @@ pub enum BlockValidationError {
 }
 
 /// BlockExecutor Errors
-#[derive(Error, Debug, Clone, PartialEq, Eq)]
+#[derive(Error, Debug)]
 pub enum BlockExecutionError {
     /// Validation error, transparently wrapping `BlockValidationError`
     #[error(transparent)]
@@ -118,39 +118,37 @@ pub enum BlockExecutionError {
     /// Error when fetching latest block state.
     #[error(transparent)]
     LatestBlock(#[from] ProviderError),
-    /// Optimism Block Executor Errors
-    #[cfg(feature = "optimism")]
     #[error(transparent)]
-    OptimismBlockExecution(#[from] OptimismBlockExecutionError),
-}
-
-/// Optimism Block Executor Errors
-#[cfg(feature = "optimism")]
-#[derive(Error, Debug, Clone, PartialEq, Eq)]
-pub enum OptimismBlockExecutionError {
-    /// Error when trying to parse L1 block info
-    #[error("could not get L1 block info from L2 block: {message:?}")]
-    L1BlockInfoError {
-        /// The inner error message
-        message: String,
-    },
-    /// Thrown when force deploy of create2deployer code fails.
-    #[error("failed to force create2deployer account code")]
-    ForceCreate2DeployerFail,
-    /// Thrown when a blob transaction is included in a sequencer's block.
-    #[error("blob transaction included in sequencer block")]
-    BlobTransactionRejected,
-    /// Thrown when a database account could not be loaded.
-    #[error("failed to load account {0}")]
-    AccountLoadFailed(reth_primitives::Address),
+    Other(Box<dyn std::error::Error + Send + Sync>),
 }
 
 impl BlockExecutionError {
+    /// Create a new `BlockExecutionError::Other` variant.
+    pub fn other<E>(error: E) -> Self
+    where
+        E: std::error::Error + Send + Sync + 'static,
+    {
+        Self::Other(Box::new(error))
+    }
+
+    /// Returns the inner `BlockValidationError` if the error is a validation error.
+    pub const fn as_validation(&self) -> Option<&BlockValidationError> {
+        match self {
+            Self::Validation(err) => Some(err),
+            _ => None,
+        }
+    }
+
     /// Returns `true` if the error is fatal.
     ///
     /// This represents an unrecoverable database related error.
     pub fn is_fatal(&self) -> bool {
         matches!(self, Self::CanonicalCommit { .. } | Self::CanonicalRevert { .. })
     }
+
+    /// Returns `true` if the error is a state root error.
+    pub fn is_state_root_error(&self) -> bool {
+        matches!(self, Self::Validation(BlockValidationError::StateRoot(_)))
+    }
 }
diff --git a/crates/interfaces/src/lib.rs b/crates/interfaces/src/lib.rs
index b8cfb7b39..e60d4a621 100644
--- a/crates/interfaces/src/lib.rs
+++ b/crates/interfaces/src/lib.rs
@@ -12,9 +12,6 @@
 #![cfg_attr(not(test), warn(unused_crate_dependencies))]
 #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
 
-/// Consensus traits.
-pub mod consensus; - /// Database error pub mod db; diff --git a/crates/interfaces/src/p2p/download.rs b/crates/interfaces/src/p2p/download.rs index b9fb6ab3e..823860507 100644 --- a/crates/interfaces/src/p2p/download.rs +++ b/crates/interfaces/src/p2p/download.rs @@ -1,4 +1,4 @@ -use reth_primitives::PeerId; +use reth_network_types::PeerId; use std::fmt::Debug; /// Generic download client for peer penalization diff --git a/crates/interfaces/src/p2p/either.rs b/crates/interfaces/src/p2p/either.rs index 1a6bd170c..af7f15018 100644 --- a/crates/interfaces/src/p2p/either.rs +++ b/crates/interfaces/src/p2p/either.rs @@ -22,7 +22,7 @@ where A: DownloadClient, B: DownloadClient, { - fn report_bad_message(&self, peer_id: reth_primitives::PeerId) { + fn report_bad_message(&self, peer_id: reth_network_types::PeerId) { match self { EitherDownloader::Left(a) => a.report_bad_message(peer_id), EitherDownloader::Right(b) => b.report_bad_message(peer_id), diff --git a/crates/interfaces/src/p2p/error.rs b/crates/interfaces/src/p2p/error.rs index 3c4e351fc..1a847b649 100644 --- a/crates/interfaces/src/p2p/error.rs +++ b/crates/interfaces/src/p2p/error.rs @@ -1,8 +1,10 @@ use super::headers::client::HeadersRequest; -use crate::{consensus::ConsensusError, db::DatabaseError, provider::ProviderError}; +use crate::{db::DatabaseError, provider::ProviderError}; +use reth_consensus::ConsensusError; use reth_network_api::ReputationChangeKind; +use reth_network_types::WithPeerId; use reth_primitives::{ - BlockHashOrNumber, BlockNumber, GotExpected, GotExpectedBoxed, Header, WithPeerId, B256, + BlockHashOrNumber, BlockNumber, GotExpected, GotExpectedBoxed, Header, B256, }; use std::ops::RangeInclusive; use thiserror::Error; @@ -11,7 +13,7 @@ use tokio::sync::{mpsc, oneshot}; /// Result alias for result of a request. pub type RequestResult = Result; -/// Result with [PeerId][reth_primitives::PeerId] +/// Result with [PeerId][reth_network_types::PeerId] pub type PeerRequestResult = RequestResult>; /// Helper trait used to validate responses. 
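
Related to the executor changes earlier in this diff: the optimism-specific error enum is gone, and downstream crates are expected to wrap their own error types via the new catch-all variant. A sketch, where `L1BlockInfoError` is a hypothetical stand-in for an error an OP-stack executor would now define locally:

    // Hypothetical local error wrapped via BlockExecutionError::other.
    #[derive(Debug, thiserror::Error)]
    #[error("could not get L1 block info from L2 block")]
    struct L1BlockInfoError;

    fn wrap() -> BlockExecutionError {
        let err = BlockExecutionError::other(L1BlockInfoError);
        // The catch-all variant is not a validation error.
        debug_assert!(err.as_validation().is_none());
        err
    }
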
@@ -157,10 +159,12 @@ pub enum DownloadError { /* ==================== BODIES ERRORS ==================== */ /// Block validation failed - #[error("failed to validate body for header {hash}: {error}")] + #[error("failed to validate body for header {hash}, block number {number}: {error}")] BodyValidation { - /// Hash of header failing validation + /// Hash of the block failing validation hash: B256, + /// Number of the block failing validation + number: u64, /// The details of validation failure #[source] error: Box, diff --git a/crates/interfaces/src/p2p/full_block.rs b/crates/interfaces/src/p2p/full_block.rs index 3ab8e7644..dd8cfff4d 100644 --- a/crates/interfaces/src/p2p/full_block.rs +++ b/crates/interfaces/src/p2p/full_block.rs @@ -1,15 +1,14 @@ use super::headers::client::HeadersRequest; -use crate::{ - consensus::{Consensus, ConsensusError}, - p2p::{ - bodies::client::{BodiesClient, SingleBodyRequest}, - error::PeerRequestResult, - headers::client::{HeadersClient, SingleHeaderRequest}, - }, +use crate::p2p::{ + bodies::client::{BodiesClient, SingleBodyRequest}, + error::PeerRequestResult, + headers::client::{HeadersClient, SingleHeaderRequest}, }; use futures::Stream; +use reth_consensus::{Consensus, ConsensusError}; +use reth_network_types::WithPeerId; use reth_primitives::{ - BlockBody, GotExpected, Header, HeadersDirection, SealedBlock, SealedHeader, WithPeerId, B256, + BlockBody, GotExpected, Header, HeadersDirection, SealedBlock, SealedHeader, B256, }; use std::{ cmp::Reverse, @@ -38,7 +37,7 @@ impl FullBlockClient { /// Returns a client with Test consensus #[cfg(any(test, feature = "test-utils"))] pub fn test_client(client: Client) -> Self { - Self::new(client, Arc::new(crate::test_utils::TestConsensus::default())) + Self::new(client, Arc::new(reth_consensus::test_utils::TestConsensus::default())) } } diff --git a/crates/interfaces/src/p2p/headers/downloader.rs b/crates/interfaces/src/p2p/headers/downloader.rs index 9eea13aab..500a1a1bc 100644 --- a/crates/interfaces/src/p2p/headers/downloader.rs +++ b/crates/interfaces/src/p2p/headers/downloader.rs @@ -1,11 +1,8 @@ use super::error::HeadersDownloaderResult; -use crate::{ - consensus::Consensus, - p2p::error::{DownloadError, DownloadResult}, -}; +use crate::p2p::error::{DownloadError, DownloadResult}; use futures::Stream; +use reth_consensus::Consensus; use reth_primitives::{BlockHashOrNumber, SealedHeader, B256}; - /// A downloader capable of fetching and yielding block headers. /// /// A downloader represents a distinct strategy for submitting requests to download block headers, diff --git a/crates/interfaces/src/p2p/headers/error.rs b/crates/interfaces/src/p2p/headers/error.rs index 12eab9548..f586aaf74 100644 --- a/crates/interfaces/src/p2p/headers/error.rs +++ b/crates/interfaces/src/p2p/headers/error.rs @@ -1,4 +1,4 @@ -use crate::consensus::ConsensusError; +use reth_consensus::ConsensusError; use reth_primitives::SealedHeader; use thiserror::Error; diff --git a/crates/interfaces/src/p2p/headers/mod.rs b/crates/interfaces/src/p2p/headers/mod.rs index 5746c1b2d..56aabf9d6 100644 --- a/crates/interfaces/src/p2p/headers/mod.rs +++ b/crates/interfaces/src/p2p/headers/mod.rs @@ -6,7 +6,7 @@ pub mod client; /// A downloader that receives and verifies block headers, is generic /// over the Consensus and the HeadersClient being used. 
/// -/// [`Consensus`]: crate::consensus::Consensus +/// [`Consensus`]: reth_consensus::Consensus /// [`HeadersClient`]: client::HeadersClient pub mod downloader; diff --git a/crates/interfaces/src/p2p/mod.rs b/crates/interfaces/src/p2p/mod.rs index 8e4d7c84f..75f3a8fc4 100644 --- a/crates/interfaces/src/p2p/mod.rs +++ b/crates/interfaces/src/p2p/mod.rs @@ -14,7 +14,7 @@ pub mod full_block; /// of a Linear and a Parallel downloader generic over the [`Consensus`] and /// [`HeadersClient`]. /// -/// [`Consensus`]: crate::consensus::Consensus +/// [`Consensus`]: reth_consensus::Consensus /// [`HeadersClient`]: crate::p2p::headers::client::HeadersClient pub mod headers; diff --git a/crates/interfaces/src/test_utils/bodies.rs b/crates/interfaces/src/test_utils/bodies.rs index e1d42a2a5..8f0bfcef0 100644 --- a/crates/interfaces/src/test_utils/bodies.rs +++ b/crates/interfaces/src/test_utils/bodies.rs @@ -22,7 +22,7 @@ impl Debug for TestBodiesClient { } impl DownloadClient for TestBodiesClient { - fn report_bad_message(&self, _peer_id: reth_primitives::PeerId) { + fn report_bad_message(&self, _peer_id: reth_network_types::PeerId) { // noop } diff --git a/crates/interfaces/src/test_utils/full_block.rs b/crates/interfaces/src/test_utils/full_block.rs index a97104919..95c1c2b3a 100644 --- a/crates/interfaces/src/test_utils/full_block.rs +++ b/crates/interfaces/src/test_utils/full_block.rs @@ -6,9 +6,10 @@ use crate::p2p::{ priority::Priority, }; use parking_lot::Mutex; +use reth_network_types::{PeerId, WithPeerId}; use reth_primitives::{ - BlockBody, BlockHashOrNumber, BlockNumHash, Header, HeadersDirection, PeerId, SealedBlock, - SealedHeader, WithPeerId, B256, + BlockBody, BlockHashOrNumber, BlockNumHash, Header, HeadersDirection, SealedBlock, + SealedHeader, B256, }; use std::{collections::HashMap, sync::Arc}; diff --git a/crates/interfaces/src/test_utils/generators.rs b/crates/interfaces/src/test_utils/generators.rs index e601d9629..506358276 100644 --- a/crates/interfaces/src/test_utils/generators.rs +++ b/crates/interfaces/src/test_utils/generators.rs @@ -4,10 +4,9 @@ use rand::{ }; use reth_primitives::{ proofs, sign_message, Account, Address, BlockNumber, Bytes, Header, Log, Receipt, SealedBlock, - SealedHeader, StorageEntry, Transaction, TransactionKind, TransactionSigned, TxLegacy, B256, - U256, + SealedHeader, StorageEntry, Transaction, TransactionSigned, TxKind, TxLegacy, B256, U256, }; -use secp256k1::{KeyPair, Secp256k1}; +use secp256k1::{Keypair, Secp256k1}; use std::{ cmp::{max, min}, collections::{hash_map::DefaultHasher, BTreeMap}, @@ -79,7 +78,7 @@ pub fn random_tx(rng: &mut R) -> Transaction { nonce: rng.gen::().into(), gas_price: rng.gen::().into(), gas_limit: rng.gen::().into(), - to: TransactionKind::Call(rng.gen()), + to: TxKind::Call(rng.gen()), value: U256::from(rng.gen::()), input: Bytes::default(), }) @@ -92,22 +91,22 @@ pub fn random_tx(rng: &mut R) -> Transaction { /// - There is no guarantee that the nonce is not used twice for the same account pub fn random_signed_tx(rng: &mut R) -> TransactionSigned { let secp = Secp256k1::new(); - let key_pair = KeyPair::new(&secp, rng); + let key_pair = Keypair::new(&secp, rng); let tx = random_tx(rng); sign_tx_with_key_pair(key_pair, tx) } /// Signs the [Transaction] with the given key pair. 
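
The `secp256k1` upgrade renames `KeyPair` to `Keypair` throughout. A short sketch of the updated generator helpers, where `random_tx` and `sign_tx_with_key_pair` are the functions shown in this file and `signed_tx_example` is an illustrative wrapper:

    // Signing a random transaction with the renamed secp256k1 Keypair type.
    use rand::thread_rng;
    use secp256k1::{Keypair, Secp256k1};

    fn signed_tx_example() -> TransactionSigned {
        let mut rng = thread_rng();
        let secp = Secp256k1::new();
        let key_pair = Keypair::new(&secp, &mut rng);
        sign_tx_with_key_pair(key_pair, random_tx(&mut rng))
    }
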
-pub fn sign_tx_with_key_pair(key_pair: KeyPair, tx: Transaction) -> TransactionSigned { +pub fn sign_tx_with_key_pair(key_pair: Keypair, tx: Transaction) -> TransactionSigned { let signature = sign_message(B256::from_slice(&key_pair.secret_bytes()[..]), tx.signature_hash()).unwrap(); TransactionSigned::from_transaction_and_signature(tx, signature) } -/// Generates a set of [KeyPair]s based on the desired count. -pub fn generate_keys(rng: &mut R, count: usize) -> Vec { +/// Generates a set of [Keypair]s based on the desired count. +pub fn generate_keys(rng: &mut R, count: usize) -> Vec { let secp = Secp256k1::new(); - (0..count).map(|_| KeyPair::new(&secp, rng)).collect() + (0..count).map(|_| Keypair::new(&secp, rng)).collect() } /// Generate a random block filled with signed transactions (generated using @@ -395,7 +394,7 @@ mod tests { chain_id: 1, nonce: 0x42, gas_limit: 44386, - to: TransactionKind::Call(hex!("6069a6c32cf691f5982febae4faf8a6f3ab2f0f6").into()), + to: TxKind::Call(hex!("6069a6c32cf691f5982febae4faf8a6f3ab2f0f6").into()), value: U256::from(0_u64), input: hex!("a22cb4650000000000000000000000005eee75727d804a2b13038928d36f8b188945a57a0000000000000000000000000000000000000000000000000000000000000000").into(), max_fee_per_gas: 0x4a817c800, @@ -405,7 +404,7 @@ mod tests { let signature_hash = tx.signature_hash(); for _ in 0..100 { - let key_pair = KeyPair::new(&secp, &mut rand::thread_rng()); + let key_pair = Keypair::new(&secp, &mut rand::thread_rng()); let signature = sign_message(B256::from_slice(&key_pair.secret_bytes()[..]), signature_hash) @@ -427,7 +426,7 @@ mod tests { nonce: 9, gas_price: 20 * 10_u128.pow(9), gas_limit: 21000, - to: TransactionKind::Call(hex!("3535353535353535353535353535353535353535").into()), + to: TxKind::Call(hex!("3535353535353535353535353535353535353535").into()), value: U256::from(10_u128.pow(18)), input: Bytes::default(), }); diff --git a/crates/interfaces/src/test_utils/headers.rs b/crates/interfaces/src/test_utils/headers.rs index 8262d9ae0..0272c68d3 100644 --- a/crates/interfaces/src/test_utils/headers.rs +++ b/crates/interfaces/src/test_utils/headers.rs @@ -1,33 +1,32 @@ //! Testing support for headers related interfaces. -use crate::{ - consensus::{self, Consensus, ConsensusError}, - p2p::{ - download::DownloadClient, - error::{DownloadError, DownloadResult, PeerRequestResult, RequestError}, - headers::{ - client::{HeadersClient, HeadersRequest}, - downloader::{HeaderDownloader, SyncTarget}, - error::HeadersDownloaderResult, - }, - priority::Priority, - }, -}; -use futures::{Future, FutureExt, Stream, StreamExt}; -use reth_primitives::{ - Header, HeadersDirection, PeerId, SealedBlock, SealedHeader, WithPeerId, U256, -}; use std::{ fmt, pin::Pin, sync::{ - atomic::{AtomicBool, AtomicU64, Ordering}, + atomic::{AtomicU64, Ordering}, Arc, }, task::{ready, Context, Poll}, }; + +use futures::{Future, FutureExt, Stream, StreamExt}; use tokio::sync::Mutex; +use crate::p2p::{ + download::DownloadClient, + error::{DownloadError, DownloadResult, PeerRequestResult, RequestError}, + headers::{ + client::{HeadersClient, HeadersRequest}, + downloader::{HeaderDownloader, SyncTarget}, + error::HeadersDownloaderResult, + }, + priority::Priority, +}; +use reth_consensus::{test_utils::TestConsensus, Consensus}; +use reth_network_types::{PeerId, WithPeerId}; +use reth_primitives::{Header, HeadersDirection, SealedHeader}; + /// A test downloader which just returns the values that have been pushed to it. 
#[derive(Debug)] pub struct TestHeaderDownloader { @@ -245,70 +244,3 @@ impl HeadersClient for TestHeadersClient { }) } } - -/// Consensus engine implementation for testing -#[derive(Debug)] -pub struct TestConsensus { - /// Flag whether the header validation should purposefully fail - fail_validation: AtomicBool, -} - -impl Default for TestConsensus { - fn default() -> Self { - Self { fail_validation: AtomicBool::new(false) } - } -} - -impl TestConsensus { - /// Get the failed validation flag. - pub fn fail_validation(&self) -> bool { - self.fail_validation.load(Ordering::SeqCst) - } - - /// Update the validation flag. - pub fn set_fail_validation(&self, val: bool) { - self.fail_validation.store(val, Ordering::SeqCst) - } -} - -impl Consensus for TestConsensus { - fn validate_header(&self, _header: &SealedHeader) -> Result<(), ConsensusError> { - if self.fail_validation() { - Err(consensus::ConsensusError::BaseFeeMissing) - } else { - Ok(()) - } - } - - fn validate_header_against_parent( - &self, - _header: &SealedHeader, - _parent: &SealedHeader, - ) -> Result<(), ConsensusError> { - if self.fail_validation() { - Err(consensus::ConsensusError::BaseFeeMissing) - } else { - Ok(()) - } - } - - fn validate_header_with_total_difficulty( - &self, - _header: &Header, - _total_difficulty: U256, - ) -> Result<(), ConsensusError> { - if self.fail_validation() { - Err(consensus::ConsensusError::BaseFeeMissing) - } else { - Ok(()) - } - } - - fn validate_block(&self, _block: &SealedBlock) -> Result<(), consensus::ConsensusError> { - if self.fail_validation() { - Err(consensus::ConsensusError::BaseFeeMissing) - } else { - Ok(()) - } - } -} diff --git a/crates/metrics/src/common/mpsc.rs b/crates/metrics/src/common/mpsc.rs index 3c35c745e..98c670ef7 100644 --- a/crates/metrics/src/common/mpsc.rs +++ b/crates/metrics/src/common/mpsc.rs @@ -173,7 +173,7 @@ impl MeteredSender { /// Calls the underlying [Sender](mpsc::Sender)'s `send`, incrementing the appropriate /// metrics depending on the result. - pub async fn send(&mut self, value: T) -> Result<(), SendError> { + pub async fn send(&self, value: T) -> Result<(), SendError> { match self.sender.send(value).await { Ok(()) => { self.metrics.messages_sent.increment(1); diff --git a/crates/net/common/Cargo.toml b/crates/net/common/Cargo.toml index 8d85fc906..0c3b253a5 100644 --- a/crates/net/common/Cargo.toml +++ b/crates/net/common/Cargo.toml @@ -13,7 +13,7 @@ workspace = true [dependencies] # reth -reth-primitives.workspace = true +reth-network-types.workspace = true # async pin-project.workspace = true diff --git a/crates/net/common/src/ban_list.rs b/crates/net/common/src/ban_list.rs index 0527c8620..11d4c6049 100644 --- a/crates/net/common/src/ban_list.rs +++ b/crates/net/common/src/ban_list.rs @@ -1,6 +1,6 @@ //! Support for banning peers. -use reth_primitives::PeerId; +use reth_network_types::PeerId; use std::{collections::HashMap, net::IpAddr, time::Instant}; /// Determines whether or not the IP is globally routable. 
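
`TestConsensus` moves out of `reth_interfaces::test_utils` and into `reth_consensus::test_utils` (behind its `test-utils` feature), with unchanged behavior: flipping the flag makes every validation method fail. A usage sketch, assuming a `SealedHeader` that passes default validation:

    // Exercising the relocated TestConsensus helper.
    use reth_consensus::{test_utils::TestConsensus, Consensus};

    fn toggle_validation(header: &SealedHeader) {
        let consensus = TestConsensus::default();
        assert!(consensus.validate_header(header).is_ok());
        consensus.set_fail_validation(true);
        // All validation methods now return an error.
        assert!(consensus.validate_header(header).is_err());
    }
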
diff --git a/crates/net/discv4/Cargo.toml b/crates/net/discv4/Cargo.toml index fa0e284ff..49e9b4ecc 100644 --- a/crates/net/discv4/Cargo.toml +++ b/crates/net/discv4/Cargo.toml @@ -16,13 +16,18 @@ workspace = true reth-primitives.workspace = true reth-net-common.workspace = true reth-net-nat.workspace = true +reth-network-types.workspace = true # ethereum alloy-rlp = { workspace = true, features = ["derive"] } discv5.workspace = true -secp256k1 = { workspace = true, features = ["global-context", "rand-std", "recovery", "serde"] } -enr = { workspace = true, default-features = false, features = ["rust-secp256k1"] } -rlp = "0.5" # needed for enr +secp256k1 = { workspace = true, features = [ + "global-context", + "rand-std", + "recovery", + "serde", +] } +enr.workspace = true # async/futures tokio = { workspace = true, features = ["io-util", "net", "time"] } tokio-stream.workspace = true @@ -36,6 +41,7 @@ generic-array = "0.14" serde = { workspace = true, optional = true } [dev-dependencies] +assert_matches.workspace = true rand.workspace = true tokio = { workspace = true, features = ["macros"] } reth-tracing.workspace = true diff --git a/crates/net/discv4/src/config.rs b/crates/net/discv4/src/config.rs index 8da6db4b7..c9007a910 100644 --- a/crates/net/discv4/src/config.rs +++ b/crates/net/discv4/src/config.rs @@ -197,6 +197,12 @@ impl Discv4ConfigBuilder { self } + /// Sets the expiration duration for lookup neighbor requests + pub fn lookup_neighbours_expiration(&mut self, duration: Duration) -> &mut Self { + self.config.neighbours_expiration = duration; + self + } + /// Sets the expiration duration for a bond with a peer pub fn bond_expiration(&mut self, duration: Duration) -> &mut Self { self.config.bond_expiration = duration; diff --git a/crates/net/discv4/src/lib.rs b/crates/net/discv4/src/lib.rs index 48e25c163..77cc309eb 100644 --- a/crates/net/discv4/src/lib.rs +++ b/crates/net/discv4/src/lib.rs @@ -28,7 +28,6 @@ use crate::{ error::{DecodePacketError, Discv4Error}, proto::{FindNode, Message, Neighbours, Packet, Ping, Pong}, }; -use alloy_rlp::{RlpDecodable, RlpEncodable}; use discv5::{ kbucket, kbucket::{ @@ -39,8 +38,9 @@ use discv5::{ }; use enr::Enr; use parking_lot::Mutex; -use proto::{EnrRequest, EnrResponse, EnrWrapper}; -use reth_primitives::{bytes::Bytes, hex, ForkId, PeerId, B256}; +use proto::{EnrRequest, EnrResponse}; +use reth_network_types::PeerId; +use reth_primitives::{bytes::Bytes, hex, ForkId, B256}; use secp256k1::SecretKey; use std::{ cell::RefCell, @@ -94,16 +94,6 @@ pub const DEFAULT_DISCOVERY_ADDR: IpAddr = IpAddr::V4(Ipv4Addr::UNSPECIFIED); /// Note: the default TCP port is the same. pub const DEFAULT_DISCOVERY_PORT: u16 = 30303; -/// The default address for discv5 via UDP. -/// -/// Note: the default TCP address is the same. -pub const DEFAULT_DISCOVERY_V5_ADDR: IpAddr = IpAddr::V4(Ipv4Addr::UNSPECIFIED); - -/// The default port for discv5 via UDP. -/// -/// Default is port 9000. -pub const DEFAULT_DISCOVERY_V5_PORT: u16 = 9000; - /// The default address for discv4 via UDP: "0.0.0.0:30303" /// /// Note: The default TCP address is the same. @@ -119,10 +109,21 @@ const MIN_PACKET_SIZE: usize = 32 + 65 + 1; /// Concurrency factor for `FindNode` requests to pick `ALPHA` closest nodes, const ALPHA: usize = 3; -/// Maximum number of nodes to ping at concurrently. 2 full `Neighbours` responses with 16 _new_ -/// nodes. This will apply some backpressure in recursive lookups. +/// Maximum number of nodes to ping at concurrently. 
+/// +/// This corresponds to 2 full `Neighbours` responses with 16 _new_ nodes. This will apply some +/// backpressure in recursive lookups. const MAX_NODES_PING: usize = 2 * MAX_NODES_PER_BUCKET; +/// Maximum number of pings to keep queued. +/// +/// If we are currently sending too many pings, any new pings will be queued. To prevent unbounded +/// growth of the queue, the queue has a maximum capacity, after which any additional pings will be +/// discarded. +/// +/// This corresponds to 2 full `Neighbours` responses with 16 new nodes. +const MAX_QUEUED_PINGS: usize = 2 * MAX_NODES_PER_BUCKET; + /// The size of the datagram is limited [`MAX_PACKET_SIZE`], 16 nodes, as the discv4 specifies don't /// fit in one datagram. The safe number of nodes that always fit in a datagram is 12, with worst /// case all of them being IPv6 nodes. This is calculated by `(MAX_PACKET_SIZE - (header + expire + @@ -211,7 +212,8 @@ impl Discv4 { /// # use std::io; /// use rand::thread_rng; /// use reth_discv4::{Discv4, Discv4Config}; - /// use reth_primitives::{pk2id, NodeRecord, PeerId}; + /// use reth_network_types::{pk2id, PeerId}; + /// use reth_primitives::NodeRecord; /// use secp256k1::SECP256K1; /// use std::{net::SocketAddr, str::FromStr}; /// # async fn t() -> io::Result<()> { @@ -569,7 +571,7 @@ impl Discv4Service { _tasks: tasks, ingress: ingress_rx, egress: egress_tx, - queued_pings: Default::default(), + queued_pings: VecDeque::with_capacity(MAX_QUEUED_PINGS), pending_pings: Default::default(), pending_lookup: Default::default(), pending_find_nodes: Default::default(), @@ -980,7 +982,7 @@ impl Discv4Service { } /// Encodes the packet, sends it and returns the hash. - pub(crate) fn send_packet(&mut self, msg: Message, to: SocketAddr) -> B256 { + pub(crate) fn send_packet(&self, msg: Message, to: SocketAddr) -> B256 { let (payload, hash) = msg.encode(&self.secret_key); trace!(target: "discv4", r#type=?msg.msg_type(), ?to, ?hash, "sending packet"); let _ = self.egress.try_send((payload, to)).map_err(|err| { @@ -1130,7 +1132,7 @@ impl Discv4Service { if self.pending_pings.len() < MAX_NODES_PING { self.send_ping(node, reason); - } else { + } else if self.queued_pings.len() < MAX_QUEUED_PINGS { self.queued_pings.push_back((node, reason)); } } @@ -1265,7 +1267,7 @@ impl Discv4Service { /// Handler for incoming `EnrRequest` message fn on_enr_request( - &mut self, + &self, msg: EnrRequest, remote_addr: SocketAddr, id: PeerId, @@ -1279,7 +1281,7 @@ impl Discv4Service { self.send_packet( Message::EnrResponse(EnrResponse { request_hash, - enr: EnrWrapper::new(self.local_eip_868_enr.clone()), + enr: self.local_eip_868_enr.clone(), }), remote_addr, ); @@ -1375,7 +1377,16 @@ impl Discv4Service { BucketEntry::SelfEntry => { // we received our own node entry } - _ => self.find_node(&closest, ctx.clone()), + BucketEntry::Present(mut entry, _) => { + if entry.value_mut().has_endpoint_proof { + self.find_node(&closest, ctx.clone()); + } + } + BucketEntry::Pending(mut entry, _) => { + if entry.value().has_endpoint_proof { + self.find_node(&closest, ctx.clone()); + } + } } } } @@ -1420,7 +1431,7 @@ impl Discv4Service { let mut failed_lookups = Vec::new(); self.pending_lookup.retain(|node_id, (lookup_sent_at, _)| { - if now.duration_since(*lookup_sent_at) > self.config.ping_expiration { + if now.duration_since(*lookup_sent_at) > self.config.request_timeout { failed_lookups.push(*node_id); return false } @@ -1440,7 +1451,7 @@ impl Discv4Service { fn evict_failed_neighbours(&mut self, now: Instant) { let mut 
failed_neighbours = Vec::new(); self.pending_find_nodes.retain(|node_id, find_node_request| { - if now.duration_since(find_node_request.sent_at) > self.config.request_timeout { + if now.duration_since(find_node_request.sent_at) > self.config.neighbours_expiration { if !find_node_request.answered { // node actually responded but with fewer entries than expected, but we don't // treat this as an hard error since it responded. @@ -2174,33 +2185,13 @@ pub enum DiscoveryUpdate { Batch(Vec), } -/// Represents a forward-compatible ENR entry for including the forkid in a node record via -/// EIP-868. Forward compatibility is achieved by allowing trailing fields. -/// -/// See: -/// -/// -/// for how geth implements ForkId values and forward compatibility. -#[derive(Debug, Clone, PartialEq, Eq, RlpEncodable, RlpDecodable)] -#[rlp(trailing)] -pub struct EnrForkIdEntry { - /// The inner forkid - pub fork_id: ForkId, -} - -impl From for EnrForkIdEntry { - fn from(fork_id: ForkId) -> Self { - Self { fork_id } - } -} - #[cfg(test)] mod tests { use super::*; use crate::test_utils::{create_discv4, create_discv4_with_config, rng_endpoint, rng_record}; use alloy_rlp::{Decodable, Encodable}; use rand::{thread_rng, Rng}; - use reth_primitives::{hex, mainnet_nodes, ForkHash}; + use reth_primitives::{hex, mainnet_nodes, EnrForkIdEntry, ForkHash}; use std::future::poll_fn; #[tokio::test] @@ -2568,6 +2559,7 @@ mod tests { let config = Discv4Config::builder() .request_timeout(Duration::from_millis(200)) .ping_expiration(Duration::from_millis(200)) + .lookup_neighbours_expiration(Duration::from_millis(200)) .add_eip868_pair("eth", fork_id) .build(); let (_disv4, mut service) = create_discv4_with_config(config).await; diff --git a/crates/net/discv4/src/node.rs b/crates/net/discv4/src/node.rs index 2e8dc1773..62e45db0e 100644 --- a/crates/net/discv4/src/node.rs +++ b/crates/net/discv4/src/node.rs @@ -1,5 +1,6 @@ use generic_array::GenericArray; -use reth_primitives::{keccak256, NodeRecord, PeerId}; +use reth_network_types::PeerId; +use reth_primitives::{keccak256, NodeRecord}; /// The key type for the table. #[derive(Debug, Copy, Clone, Eq, PartialEq)] diff --git a/crates/net/discv4/src/proto.rs b/crates/net/discv4/src/proto.rs index 8bbb84b62..62dd9235d 100644 --- a/crates/net/discv4/src/proto.rs +++ b/crates/net/discv4/src/proto.rs @@ -1,13 +1,12 @@ //! Discovery v4 protocol implementation. 
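
The new `lookup_neighbours_expiration` builder knob (added in config.rs earlier in this diff) lets pending `FindNode` lookups time out independently of pings; this sketch mirrors the builder usage in the test above, with arbitrary durations:

    use reth_discv4::Discv4Config;
    use std::time::Duration;

    fn example_config() -> Discv4Config {
        Discv4Config::builder()
            .request_timeout(Duration::from_millis(200))
            .ping_expiration(Duration::from_millis(200))
            // Bounds how long we wait for a Neighbours response to a FindNode.
            .lookup_neighbours_expiration(Duration::from_millis(200))
            .build()
    }
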
-use crate::{error::DecodePacketError, EnrForkIdEntry, PeerId, MAX_PACKET_SIZE, MIN_PACKET_SIZE}; -use alloy_rlp::{ - length_of_length, Decodable, Encodable, Error as RlpError, Header, RlpDecodable, RlpEncodable, -}; -use enr::{Enr, EnrKey}; +use crate::{error::DecodePacketError, MAX_PACKET_SIZE, MIN_PACKET_SIZE}; +use alloy_rlp::{Decodable, Encodable, Error as RlpError, Header, RlpDecodable, RlpEncodable}; +use enr::Enr; +use reth_network_types::{pk2id, PeerId}; use reth_primitives::{ bytes::{Buf, BufMut, Bytes, BytesMut}, - keccak256, pk2id, ForkId, NodeRecord, B256, + keccak256, EnrForkIdEntry, ForkId, NodeRecord, B256, }; use secp256k1::{ ecdsa::{RecoverableSignature, RecoveryId}, @@ -112,8 +111,7 @@ impl Message { // Sign the payload with the secret key using recoverable ECDSA let signature: RecoverableSignature = SECP256K1.sign_ecdsa_recoverable( - &secp256k1::Message::from_slice(keccak256(&payload).as_ref()) - .expect("B256.len() == MESSAGE_SIZE"), + &secp256k1::Message::from_digest(keccak256(&payload).0), secret_key, ); @@ -158,7 +156,7 @@ impl Message { let recoverable_sig = RecoverableSignature::from_compact(signature, recovery_id)?; // recover the public key - let msg = secp256k1::Message::from_slice(keccak256(&packet[97..]).as_slice())?; + let msg = secp256k1::Message::from_digest(keccak256(&packet[97..]).0); let pk = SECP256K1.recover_ecdsa(&msg, &recoverable_sig)?; let node_id = pk2id(&pk); @@ -217,7 +215,7 @@ impl NodeEndpoint { } /// A [FindNode packet](https://github.com/ethereum/devp2p/blob/master/discv4.md#findnode-packet-0x03). -#[derive(Clone, Copy, Debug, Eq, PartialEq, RlpEncodable, RlpDecodable)] +#[derive(Clone, Copy, Debug, Eq, PartialEq, RlpEncodable)] pub struct FindNode { /// The target node's ID, a 64-byte secp256k1 public key. pub id: PeerId, @@ -225,8 +223,41 @@ pub struct FindNode { pub expire: u64, } +impl Decodable for FindNode { + // NOTE(onbjerg): Manual implementation to satisfy EIP-8. + // + // See https://eips.ethereum.org/EIPS/eip-8 + fn decode(buf: &mut &[u8]) -> alloy_rlp::Result { + let b = &mut &**buf; + let rlp_head = Header::decode(b)?; + if !rlp_head.list { + return Err(RlpError::UnexpectedString) + } + let started_len = b.len(); + + let this = Self { id: Decodable::decode(b)?, expire: Decodable::decode(b)? }; + + // NOTE(onbjerg): Because of EIP-8, we only check that we did not consume *more* than the + // payload length, i.e. it is ok if payload length is greater than what we consumed, as we + // just discard the remaining list items + let consumed = started_len - b.len(); + if consumed > rlp_head.payload_length { + return Err(RlpError::ListLengthMismatch { + expected: rlp_head.payload_length, + got: consumed, + }) + } + + let rem = rlp_head.payload_length - consumed; + b.advance(rem); + *buf = *b; + + Ok(this) + } +} + /// A [Neighbours packet](https://github.com/ethereum/devp2p/blob/master/discv4.md#neighbors-packet-0x04). -#[derive(Clone, Debug, Eq, PartialEq, RlpEncodable, RlpDecodable)] +#[derive(Clone, Debug, Eq, PartialEq, RlpEncodable)] pub struct Neighbours { /// The list of nodes containing IP, UDP port, TCP port, and node ID. pub nodes: Vec, @@ -234,105 +265,92 @@ pub struct Neighbours { pub expire: u64, } -/// Passthrough newtype to [`Enr`]. -/// -/// We need to wrap the ENR type because of Rust's orphan rules not allowing -/// implementing a foreign trait on a foreign type. -#[derive(Clone, Debug, Eq, PartialEq)] -pub struct EnrWrapper(Enr); - -impl EnrWrapper { - /// Creates a new instance of [`EnrWrapper`]. 
- pub fn new(enr: Enr) -> Self { - EnrWrapper(enr) - } -} - -impl Encodable for EnrWrapper -where - K: EnrKey, -{ - fn encode(&self, out: &mut dyn BufMut) { - let payload_length = self.0.signature().length() + - self.0.seq().length() + - self.0.iter().fold(0, |acc, (k, v)| acc + k.as_slice().length() + v.len()); - - let header = Header { list: true, payload_length }; - header.encode(out); +impl Decodable for Neighbours { + // NOTE(onbjerg): Manual implementation to satisfy EIP-8. + // + // See https://eips.ethereum.org/EIPS/eip-8 + fn decode(buf: &mut &[u8]) -> alloy_rlp::Result { + let b = &mut &**buf; + let rlp_head = Header::decode(b)?; + if !rlp_head.list { + return Err(RlpError::UnexpectedString) + } + let started_len = b.len(); - self.0.signature().encode(out); - self.0.seq().encode(out); + let this = Self { nodes: Decodable::decode(b)?, expire: Decodable::decode(b)? }; - for (k, v) in self.0.iter() { - // Keys are byte data - k.as_slice().encode(out); - // Values are raw RLP encoded data - out.put_slice(v); + // NOTE(onbjerg): Because of EIP-8, we only check that we did not consume *more* than the + // payload length, i.e. it is ok if payload length is greater than what we consumed, as we + // just discard the remaining list items + let consumed = started_len - b.len(); + if consumed > rlp_head.payload_length { + return Err(RlpError::ListLengthMismatch { + expected: rlp_head.payload_length, + got: consumed, + }) } - } - - fn length(&self) -> usize { - let payload_length = self.0.signature().length() + - self.0.seq().length() + - self.0.iter().fold(0, |acc, (k, v)| acc + k.as_slice().length() + v.len()); - payload_length + length_of_length(payload_length) - } -} -fn to_alloy_rlp_error(e: rlp::DecoderError) -> RlpError { - match e { - rlp::DecoderError::RlpIsTooShort => RlpError::InputTooShort, - rlp::DecoderError::RlpInvalidLength => RlpError::Overflow, - rlp::DecoderError::RlpExpectedToBeList => RlpError::UnexpectedString, - rlp::DecoderError::RlpExpectedToBeData => RlpError::UnexpectedList, - rlp::DecoderError::RlpDataLenWithZeroPrefix | - rlp::DecoderError::RlpListLenWithZeroPrefix => RlpError::LeadingZero, - rlp::DecoderError::RlpInvalidIndirection => RlpError::NonCanonicalSize, - rlp::DecoderError::RlpIncorrectListLen => { - RlpError::Custom("incorrect list length when decoding rlp") - } - rlp::DecoderError::RlpIsTooBig => RlpError::Custom("rlp is too big"), - rlp::DecoderError::RlpInconsistentLengthAndData => { - RlpError::Custom("inconsistent length and data when decoding rlp") - } - rlp::DecoderError::Custom(s) => RlpError::Custom(s), - } -} + let rem = rlp_head.payload_length - consumed; + b.advance(rem); + *buf = *b; -impl Decodable for EnrWrapper { - fn decode(buf: &mut &[u8]) -> alloy_rlp::Result { - let enr = as rlp::Decodable>::decode(&rlp::Rlp::new(buf)) - .map_err(to_alloy_rlp_error) - .map(EnrWrapper::new); - if enr.is_ok() { - // Decode was successful, advance buffer - let header = Header::decode(buf)?; - buf.advance(header.payload_length); - } - enr + Ok(this) } } /// A [ENRRequest packet](https://github.com/ethereum/devp2p/blob/master/discv4.md#enrrequest-packet-0x05). /// /// This packet is used to request the current version of a node's Ethereum Node Record (ENR). -#[derive(Clone, Copy, Debug, Eq, PartialEq, RlpEncodable, RlpDecodable)] +#[derive(Clone, Copy, Debug, Eq, PartialEq, RlpEncodable)] pub struct EnrRequest { /// The expiration timestamp for the request. No reply should be sent if it refers to a time in /// the past. 
pub expire: u64, } +impl Decodable for EnrRequest { + // NOTE(onbjerg): Manual implementation to satisfy EIP-8. + // + // See https://eips.ethereum.org/EIPS/eip-8 + fn decode(buf: &mut &[u8]) -> alloy_rlp::Result { + let b = &mut &**buf; + let rlp_head = Header::decode(b)?; + if !rlp_head.list { + return Err(RlpError::UnexpectedString) + } + let started_len = b.len(); + + let this = Self { expire: Decodable::decode(b)? }; + + // NOTE(onbjerg): Because of EIP-8, we only check that we did not consume *more* than the + // payload length, i.e. it is ok if payload length is greater than what we consumed, as we + // just discard the remaining list items + let consumed = started_len - b.len(); + if consumed > rlp_head.payload_length { + return Err(RlpError::ListLengthMismatch { + expected: rlp_head.payload_length, + got: consumed, + }) + } + + let rem = rlp_head.payload_length - consumed; + b.advance(rem); + *buf = *b; + + Ok(this) + } +} + /// A [ENRResponse packet](https://github.com/ethereum/devp2p/blob/master/discv4.md#enrresponse-packet-0x06). /// /// This packet is used to respond to an ENRRequest packet and includes the requested ENR along with /// the hash of the original request. -#[derive(Clone, Debug, Eq, PartialEq, RlpEncodable)] +#[derive(Clone, Debug, Eq, PartialEq, RlpEncodable, RlpDecodable)] pub struct EnrResponse { /// The hash of the ENRRequest packet being replied to. pub request_hash: B256, /// The ENR (Ethereum Node Record) for the responding node. - pub enr: EnrWrapper, + pub enr: Enr, } // === impl EnrResponse === @@ -342,34 +360,8 @@ impl EnrResponse { /// /// See also pub fn eth_fork_id(&self) -> Option { - let mut maybe_fork_id = self.enr.0.get_raw_rlp(b"eth")?; - EnrForkIdEntry::decode(&mut maybe_fork_id).ok().map(|entry| entry.fork_id) - } -} - -impl Decodable for EnrResponse { - fn decode(buf: &mut &[u8]) -> alloy_rlp::Result { - let b = &mut &**buf; - let rlp_head = Header::decode(b)?; - if !rlp_head.list { - return Err(RlpError::UnexpectedString) - } - // let started_len = b.len(); - let this = Self { - request_hash: alloy_rlp::Decodable::decode(b)?, - enr: EnrWrapper::::decode(b)?, - }; - // TODO: `Decodable` can be derived once we have native alloy_rlp decoding for ENR: - // Skipping the size check here is fine since the `buf` is the UDP datagram - // let consumed = started_len - b.len(); - // if consumed != rlp_head.payload_length { - // return Err(alloy_rlp::Error::ListLengthMismatch { - // expected: rlp_head.payload_length, - // got: consumed, - // }) - // } - *buf = *b; - Ok(this) + let mut maybe_fork_id = self.enr.get_raw_rlp(b"eth")?; + EnrForkIdEntry::decode(&mut maybe_fork_id).ok().map(Into::into) } } @@ -549,6 +541,7 @@ mod tests { test_utils::{rng_endpoint, rng_ipv4_record, rng_ipv6_record, rng_message}, DEFAULT_DISCOVERY_PORT, SAFE_MAX_DATAGRAM_NEIGHBOUR_RECORDS, }; + use assert_matches::assert_matches; use enr::EnrPublicKey; use rand::{thread_rng, Rng, RngCore}; use reth_primitives::{hex, ForkHash}; @@ -750,7 +743,6 @@ mod tests { #[test] fn encode_decode_enr_msg() { - use self::EnrWrapper; use alloy_rlp::Decodable; use enr::secp256k1::SecretKey; use std::net::Ipv4Addr; @@ -770,7 +762,7 @@ mod tests { let forkentry = EnrForkIdEntry { fork_id }; forkentry.encode(&mut buf); builder.add_value_rlp("eth", buf.into()); - EnrWrapper::new(builder.build(&key).unwrap()) + builder.build(&key).unwrap() }; let enr_response = EnrResponse { request_hash: rng.gen(), enr }; @@ -789,30 +781,25 @@ mod tests { #[test] fn encode_known_rlp_enr() { - use self::EnrWrapper; 
use alloy_rlp::Decodable; use enr::{secp256k1::SecretKey, EnrPublicKey}; use std::net::Ipv4Addr; - let valid_record = - hex!("f884b8407098ad865b00a582051940cb9cf36836572411a47278783077011599ed5cd16b76f2635f4e234738f30813a89eb9137e3e3df5266e3a1f11df72ecf1145ccb9c01826964827634826970847f00000189736563703235366b31a103ca634cae0d49acb401d8a4c6b6fe8c55b70d115bf400769cc1400f3258cd31388375647082765f" - ); - let signature = - hex!("7098ad865b00a582051940cb9cf36836572411a47278783077011599ed5cd16b76f2635f4e234738f30813a89eb9137e3e3df5266e3a1f11df72ecf1145ccb9c" - ); + let valid_record = hex!("f884b8407098ad865b00a582051940cb9cf36836572411a47278783077011599ed5cd16b76f2635f4e234738f30813a89eb9137e3e3df5266e3a1f11df72ecf1145ccb9c01826964827634826970847f00000189736563703235366b31a103ca634cae0d49acb401d8a4c6b6fe8c55b70d115bf400769cc1400f3258cd31388375647082765f"); + let signature = hex!("7098ad865b00a582051940cb9cf36836572411a47278783077011599ed5cd16b76f2635f4e234738f30813a89eb9137e3e3df5266e3a1f11df72ecf1145ccb9c"); let expected_pubkey = hex!("03ca634cae0d49acb401d8a4c6b6fe8c55b70d115bf400769cc1400f3258cd3138"); - let enr = EnrWrapper::::decode(&mut &valid_record[..]).unwrap(); - let pubkey = enr.0.public_key().encode(); + let enr = Enr::::decode(&mut &valid_record[..]).unwrap(); + let pubkey = enr.public_key().encode(); - assert_eq!(enr.0.ip4(), Some(Ipv4Addr::new(127, 0, 0, 1))); - assert_eq!(enr.0.id(), Some(String::from("v4"))); - assert_eq!(enr.0.udp4(), Some(DEFAULT_DISCOVERY_PORT)); - assert_eq!(enr.0.tcp4(), None); - assert_eq!(enr.0.signature(), &signature[..]); + assert_eq!(enr.ip4(), Some(Ipv4Addr::new(127, 0, 0, 1))); + assert_eq!(enr.id(), Some(String::from("v4"))); + assert_eq!(enr.udp4(), Some(DEFAULT_DISCOVERY_PORT)); + assert_eq!(enr.tcp4(), None); + assert_eq!(enr.signature(), &signature[..]); assert_eq!(pubkey.to_vec(), expected_pubkey); - assert!(enr.0.verify()); + assert!(enr.verify()); assert_eq!(&alloy_rlp::encode(&enr)[..], &valid_record[..]); @@ -833,19 +820,19 @@ mod tests { hex!("03ca634cae0d49acb401d8a4c6b6fe8c55b70d115bf400769cc1400f3258cd3138"); let mut valid_record_buf = valid_record.as_slice(); - let enr = EnrWrapper::::decode(&mut valid_record_buf).unwrap(); - let pubkey = enr.0.public_key().encode(); + let enr = Enr::::decode(&mut valid_record_buf).unwrap(); + let pubkey = enr.public_key().encode(); // Byte array must be consumed after enr has finished decoding assert!(valid_record_buf.is_empty()); - assert_eq!(enr.0.ip4(), Some(Ipv4Addr::new(127, 0, 0, 1))); - assert_eq!(enr.0.id(), Some(String::from("v4"))); - assert_eq!(enr.0.udp4(), Some(DEFAULT_DISCOVERY_PORT)); - assert_eq!(enr.0.tcp4(), None); - assert_eq!(enr.0.signature(), &signature[..]); + assert_eq!(enr.ip4(), Some(Ipv4Addr::new(127, 0, 0, 1))); + assert_eq!(enr.id(), Some(String::from("v4"))); + assert_eq!(enr.udp4(), Some(DEFAULT_DISCOVERY_PORT)); + assert_eq!(enr.tcp4(), None); + assert_eq!(enr.signature(), &signature[..]); assert_eq!(pubkey.to_vec(), expected_pubkey); - assert!(enr.0.verify()); + assert!(enr.verify()); } // test vector from the enr library rlp encoding tests @@ -863,20 +850,116 @@ mod tests { let mut builder = Enr::builder(); builder.ip(ip.into()); builder.tcp4(tcp); - EnrWrapper::new(builder.build(&key).unwrap()) + builder.build(&key).unwrap() }; let mut encoded_bytes = &alloy_rlp::encode(&enr)[..]; - let decoded_enr = EnrWrapper::::decode(&mut encoded_bytes).unwrap(); + let decoded_enr = Enr::::decode(&mut encoded_bytes).unwrap(); // Byte array must be consumed after enr has finished 
decoding assert!(encoded_bytes.is_empty()); assert_eq!(decoded_enr, enr); - assert_eq!(decoded_enr.0.id(), Some("v4".into())); - assert_eq!(decoded_enr.0.ip4(), Some(ip)); - assert_eq!(decoded_enr.0.tcp4(), Some(tcp)); - assert_eq!(decoded_enr.0.public_key().encode(), key.public().encode()); - assert!(decoded_enr.0.verify()); + assert_eq!(decoded_enr.id(), Some("v4".into())); + assert_eq!(decoded_enr.ip4(), Some(ip)); + assert_eq!(decoded_enr.tcp4(), Some(tcp)); + assert_eq!( + decoded_enr.public_key().encode(), + key.public_key(secp256k1::SECP256K1).encode() + ); + assert!(decoded_enr.verify()); + } + + mod eip8 { + use super::*; + + fn junk_enr_request() -> Vec { + let mut buf = Vec::new(); + // enr request is just an expiration + let expire: u64 = 123456; + + // add some junk + let junk: u64 = 112233; + + // rlp header encoding + let payload_length = expire.length() + junk.length(); + alloy_rlp::Header { list: true, payload_length }.encode(&mut buf); + + // fields + expire.encode(&mut buf); + junk.encode(&mut buf); + + buf + } + + // checks that junk data at the end of the packet is discarded according to eip-8 + #[test] + fn eip8_decode_enr_request() { + let enr_request_with_junk = junk_enr_request(); + + let mut buf = enr_request_with_junk.as_slice(); + let decoded = EnrRequest::decode(&mut buf).unwrap(); + assert_eq!(decoded.expire, 123456); + } + + // checks that junk data at the end of the packet is discarded according to eip-8 + // + // test vector from eip-8: https://eips.ethereum.org/EIPS/eip-8 + #[test] + fn eip8_decode_findnode() { + let findnode_with_junk = hex!("c7c44041b9f7c7e41934417ebac9a8e1a4c6298f74553f2fcfdcae6ed6fe53163eb3d2b52e39fe91831b8a927bf4fc222c3902202027e5e9eb812195f95d20061ef5cd31d502e47ecb61183f74a504fe04c51e73df81f25c4d506b26db4517490103f84eb840ca634cae0d49acb401d8a4c6b6fe8c55b70d115bf400769cc1400f3258cd31387574077f301b421bc84df7266c44e9e6d569fc56be00812904767bf5ccd1fc7f8443b9a35582999983999999280dc62cc8255c73471e0a61da0c89acdc0e035e260add7fc0c04ad9ebf3919644c91cb247affc82b69bd2ca235c71eab8e49737c937a2c396"); + + let buf = findnode_with_junk.as_slice(); + let decoded = Message::decode(buf).unwrap(); + + let expected_id = hex!("ca634cae0d49acb401d8a4c6b6fe8c55b70d115bf400769cc1400f3258cd31387574077f301b421bc84df7266c44e9e6d569fc56be00812904767bf5ccd1fc7f"); + assert_matches!(decoded.msg, Message::FindNode(FindNode { id, expire: 1136239445 }) if id == expected_id); + } + + // checks that junk data at the end of the packet is discarded according to eip-8 + // + // test vector from eip-8: https://eips.ethereum.org/EIPS/eip-8 + #[test] + fn eip8_decode_neighbours() { + let neighbours_with_junk = 
hex!("c679fc8fe0b8b12f06577f2e802d34f6fa257e6137a995f6f4cbfc9ee50ed3710faf6e66f932c4c8d81d64343f429651328758b47d3dbc02c4042f0fff6946a50f4a49037a72bb550f3a7872363a83e1b9ee6469856c24eb4ef80b7535bcf99c0004f9015bf90150f84d846321163782115c82115db8403155e1427f85f10a5c9a7755877748041af1bcd8d474ec065eb33df57a97babf54bfd2103575fa829115d224c523596b401065a97f74010610fce76382c0bf32f84984010203040101b840312c55512422cf9b8a4097e9a6ad79402e87a15ae909a4bfefa22398f03d20951933beea1e4dfa6f968212385e829f04c2d314fc2d4e255e0d3bc08792b069dbf8599020010db83c4d001500000000abcdef12820d05820d05b84038643200b172dcfef857492156971f0e6aa2c538d8b74010f8e140811d53b98c765dd2d96126051913f44582e8c199ad7c6d6819e9a56483f637feaac9448aacf8599020010db885a308d313198a2e037073488203e78203e8b8408dcab8618c3253b558d459da53bd8fa68935a719aff8b811197101a4b2b47dd2d47295286fc00cc081bb542d760717d1bdd6bec2c37cd72eca367d6dd3b9df738443b9a355010203b525a138aa34383fec3d2719a0"); + + let buf = neighbours_with_junk.as_slice(); + let decoded = Message::decode(buf).unwrap(); + + let _ = NodeRecord { + address: "99.33.22.55".parse().unwrap(), + tcp_port: 4444, + udp_port: 4445, + id: hex!("3155e1427f85f10a5c9a7755877748041af1bcd8d474ec065eb33df57a97babf54bfd2103575fa829115d224c523596b401065a97f74010610fce76382c0bf32").into(), + }.length(); + + let expected_nodes: Vec = vec![ + NodeRecord { + address: "99.33.22.55".parse().unwrap(), + tcp_port: 4444, + udp_port: 4445, + id: hex!("3155e1427f85f10a5c9a7755877748041af1bcd8d474ec065eb33df57a97babf54bfd2103575fa829115d224c523596b401065a97f74010610fce76382c0bf32").into(), + }, + NodeRecord { + address: "1.2.3.4".parse().unwrap(), + tcp_port: 1, + udp_port: 1, + id: hex!("312c55512422cf9b8a4097e9a6ad79402e87a15ae909a4bfefa22398f03d20951933beea1e4dfa6f968212385e829f04c2d314fc2d4e255e0d3bc08792b069db").into(), + }, + NodeRecord { + address: "2001:db8:3c4d:15::abcd:ef12".parse().unwrap(), + tcp_port: 3333, + udp_port: 3333, + id: hex!("38643200b172dcfef857492156971f0e6aa2c538d8b74010f8e140811d53b98c765dd2d96126051913f44582e8c199ad7c6d6819e9a56483f637feaac9448aac").into(), + }, + NodeRecord { + address: "2001:db8:85a3:8d3:1319:8a2e:370:7348".parse().unwrap(), + tcp_port: 999, + udp_port: 1000, + id: hex!("8dcab8618c3253b558d459da53bd8fa68935a719aff8b811197101a4b2b47dd2d47295286fc00cc081bb542d760717d1bdd6bec2c37cd72eca367d6dd3b9df73").into(), + }, + ]; + assert_matches!(decoded.msg, Message::Neighbours(Neighbours { nodes, expire: 1136239445 }) if nodes == expected_nodes); + } } } diff --git a/crates/net/discv4/src/table.rs b/crates/net/discv4/src/table.rs index c7d75778c..00e1fe50c 100644 --- a/crates/net/discv4/src/table.rs +++ b/crates/net/discv4/src/table.rs @@ -1,6 +1,6 @@ //! Additional support for tracking nodes. -use reth_primitives::PeerId; +use reth_network_types::PeerId; use std::{collections::HashMap, net::IpAddr, time::Instant}; /// Keeps track of nodes from which we have received a `Pong` message. 
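The manual `Decodable` impl for `EnrRequest` above, and the `eip8` tests exercising it, follow EIP-8's forward-compatibility rule: a decoder must tolerate trailing list elements it does not recognize. A minimal sketch of the same pattern on a hypothetical two-field packet (the `PingV0` type is illustrative; the alloy-rlp calls mirror the diff, and `bytes::Buf` is assumed in scope for `advance`):

use alloy_rlp::{Decodable, Error as RlpError, Header};
use bytes::Buf;

/// Hypothetical packet: two known fields, possibly followed by unknown extras.
struct PingV0 {
    version: u64,
    expire: u64,
}

impl Decodable for PingV0 {
    fn decode(buf: &mut &[u8]) -> alloy_rlp::Result<Self> {
        let b = &mut &**buf;
        let rlp_head = Header::decode(b)?;
        if !rlp_head.list {
            return Err(RlpError::UnexpectedString)
        }
        let started_len = b.len();
        let this = Self { version: Decodable::decode(b)?, expire: Decodable::decode(b)? };

        // EIP-8: consuming *more* than the declared payload length is an error;
        // consuming less is fine, the surplus list items are simply skipped.
        let consumed = started_len - b.len();
        if consumed > rlp_head.payload_length {
            return Err(RlpError::ListLengthMismatch {
                expected: rlp_head.payload_length,
                got: consumed,
            })
        }
        b.advance(rlp_head.payload_length - consumed);
        *buf = *b;
        Ok(this)
    }
}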
diff --git a/crates/net/discv4/src/test_utils.rs b/crates/net/discv4/src/test_utils.rs index ccd4f9a03..d4930f204 100644 --- a/crates/net/discv4/src/test_utils.rs +++ b/crates/net/discv4/src/test_utils.rs @@ -6,7 +6,8 @@ use crate::{ IngressReceiver, PeerId, SAFE_MAX_DATAGRAM_NEIGHBOUR_RECORDS, }; use rand::{thread_rng, Rng, RngCore}; -use reth_primitives::{hex, pk2id, ForkHash, ForkId, NodeRecord, B256}; +use reth_network_types::pk2id; +use reth_primitives::{hex, ForkHash, ForkId, NodeRecord, B256}; use secp256k1::{SecretKey, SECP256K1}; use std::{ collections::{HashMap, HashSet}, @@ -113,7 +114,7 @@ impl MockDiscovery { } /// Encodes the packet, sends it and returns the hash. - fn send_packet(&mut self, msg: Message, to: SocketAddr) -> B256 { + fn send_packet(&self, msg: Message, to: SocketAddr) -> B256 { let (payload, hash) = msg.encode(&self.secret_key); let _ = self.egress.try_send((payload, to)); hash diff --git a/crates/net/discv5/Cargo.toml b/crates/net/discv5/Cargo.toml index 03b856be9..a73888ae0 100644 --- a/crates/net/discv5/Cargo.toml +++ b/crates/net/discv5/Cargo.toml @@ -15,12 +15,12 @@ workspace = true # reth reth-primitives.workspace = true reth-metrics.workspace = true +reth-network-types.workspace = true # ethereum alloy-rlp.workspace = true -rlp = "0.5.2" discv5 = { workspace = true, features = ["libp2p"] } -enr = { workspace = true, default-features = false, features = ["rust-secp256k1"] } +enr.workspace = true multiaddr = { version = "0.18", default-features = false } libp2p-identity = "0.2" secp256k1.workspace = true diff --git a/crates/net/discv5/src/config.rs b/crates/net/discv5/src/config.rs index 809f0fa32..2a246d3d5 100644 --- a/crates/net/discv5/src/config.rs +++ b/crates/net/discv5/src/config.rs @@ -2,45 +2,76 @@ use std::{ collections::HashSet, - net::{IpAddr, SocketAddr}, + fmt::Debug, + net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr, SocketAddrV4, SocketAddrV6}, }; use derive_more::Display; use discv5::ListenConfig; use multiaddr::{Multiaddr, Protocol}; -use reth_primitives::{Bytes, ForkId, NodeRecord, MAINNET}; +use reth_primitives::{Bytes, EnrForkIdEntry, ForkId, NodeRecord}; +use tracing::warn; -use crate::{enr::discv4_id_to_multiaddr_id, filter::MustNotIncludeKeys}; +use crate::{enr::discv4_id_to_multiaddr_id, filter::MustNotIncludeKeys, NetworkStackId}; -/// L1 EL -pub const ETH: &[u8] = b"eth"; -/// L1 CL -pub const ETH2: &[u8] = b"eth2"; -/// Optimism -pub const OPSTACK: &[u8] = b"opstack"; +/// The default IPv4 address for discv5 via UDP. +/// +/// Default is 0.0.0.0, all interfaces. See [`discv5::ListenConfig`] default. +pub const DEFAULT_DISCOVERY_V5_ADDR: Ipv4Addr = Ipv4Addr::UNSPECIFIED; + +/// The default IPv6 address for discv5 via UDP. +/// +/// Default is ::, all interfaces. +pub const DEFAULT_DISCOVERY_V5_ADDR_IPV6: Ipv6Addr = Ipv6Addr::UNSPECIFIED; + +/// The default port for discv5 via UDP. +/// +/// Default is port 9000. See [`discv5::ListenConfig`] default. +pub const DEFAULT_DISCOVERY_V5_PORT: u16 = 9000; /// Default interval in seconds at which to run a lookup query. /// /// Default is 60 seconds. -const DEFAULT_SECONDS_LOOKUP_INTERVAL: u64 = 60; +pub const DEFAULT_SECONDS_LOOKUP_INTERVAL: u64 = 60; + +/// Default number of pulse lookup queries to run at bootstrap (at pulse intervals, defaulting +/// to 5 seconds). +/// +/// Default is 100 queries. +pub const DEFAULT_COUNT_BOOTSTRAP_LOOKUPS: u64 = 100; + +/// Default duration in seconds of the lookup interval for pulse lookups at bootstrap. +/// +/// Default is 5 seconds.
+pub const DEFAULT_SECONDS_BOOTSTRAP_LOOKUP_INTERVAL: u64 = 5; /// Builds a [`Config`]. -#[derive(Debug, Default)] +#[derive(Debug)] pub struct ConfigBuilder { /// Config used by [`discv5::Discv5`]. Contains the discovery listen socket. discv5_config: Option<discv5::Config>, /// Nodes to boot from. bootstrap_nodes: HashSet<BootNode>, - /// [`ForkId`] to set in local node record. + /// Fork kv-pair to set in local node record. Identifies which network/chain/fork the node + /// belongs to, e.g. `(b"opstack", ChainId)` or `(b"eth", ForkId)`. + /// + /// Defaults to L1 mainnet if not set. fork: Option<(&'static [u8], ForkId)>, - /// RLPx TCP port to advertise. Note: so long as `reth_network` handles [`NodeRecord`]s as - /// opposed to [`Enr`](enr::Enr)s, TCP is limited to same IP address as UDP, since - /// [`NodeRecord`] doesn't supply an extra field for an alternative TCP address. - tcp_port: u16, - /// Additional kv-pairs that should be advertised to peers by including in local node record. - other_enr_data: Vec<(&'static str, Bytes)>, + /// RLPx TCP socket to advertise. + /// + /// NOTE: IP address of RLPx socket overwrites IP address of same IP version in + /// [`discv5::ListenConfig`]. + tcp_socket: SocketAddr, + /// List of `(key, rlp-encoded-value)` tuples that should be advertised in local node record + /// (in addition to tcp port, udp port and fork). + other_enr_kv_pairs: Vec<(&'static [u8], Bytes)>, /// Interval in seconds at which to run a lookup query to populate kbuckets. lookup_interval: Option<u64>, + /// Interval in seconds at which to run pulse lookup queries at bootstrap to boost kbucket + /// population. + bootstrap_lookup_interval: Option<u64>, + /// Number of times to run boost lookup queries at startup. + bootstrap_lookup_countdown: Option<u64>, /// Custom filter rules to apply to a discovered peer in order to determine if it should be /// passed up to rlpx or dropped. discovered_peer_filter: Option<MustNotIncludeKeys>, @@ -52,20 +83,24 @@ impl ConfigBuilder { let Config { discv5_config, bootstrap_nodes, - fork: fork_id, - tcp_port, - other_enr_data, + fork, + tcp_socket, + other_enr_kv_pairs, lookup_interval, + bootstrap_lookup_interval, + bootstrap_lookup_countdown, discovered_peer_filter, } = discv5_config; Self { discv5_config: Some(discv5_config), bootstrap_nodes, - fork: Some(fork_id), - tcp_port, - other_enr_data, + fork: fork.map(|(key, fork_id)| (key, fork_id.fork_id)), + tcp_socket, + other_enr_kv_pairs, lookup_interval: Some(lookup_interval), + bootstrap_lookup_interval: Some(bootstrap_lookup_interval), + bootstrap_lookup_countdown: Some(bootstrap_lookup_countdown), discovered_peer_filter: Some(discovered_peer_filter), } } @@ -117,21 +152,45 @@ impl ConfigBuilder { self } - /// Set [`ForkId`], and key used to identify it, to set in local [`Enr`](discv5::enr::Enr). - pub fn fork(mut self, key: &'static [u8], value: ForkId) -> Self { - self.fork = Some((key, value)); + /// Set fork ID kv-pair to set in local [`Enr`](discv5::enr::Enr). This lets peers on the + /// discovery network know which chain this node belongs to. + pub fn fork(mut self, fork_key: &'static [u8], fork_id: ForkId) -> Self { + self.fork = Some((fork_key, fork_id)); + self + } + + /// Sets the tcp socket to advertise in the local [`Enr`](discv5::enr::Enr). The IP address of + /// this socket will overwrite the discovery address of the same IP version, if one is + /// configured. + pub fn tcp_socket(mut self, socket: SocketAddr) -> Self { + self.tcp_socket = socket; self } - /// Sets the tcp port to advertise in the local [`Enr`](discv5::enr::Enr).
- fn tcp_port(mut self, port: u16) -> Self { - self.tcp_port = port; + /// Adds an additional kv-pair to include in the local [`Enr`](discv5::enr::Enr). Takes the key + /// to use for the kv-pair and the rlp encoded value. + pub fn add_enr_kv_pair(mut self, key: &'static [u8], value: Bytes) -> Self { + self.other_enr_kv_pairs.push((key, value)); self } - /// Adds an additional kv-pair to include in the local [`Enr`](discv5::enr::Enr). - pub fn add_enr_kv_pair(mut self, kv_pair: (&'static str, Bytes)) -> Self { - self.other_enr_data.push(kv_pair); + /// Sets the interval at which to run lookup queries, in order to fill kbuckets. Lookup queries + /// are done periodically at the given interval for the whole run of the program. + pub fn lookup_interval(mut self, seconds: u64) -> Self { + self.lookup_interval = Some(seconds); + self + } + + /// Sets the interval at which to run boost lookup queries at startup. Queries will be started + /// at this interval for the configured number of times after startup. + pub fn bootstrap_lookup_interval(mut self, seconds: u64) -> Self { + self.bootstrap_lookup_interval = Some(seconds); + self + } + + /// Sets the number of times to run boost lookup queries to bootstrap the node. + pub fn bootstrap_lookup_countdown(mut self, counts: u64) -> Self { + self.bootstrap_lookup_countdown = Some(counts); self } @@ -151,29 +210,40 @@ impl ConfigBuilder { discv5_config, bootstrap_nodes, fork, - tcp_port, - other_enr_data, + tcp_socket, + other_enr_kv_pairs, lookup_interval, + bootstrap_lookup_interval, + bootstrap_lookup_countdown, discovered_peer_filter, } = self; - let discv5_config = discv5_config + let mut discv5_config = discv5_config .unwrap_or_else(|| discv5::ConfigBuilder::new(ListenConfig::default()).build()); - let fork = fork.unwrap_or((ETH, MAINNET.latest_fork_id())); + discv5_config.listen_config = + amend_listen_config_wrt_rlpx(&discv5_config.listen_config, tcp_socket.ip()); + + let fork = fork.map(|(key, fork_id)| (key, fork_id.into())); let lookup_interval = lookup_interval.unwrap_or(DEFAULT_SECONDS_LOOKUP_INTERVAL); + let bootstrap_lookup_interval = + bootstrap_lookup_interval.unwrap_or(DEFAULT_SECONDS_BOOTSTRAP_LOOKUP_INTERVAL); + let bootstrap_lookup_countdown = + bootstrap_lookup_countdown.unwrap_or(DEFAULT_COUNT_BOOTSTRAP_LOOKUPS); - let discovered_peer_filter = - discovered_peer_filter.unwrap_or_else(|| MustNotIncludeKeys::new(&[ETH2])); + let discovered_peer_filter = discovered_peer_filter + .unwrap_or_else(|| MustNotIncludeKeys::new(&[NetworkStackId::ETH2])); Config { discv5_config, bootstrap_nodes, fork, - tcp_port, - other_enr_data, + tcp_socket, + other_enr_kv_pairs, lookup_interval, + bootstrap_lookup_interval, + bootstrap_lookup_countdown, discovered_peer_filter, } } @@ -187,23 +257,44 @@ pub struct Config { pub(super) discv5_config: discv5::Config, /// Nodes to boot from. pub(super) bootstrap_nodes: HashSet<BootNode>, - /// [`ForkId`] to set in local node record. - pub(super) fork: (&'static [u8], ForkId), - /// RLPx TCP port to advertise. - pub(super) tcp_port: u16, - /// Additional kv-pairs to include in local node record. - pub(super) other_enr_data: Vec<(&'static str, Bytes)>, + /// Fork kv-pair to set in local node record. Identifies which network/chain/fork the node + /// belongs to, e.g. `(b"opstack", ChainId)` or `(b"eth", [ForkId])`. + pub(super) fork: Option<(&'static [u8], EnrForkIdEntry)>, + /// RLPx TCP socket to advertise.
+ /// + /// NOTE: IP address of RLPx socket overwrites IP address of same IP version in + /// [`discv5::ListenConfig`]. + pub(super) tcp_socket: SocketAddr, + /// Additional kv-pairs (besides tcp port, udp port and fork) that should be advertised to + /// peers by including in local node record. + pub(super) other_enr_kv_pairs: Vec<(&'static [u8], Bytes)>, /// Interval in seconds at which to run a lookup query to populate kbuckets. pub(super) lookup_interval: u64, + /// Interval in seconds at which to run pulse lookup queries at bootstrap to boost kbucket + /// population. + pub(super) bootstrap_lookup_interval: u64, + /// Number of times to run boost lookup queries at startup. + pub(super) bootstrap_lookup_countdown: u64, /// Custom filter rules to apply to a discovered peer in order to determine if it should be /// passed up to rlpx or dropped. pub(super) discovered_peer_filter: MustNotIncludeKeys, } impl Config { - /// Returns a new [`ConfigBuilder`], with the RLPx TCP port set to the given port. - pub fn builder(rlpx_tcp_port: u16) -> ConfigBuilder { - ConfigBuilder::default().tcp_port(rlpx_tcp_port) + /// Returns a new [`ConfigBuilder`], with the RLPx TCP port and IP version configured w.r.t. + /// the given socket. + pub fn builder(rlpx_tcp_socket: SocketAddr) -> ConfigBuilder { + ConfigBuilder { + discv5_config: None, + bootstrap_nodes: HashSet::new(), + fork: None, + tcp_socket: rlpx_tcp_socket, + other_enr_kv_pairs: Vec::new(), + lookup_interval: None, + bootstrap_lookup_interval: None, + bootstrap_lookup_countdown: None, + discovered_peer_filter: None, + } } } @@ -221,12 +312,104 @@ impl Config { /// Returns the RLPx (TCP) socket contained in the [`discv5::Config`]. This socket will be /// advertised to peers in the local [`Enr`](discv5::enr::Enr). - pub fn rlpx_socket(&self) -> SocketAddr { - let port = self.tcp_port; - match self.discv5_config.listen_config { - ListenConfig::Ipv4 { ip, .. } => (ip, port).into(), - ListenConfig::Ipv6 { ip, .. } => (ip, port).into(), - ListenConfig::DualStack { ipv4, .. } => (ipv4, port).into(), + pub fn rlpx_socket(&self) -> &SocketAddr { + &self.tcp_socket + } +} + +/// Returns the IPv4 discovery socket if one is configured. +pub fn ipv4(listen_config: &ListenConfig) -> Option<SocketAddrV4> { + match listen_config { + ListenConfig::Ipv4 { ip, port } | + ListenConfig::DualStack { ipv4: ip, ipv4_port: port, .. } => { + Some(SocketAddrV4::new(*ip, *port)) + } + ListenConfig::Ipv6 { .. } => None, + } +} + +/// Returns the IPv6 discovery socket if one is configured. +pub fn ipv6(listen_config: &ListenConfig) -> Option<SocketAddrV6> { + match listen_config { + ListenConfig::Ipv4 { .. } => None, + ListenConfig::Ipv6 { ip, port } | + ListenConfig::DualStack { ipv6: ip, ipv6_port: port, .. } => { + Some(SocketAddrV6::new(*ip, *port, 0, 0)) + } + } +} + +/// Returns the amended [`discv5::ListenConfig`] based on the RLPx IP address. The ENR is limited +/// to one IP address per IP version (for now; it may become spec'd how to advertise different +/// addresses). The RLPx address overwrites the discv5 address w.r.t. IP version.
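+///
+/// A minimal sketch of the effect, mirroring the `overwrite_ipv4_addr` test below (the RLPx
+/// address is an arbitrary illustrative value):
+///
+/// ```ignore
+/// let rlpx_addr: IpAddr = "192.168.0.1".parse::<Ipv4Addr>().unwrap().into();
+/// let amended = amend_listen_config_wrt_rlpx(&ListenConfig::default(), rlpx_addr);
+/// let discv5_socket_ipv4 = ipv4(&amended).unwrap();
+/// assert_eq!(*discv5_socket_ipv4.ip(), "192.168.0.1".parse::<Ipv4Addr>().unwrap());
+/// assert_eq!(discv5_socket_ipv4.port(), DEFAULT_DISCOVERY_V5_PORT);
+/// ```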
+pub fn amend_listen_config_wrt_rlpx( + listen_config: &ListenConfig, + rlpx_addr: IpAddr, +) -> ListenConfig { + let discv5_socket_ipv4 = ipv4(listen_config); + let discv5_socket_ipv6 = ipv6(listen_config); + + let discv5_port_ipv4 = + discv5_socket_ipv4.map(|socket| socket.port()).unwrap_or(DEFAULT_DISCOVERY_V5_PORT); + let discv5_addr_ipv4 = discv5_socket_ipv4.map(|socket| *socket.ip()); + let discv5_port_ipv6 = + discv5_socket_ipv6.map(|socket| socket.port()).unwrap_or(DEFAULT_DISCOVERY_V5_PORT); + let discv5_addr_ipv6 = discv5_socket_ipv6.map(|socket| *socket.ip()); + + let (discv5_socket_ipv4, discv5_socket_ipv6) = discv5_sockets_wrt_rlpx_addr( + rlpx_addr, + discv5_addr_ipv4, + discv5_port_ipv4, + discv5_addr_ipv6, + discv5_port_ipv6, + ); + + ListenConfig::from_two_sockets(discv5_socket_ipv4, discv5_socket_ipv6) +} + +/// Returns the sockets that can be used for discv5 with respect to the RLPx address. ENR specs only +/// acknowledge one address per IP version. +pub fn discv5_sockets_wrt_rlpx_addr( + rlpx_addr: IpAddr, + discv5_addr_ipv4: Option, + discv5_port_ipv4: u16, + discv5_addr_ipv6: Option, + discv5_port_ipv6: u16, +) -> (Option, Option) { + match rlpx_addr { + IpAddr::V4(rlpx_addr) => { + let discv5_socket_ipv6 = + discv5_addr_ipv6.map(|ip| SocketAddrV6::new(ip, discv5_port_ipv6, 0, 0)); + + if let Some(discv5_addr) = discv5_addr_ipv4 { + warn!(target: "discv5", + %discv5_addr, + %rlpx_addr, + "Overwriting discv5 IPv4 address with RLPx IPv4 address, limited to one advertised IP address per IP version" + ); + } + + // overwrite discv5 ipv4 addr with RLPx address. this is since there is no + // spec'd way to advertise a different address for rlpx and discovery in the + // ENR. + (Some(SocketAddrV4::new(rlpx_addr, discv5_port_ipv4)), discv5_socket_ipv6) + } + IpAddr::V6(rlpx_addr) => { + let discv5_socket_ipv4 = + discv5_addr_ipv4.map(|ip| SocketAddrV4::new(ip, discv5_port_ipv4)); + + if let Some(discv5_addr) = discv5_addr_ipv6 { + warn!(target: "discv5", + %discv5_addr, + %rlpx_addr, + "Overwriting discv5 IPv6 address with RLPx IPv6 address, limited to one advertised IP address per IP version" + ); + } + + // overwrite discv5 ipv6 addr with RLPx address. this is since there is no + // spec'd way to advertise a different address for rlpx and discovery in the + // ENR. 
+ (discv5_socket_ipv4, Some(SocketAddrV6::new(rlpx_addr, discv5_port_ipv6, 0, 0))) } } } @@ -286,7 +469,7 @@ mod test { fn parse_boot_nodes() { const OP_SEPOLIA_CL_BOOTNODES: &str ="enr:-J64QBwRIWAco7lv6jImSOjPU_W266lHXzpAS5YOh7WmgTyBZkgLgOwo_mxKJq3wz2XRbsoBItbv1dCyjIoNq67mFguGAYrTxM42gmlkgnY0gmlwhBLSsHKHb3BzdGFja4S0lAUAiXNlY3AyNTZrMaEDmoWSi8hcsRpQf2eJsNUx-sqv6fH4btmo2HsAzZFAKnKDdGNwgiQGg3VkcIIkBg,enr:-J64QFa3qMsONLGphfjEkeYyF6Jkil_jCuJmm7_a42ckZeUQGLVzrzstZNb1dgBp1GGx9bzImq5VxJLP-BaptZThGiWGAYrTytOvgmlkgnY0gmlwhGsV-zeHb3BzdGFja4S0lAUAiXNlY3AyNTZrMaEDahfSECTIS_cXyZ8IyNf4leANlZnrsMEWTkEYxf4GMCmDdGNwgiQGg3VkcIIkBg"; - let config = Config::builder(30303) + let config = Config::builder((Ipv4Addr::UNSPECIFIED, 30303).into()) .add_cl_serialized_signed_boot_nodes(OP_SEPOLIA_CL_BOOTNODES) .build(); @@ -306,7 +489,7 @@ mod test { #[test] fn parse_enodes() { - let config = Config::builder(30303) + let config = Config::builder((Ipv4Addr::UNSPECIFIED, 30303).into()) .add_serialized_unsigned_boot_nodes(BOOT_NODES_OP_MAINNET_AND_BASE_MAINNET) .build(); @@ -317,4 +500,34 @@ mod test { assert!(bootstrap_nodes.contains(&node.to_string())); } } + + #[test] + fn overwrite_ipv4_addr() { + let rlpx_addr: Ipv4Addr = "192.168.0.1".parse().unwrap(); + + let listen_config = ListenConfig::default(); + + let amended_config = amend_listen_config_wrt_rlpx(&listen_config, rlpx_addr.into()); + + let config_socket_ipv4 = ipv4(&amended_config).unwrap(); + + assert_eq!(*config_socket_ipv4.ip(), rlpx_addr); + assert_eq!(config_socket_ipv4.port(), DEFAULT_DISCOVERY_V5_PORT); + assert_eq!(ipv6(&amended_config), ipv6(&listen_config)); + } + + #[test] + fn overwrite_ipv6_addr() { + let rlpx_addr: Ipv6Addr = "fe80::1".parse().unwrap(); + + let listen_config = ListenConfig::default(); + + let amended_config = amend_listen_config_wrt_rlpx(&listen_config, rlpx_addr.into()); + + let config_socket_ipv6 = ipv6(&amended_config).unwrap(); + + assert_eq!(*config_socket_ipv6.ip(), rlpx_addr); + assert_eq!(config_socket_ipv6.port(), DEFAULT_DISCOVERY_V5_PORT); + assert_eq!(ipv4(&amended_config), ipv4(&listen_config)); + } } diff --git a/crates/net/discv5/src/enr.rs b/crates/net/discv5/src/enr.rs index b810c1dc6..162370bb4 100644 --- a/crates/net/discv5/src/enr.rs +++ b/crates/net/discv5/src/enr.rs @@ -3,7 +3,7 @@ use discv5::enr::{CombinedPublicKey, EnrPublicKey, NodeId}; use enr::Enr; -use reth_primitives::{id2pk, pk2id, PeerId}; +use reth_network_types::{id2pk, pk2id, PeerId}; use secp256k1::{PublicKey, SecretKey}; /// Extracts a [`CombinedPublicKey::Secp256k1`] from a [`discv5::Enr`] and converts it to a @@ -41,30 +41,25 @@ pub struct EnrCombinedKeyWrapper(pub discv5::Enr); impl From> for EnrCombinedKeyWrapper { fn from(value: Enr) -> Self { - let encoded_enr = rlp::encode(&value); - let enr = rlp::decode::(&encoded_enr).unwrap(); - - Self(enr) + let encoded_enr = alloy_rlp::encode(&value); + Self(alloy_rlp::Decodable::decode(&mut &encoded_enr[..]).unwrap()) } } impl From for Enr { fn from(val: EnrCombinedKeyWrapper) -> Self { - let EnrCombinedKeyWrapper(enr) = val; - let encoded_enr = rlp::encode(&enr); - - rlp::decode::>(&encoded_enr).unwrap() + let encoded_enr = alloy_rlp::encode(&val.0); + alloy_rlp::Decodable::decode(&mut &encoded_enr[..]).unwrap() } } #[cfg(test)] mod tests { + use super::*; use alloy_rlp::Encodable; use discv5::enr::{CombinedKey, EnrKey}; use reth_primitives::{Hardfork, NodeRecord, MAINNET}; - use super::*; - #[test] fn discv5_discv4_id_conversion() { let discv5_pk = CombinedKey::generate_secp256k1().public(); 
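The `EnrCombinedKeyWrapper` conversions in the enr.rs hunk above swap an ENR's key type by re-encoding: `Enr<SecretKey>` and `discv5::Enr` share the same RLP wire format, so an alloy-rlp encode followed by a decode into the other type is a lossless round trip. A usage sketch, assuming `enr: Enr<SecretKey>` was built elsewhere with a secp256k1 key:

// secp256k1-keyed record -> discv5's CombinedKey-keyed record
let discv5_enr: discv5::Enr = EnrCombinedKeyWrapper::from(enr.clone()).0;

// ... and back; the round trip preserves the record
let enr_again: Enr<SecretKey> = EnrCombinedKeyWrapper(discv5_enr).into();
assert_eq!(enr_again, enr);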
diff --git a/crates/net/discv5/src/error.rs b/crates/net/discv5/src/error.rs index 7e4fa8653..277631464 100644 --- a/crates/net/discv5/src/error.rs +++ b/crates/net/discv5/src/error.rs @@ -11,6 +11,9 @@ pub enum Error { /// Node record has incompatible key type. #[error("incompatible key type (not secp256k1)")] IncompatibleKeyType, + /// No key used to identify rlpx network is configured. + #[error("network stack identifier is not configured")] + NetworkStackIdNotConfigured, /// Missing key used to identify rlpx network. #[error("fork missing on enr, key missing")] ForkMissing(&'static [u8]), @@ -32,4 +35,7 @@ pub enum Error { /// An error from underlying [`discv5::Discv5`] node. #[error("sigp/discv5 error, {0}")] Discv5Error(discv5::Error), + /// The [`ListenConfig`](discv5::ListenConfig) has been misconfigured. + #[error("misconfigured listen config, RLPx TCP address must also be supported by discv5")] + ListenConfigMisconfigured, } diff --git a/crates/net/discv5/src/filter.rs b/crates/net/discv5/src/filter.rs index 5cb7be18c..d62a7584a 100644 --- a/crates/net/discv5/src/filter.rs +++ b/crates/net/discv5/src/filter.rs @@ -35,14 +35,12 @@ impl MustIncludeKey { /// Returns [`FilterOutcome::Ok`] if [`Enr`](discv5::Enr) contains the configured kv-pair key. pub fn filter(&self, enr: &discv5::Enr) -> FilterOutcome { if enr.get_raw_rlp(self.key).is_none() { - return FilterOutcome::Ignore { reason: self.ignore_reason() } + return FilterOutcome::Ignore { + reason: format!("{} fork required", String::from_utf8_lossy(self.key)), + } } FilterOutcome::Ok } - - fn ignore_reason(&self) -> String { - format!("{} fork required", String::from_utf8_lossy(self.key)) - } } /// Filter requiring that peers not advertise kv-pairs using certain keys, e.g. b"eth2". @@ -69,20 +67,18 @@ impl MustNotIncludeKeys { pub fn filter(&self, enr: &discv5::Enr) -> FilterOutcome { for key in self.keys.iter() { if matches!(key.filter(enr), FilterOutcome::Ok) { - return FilterOutcome::Ignore { reason: self.ignore_reason() } + return FilterOutcome::Ignore { + reason: format!( + "{} forks not allowed", + self.keys.iter().map(|key| String::from_utf8_lossy(key.key)).format(",") + ), + } } } FilterOutcome::Ok } - fn ignore_reason(&self) -> String { - format!( - "{} forks not allowed", - self.keys.iter().map(|key| String::from_utf8_lossy(key.key)).format(",") - ) - } - /// Adds a key that must not be present for any kv-pair in a node record. 
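///
/// A minimal usage sketch, with keys borrowed from [`NetworkStackId`](crate::NetworkStackId):
///
/// ```ignore
/// let mut filter = MustNotIncludeKeys::new(&[NetworkStackId::ETH2]);
/// filter.add_disallowed_keys(&[NetworkStackId::OPSTACK]);
/// // node records advertising either key are now filtered out
/// ```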
pub fn add_disallowed_keys(&mut self, keys: &[&'static [u8]]) { for key in keys { @@ -96,7 +92,7 @@ mod tests { use alloy_rlp::Bytes; use discv5::enr::{CombinedKey, Enr}; - use crate::config::{ETH, ETH2}; + use crate::NetworkStackId; use super::*; @@ -104,16 +100,21 @@ fn must_not_include_key_filter() { // rig test - let filter = MustNotIncludeKeys::new(&[ETH, ETH2]); + let filter = MustNotIncludeKeys::new(&[NetworkStackId::ETH, NetworkStackId::ETH2]); // enr_1 advertises a fork from one of the keys configured in filter let sk = CombinedKey::generate_secp256k1(); - let enr_1 = - Enr::builder().add_value_rlp(ETH as &[u8], Bytes::from("cancun")).build(&sk).unwrap(); + let enr_1 = Enr::builder() + .add_value_rlp(NetworkStackId::ETH as &[u8], Bytes::from("cancun")) + .build(&sk) + .unwrap(); // enr_2 advertises a fork from the other key configured in filter let sk = CombinedKey::generate_secp256k1(); - let enr_2 = Enr::builder().add_value_rlp(ETH2, Bytes::from("deneb")).build(&sk).unwrap(); + let enr_2 = Enr::builder() + .add_value_rlp(NetworkStackId::ETH2, Bytes::from("deneb")) + .build(&sk) + .unwrap(); // test diff --git a/crates/net/discv5/src/lib.rs b/crates/net/discv5/src/lib.rs index 218d4299d..ffa3c9caf 100644 --- a/crates/net/discv5/src/lib.rs +++ b/crates/net/discv5/src/lib.rs @@ -17,13 +17,13 @@ use std::{ }; use ::enr::Enr; -use alloy_rlp::Decodable; use discv5::ListenConfig; use enr::{discv4_id_to_discv5_id, EnrCombinedKeyWrapper}; use futures::future::join_all; use itertools::Itertools; use rand::{Rng, RngCore}; -use reth_primitives::{bytes::Bytes, ForkId, NodeRecord, PeerId}; +use reth_network_types::PeerId; +use reth_primitives::{bytes::Bytes, EnrForkIdEntry, ForkId, NodeRecord}; use secp256k1::SecretKey; use tokio::{sync::mpsc, task}; use tracing::{debug, error, trace}; @@ -33,26 +33,23 @@ pub mod enr; pub mod error; pub mod filter; pub mod metrics; +pub mod network_stack_id; pub use discv5::{self, IpMode}; -pub use config::{BootNode, Config, ConfigBuilder}; +pub use config::{ + BootNode, Config, ConfigBuilder, DEFAULT_COUNT_BOOTSTRAP_LOOKUPS, DEFAULT_DISCOVERY_V5_ADDR, + DEFAULT_DISCOVERY_V5_ADDR_IPV6, DEFAULT_DISCOVERY_V5_PORT, + DEFAULT_SECONDS_BOOTSTRAP_LOOKUP_INTERVAL, DEFAULT_SECONDS_LOOKUP_INTERVAL, +}; pub use enr::enr_to_discv4_id; pub use error::Error; pub use filter::{FilterOutcome, MustNotIncludeKeys}; -use metrics::{DiscoveredPeersMetrics, Discv5Metrics}; +pub use network_stack_id::NetworkStackId; -/// Default number of times to do pulse lookup queries, at bootstrap (5 second intervals). -/// -/// Default is 100 seconds. -pub const DEFAULT_COUNT_PULSE_LOOKUPS_AT_BOOTSTRAP: u64 = 100; - -/// Default duration of look up interval, for pulse look ups at bootstrap. -/// -/// Default is 5 seconds. -pub const DEFAULT_SECONDS_PULSE_LOOKUP_INTERVAL: u64 = 5; +use metrics::{DiscoveredPeersMetrics, Discv5Metrics}; -/// Max kbucket index. +/// Max kbucket index is 255. /// /// This is the max log2distance for 32 byte [`NodeId`](discv5::enr::NodeId) - 1. See . pub const MAX_KBUCKET_INDEX: usize = 255; @@ -69,10 +66,10 @@ pub struct Discv5 { /// sigp/discv5 node. discv5: Arc<discv5::Discv5>, - /// [`IpMode`] of the node. - ip_mode: IpMode, - /// Key used in kv-pair to ID chain. - fork_id_key: &'static [u8], + /// [`IpMode`] of the RLPx network. + rlpx_ip_mode: IpMode, + /// Key used in kv-pair to ID chain, e.g. 'opstack' or 'eth'.
+ fork_key: Option<&'static [u8]>, /// Filter applied to a discovered peers before passing it up to app. discovered_peer_filter: MustNotIncludeKeys, /// Metrics for underlying [`discv5::Discv5`] node and filtered discovered peers. @@ -165,82 +162,27 @@ impl Discv5 { // // 1. make local enr from listen config // + let (enr, bc_enr, fork_key, rlpx_ip_mode) = build_local_enr(sk, &discv5_config); + + trace!(target: "net::discv5", + ?enr, + "local ENR" + ); + + // + // 2. start discv5 + // let Config { discv5_config, bootstrap_nodes, - fork, - tcp_port, - other_enr_data, lookup_interval, + bootstrap_lookup_interval, + bootstrap_lookup_countdown, discovered_peer_filter, + .. } = discv5_config; - let (enr, bc_enr, ip_mode, fork_id_key) = { - let mut builder = discv5::enr::Enr::builder(); - - let (ip_mode, socket) = match discv5_config.listen_config { - ListenConfig::Ipv4 { ip, port } => { - if ip != Ipv4Addr::UNSPECIFIED { - builder.ip4(ip); - } - builder.udp4(port); - builder.tcp4(tcp_port); - - (IpMode::Ip4, (ip, port).into()) - } - ListenConfig::Ipv6 { ip, port } => { - if ip != Ipv6Addr::UNSPECIFIED { - builder.ip6(ip); - } - builder.udp6(port); - builder.tcp6(tcp_port); - - (IpMode::Ip6, (ip, port).into()) - } - ListenConfig::DualStack { ipv4, ipv4_port, ipv6, ipv6_port } => { - if ipv4 != Ipv4Addr::UNSPECIFIED { - builder.ip4(ipv4); - } - builder.udp4(ipv4_port); - builder.tcp4(tcp_port); - - if ipv6 != Ipv6Addr::UNSPECIFIED { - builder.ip6(ipv6); - } - builder.udp6(ipv6_port); - - (IpMode::DualStack, (ipv6, ipv6_port).into()) - } - }; - - // add fork id - let (chain, fork_id) = fork; - builder.add_value_rlp(chain, alloy_rlp::encode(fork_id).into()); - - // add other data - for (key, value) in other_enr_data { - builder.add_value_rlp(key, alloy_rlp::encode(value).into()); - } - - // enr v4 not to get confused with discv4, independent versioning enr and - // discovery - let enr = builder.build(sk).expect("should build enr v4"); - let EnrCombinedKeyWrapper(enr) = enr.into(); - - trace!(target: "net::discv5", - ?enr, - "local ENR" - ); - - // backwards compatible enr - let bc_enr = NodeRecord::from_secret_key(socket, sk); - - (enr, bc_enr, ip_mode, chain) - }; - - // - // 2. start discv5 - // + let EnrCombinedKeyWrapper(enr) = enr.into(); let sk = discv5::enr::CombinedKey::secp256k1_from_bytes(&mut sk.secret_bytes()).unwrap(); let mut discv5 = match discv5::Discv5::new(enr, sk, discv5_config) { Ok(discv5) => discv5, @@ -256,125 +198,32 @@ impl Discv5 { // // 3. add boot nodes // - Self::bootstrap(bootstrap_nodes, &discv5).await?; + bootstrap(bootstrap_nodes, &discv5).await?; let metrics = Discv5Metrics::default(); // - // 4. bg kbuckets maintenance + // 4. start bg kbuckets maintenance // - Self::spawn_populate_kbuckets_bg(lookup_interval, metrics.clone(), discv5.clone()); + spawn_populate_kbuckets_bg( + lookup_interval, + bootstrap_lookup_interval, + bootstrap_lookup_countdown, + metrics.clone(), + discv5.clone(), + ); Ok(( - Self { discv5, ip_mode, fork_id_key, discovered_peer_filter, metrics }, + Self { discv5, rlpx_ip_mode, fork_key, discovered_peer_filter, metrics }, discv5_updates, bc_enr, )) } - /// Bootstraps underlying [`discv5::Discv5`] node with configured peers. - async fn bootstrap( - bootstrap_nodes: HashSet, - discv5: &Arc, - ) -> Result<(), Error> { - trace!(target: "net::discv5", - ?bootstrap_nodes, - "adding bootstrap nodes .." 
- ); - - let mut enr_requests = vec![]; - for node in bootstrap_nodes { - match node { - BootNode::Enr(node) => { - if let Err(err) = discv5.add_enr(node) { - return Err(Error::AddNodeFailed(err)) - } - } - BootNode::Enode(enode) => { - let discv5 = discv5.clone(); - enr_requests.push(async move { - if let Err(err) = discv5.request_enr(enode.to_string()).await { - debug!(target: "net::discv5", - ?enode, - %err, - "failed adding boot node" - ); - } - }) - } - } - } - - // If a session is established, the ENR is added straight away to discv5 kbuckets - Ok(_ = join_all(enr_requests).await) - } - - /// Backgrounds regular look up queries, in order to keep kbuckets populated. - fn spawn_populate_kbuckets_bg( - lookup_interval: u64, - metrics: Discv5Metrics, - discv5: Arc, - ) { - task::spawn({ - let local_node_id = discv5.local_enr().node_id(); - let lookup_interval = Duration::from_secs(lookup_interval); - let metrics = metrics.discovered_peers; - let mut kbucket_index = MAX_KBUCKET_INDEX; - let pulse_lookup_interval = Duration::from_secs(DEFAULT_SECONDS_PULSE_LOOKUP_INTERVAL); - // todo: graceful shutdown - - async move { - // make many fast lookup queries at bootstrap, trying to fill kbuckets at furthest - // log2distance from local node - for i in (0..DEFAULT_COUNT_PULSE_LOOKUPS_AT_BOOTSTRAP).rev() { - let target = discv5::enr::NodeId::random(); - - trace!(target: "net::discv5", - %target, - bootstrap_boost_runs_count_down=i, - lookup_interval=format!("{:#?}", pulse_lookup_interval), - "starting bootstrap boost lookup query" - ); - - lookup(target, &discv5, &metrics).await; - - tokio::time::sleep(pulse_lookup_interval).await; - } - - // initiate regular lookups to populate kbuckets - loop { - // make sure node is connected to each subtree in the network by target - // selection (ref kademlia) - let target = get_lookup_target(kbucket_index, local_node_id); - - trace!(target: "net::discv5", - %target, - lookup_interval=format!("{:#?}", lookup_interval), - "starting periodic lookup query" - ); - - lookup(target, &discv5, &metrics).await; - - if kbucket_index > DEFAULT_MIN_TARGET_KBUCKET_INDEX { - // try to populate bucket one step closer - kbucket_index -= 1 - } else { - // start over with bucket furthest away - kbucket_index = MAX_KBUCKET_INDEX - } - - tokio::time::sleep(lookup_interval).await; - } - } - }); - } - /// Process an event from the underlying [`discv5::Discv5`] node. - pub fn on_discv5_update(&mut self, update: discv5::Event) -> Option { + pub fn on_discv5_update(&self, update: discv5::Event) -> Option { match update { discv5::Event::SocketUpdated(_) | discv5::Event::TalkRequest(_) | - // `EnrAdded` not used in discv5 codebase - discv5::Event::EnrAdded { .. } | // `Discovered` not unique discovered peers discv5::Event::Discovered(_) => None, discv5::Event::NodeInserted { replaced: _, .. 
} => { @@ -388,31 +237,62 @@ impl Discv5 { None } discv5::Event::SessionEstablished(enr, remote_socket) => { - // covers `reth_discv4::DiscoveryUpdate` equivalents `DiscoveryUpdate::Added(_)` - // and `DiscoveryUpdate::DiscoveredAtCapacity(_) + // this branch is semantically similar to branches of + // `reth_discv4::DiscoveryUpdate`: `DiscoveryUpdate::Added(_)` and + // `DiscoveryUpdate::DiscoveredAtCapacity(_) // peer has been discovered as part of query, or, by incoming session (peer has // discovered us) - self.metrics.discovered_peers_advertised_networks.increment_once_by_network_type(&enr); - self.metrics.discovered_peers.increment_established_sessions_raw(1); self.on_discovered_peer(&enr, remote_socket) } + discv5::Event::UnverifiableEnr { + enr, + socket, + node_id: _, + } => { + // this branch is semantically similar to branches of + // `reth_discv4::DiscoveryUpdate`: `DiscoveryUpdate::Added(_)` and + // `DiscoveryUpdate::DiscoveredAtCapacity(_) + + // peer has been discovered as part of query, or, by an outgoing session (but peer + // is behind NAT and responds from a different socket) + + // NOTE: `discv5::Discv5` won't initiate a session with any peer with an + // unverifiable node record, for example one that advertises a reserved LAN IP + // address on a WAN network. This is in order to prevent DoS attacks, where some + // malicious peers may advertise a victim's socket. We will still try and connect + // to them over RLPx, to be compatible with EL discv5 implementations that don't + // enforce this security measure. + + trace!(target: "net::discv5", + ?enr, + %socket, + "discovered unverifiable enr, source socket doesn't match socket advertised in ENR" + ); + + self.metrics.discovered_peers.increment_unverifiable_enrs_raw_total(1); + + self.on_discovered_peer(&enr, socket) + } + _ => None } } /// Processes a discovered peer. Returns `true` if peer is added to - fn on_discovered_peer( - &mut self, + pub fn on_discovered_peer( + &self, enr: &discv5::Enr, socket: SocketAddr, ) -> Option { + self.metrics.discovered_peers_advertised_networks.increment_once_by_network_type(enr); + let node_record = match self.try_into_reachable(enr, socket) { Ok(enr_bc) => enr_bc, Err(err) => { - trace!(target: "net::discovery::discv5", + trace!(target: "net::discv5", %err, ?enr, "discovered peer is unreachable" @@ -423,22 +303,24 @@ impl Discv5 { return None } }; - let fork_id = match self.filter_discovered_peer(enr) { - FilterOutcome::Ok => self.get_fork_id(enr).ok(), - FilterOutcome::Ignore { reason } => { - trace!(target: "net::discovery::discv5", - ?enr, - reason, - "filtered out discovered peer" - ); + if let FilterOutcome::Ignore { reason } = self.filter_discovered_peer(enr) { + trace!(target: "net::discv5", + ?enr, + reason, + "filtered out discovered peer" + ); - self.metrics.discovered_peers.increment_established_sessions_filtered(1); + self.metrics.discovered_peers.increment_established_sessions_filtered(1); - return None - } - }; + return None + } + + // todo: extend for all network stacks in reth-network rlpx logic + let fork_id = (self.fork_key == Some(NetworkStackId::ETH)) + .then(|| self.get_fork_id(enr).ok()) + .flatten(); - trace!(target: "net::discovery::discv5", + trace!(target: "net::discv5", ?fork_id, ?enr, "discovered peer" @@ -448,47 +330,47 @@ impl Discv5 { } /// Tries to convert an [`Enr`](discv5::Enr) into the backwards compatible type [`NodeRecord`], - /// w.r.t. local [`IpMode`]. Tries the socket from which the ENR was sent, if socket is missing - /// from ENR. 
- /// - /// Note: [`discv5::Discv5`] won't initiate a session with any peer with a malformed node - /// record, that advertises a reserved IP address on a WAN network. - fn try_into_reachable( + /// w.r.t. local RLPx [`IpMode`]. Uses source socket as udp socket. + pub fn try_into_reachable( &self, enr: &discv5::Enr, socket: SocketAddr, ) -> Result { let id = enr_to_discv4_id(enr).ok_or(Error::IncompatibleKeyType)?; - let udp_socket = self.ip_mode().get_contactable_addr(enr).unwrap_or(socket); - - // since we, on bootstrap, set tcp4 in local ENR for `IpMode::Dual`, we prefer tcp4 here - // too - let Some(tcp_port) = (match self.ip_mode() { - IpMode::Ip4 | IpMode::DualStack => enr.tcp4(), + if enr.tcp4().is_none() && enr.tcp6().is_none() { + return Err(Error::UnreachableRlpx) + } + let Some(tcp_port) = (match self.rlpx_ip_mode { + IpMode::Ip4 => enr.tcp4(), IpMode::Ip6 => enr.tcp6(), + _ => unimplemented!("dual-stack support not implemented for rlpx"), }) else { - return Err(Error::IpVersionMismatchRlpx(self.ip_mode())) + return Err(Error::IpVersionMismatchRlpx(self.rlpx_ip_mode)) }; - Ok(NodeRecord { address: udp_socket.ip(), tcp_port, udp_port: udp_socket.port(), id }) + Ok(NodeRecord { address: socket.ip(), tcp_port, udp_port: socket.port(), id }) } /// Applies filtering rules on an ENR. Returns [`Ok`](FilterOutcome::Ok) if peer should be /// passed up to app, and [`Ignore`](FilterOutcome::Ignore) if peer should instead be dropped. - fn filter_discovered_peer(&self, enr: &discv5::Enr) -> FilterOutcome { + pub fn filter_discovered_peer(&self, enr: &discv5::Enr) -> FilterOutcome { self.discovered_peer_filter.filter(enr) } - /// Returns the [`ForkId`] of the given [`Enr`](discv5::Enr), if field is set. - fn get_fork_id( + /// Returns the [`ForkId`] of the given [`Enr`](discv5::Enr) w.r.t. the local node's network + /// stack, if field is set. + pub fn get_fork_id( &self, enr: &discv5::enr::Enr, ) -> Result { - let key = self.fork_id_key; - let mut fork_id_bytes = enr.get_raw_rlp(key).ok_or(Error::ForkMissing(key))?; + let Some(key) = self.fork_key else { return Err(Error::NetworkStackIdNotConfigured) }; + let fork_id = enr + .get_decodable::(key) + .ok_or(Error::ForkMissing(key))? + .map(Into::into)?; - Ok(ForkId::decode(&mut fork_id_bytes)?) + Ok(fork_id) } //////////////////////////////////////////////////////////////////////////////////////////////// @@ -507,14 +389,14 @@ impl Discv5 { // Complementary //////////////////////////////////////////////////////////////////////////////////////////////// - /// Returns the [`IpMode`] of the local node. + /// Returns the RLPx [`IpMode`] of the local node. pub fn ip_mode(&self) -> IpMode { - self.ip_mode + self.rlpx_ip_mode } /// Returns the key to use to identify the [`ForkId`] kv-pair on the [`Enr`](discv5::Enr). - pub fn fork_id_key(&self) -> &[u8] { - self.fork_id_key + pub fn fork_key(&self) -> Option<&[u8]> { + self.fork_key } } @@ -533,6 +415,172 @@ pub struct DiscoveredPeer { pub fork_id: Option, } +/// Builds the local ENR with the supplied key. +pub fn build_local_enr( + sk: &SecretKey, + config: &Config, +) -> (Enr, NodeRecord, Option<&'static [u8]>, IpMode) { + let mut builder = discv5::enr::Enr::builder(); + + let Config { discv5_config, fork, tcp_socket, other_enr_kv_pairs, .. 
} = config; + + let socket = match discv5_config.listen_config { + ListenConfig::Ipv4 { ip, port } => { + if ip != Ipv4Addr::UNSPECIFIED { + builder.ip4(ip); + } + builder.udp4(port); + builder.tcp4(tcp_socket.port()); + + (ip, port).into() + } + ListenConfig::Ipv6 { ip, port } => { + if ip != Ipv6Addr::UNSPECIFIED { + builder.ip6(ip); + } + builder.udp6(port); + builder.tcp6(tcp_socket.port()); + + (ip, port).into() + } + ListenConfig::DualStack { ipv4, ipv4_port, ipv6, ipv6_port } => { + if ipv4 != Ipv4Addr::UNSPECIFIED { + builder.ip4(ipv4); + } + builder.udp4(ipv4_port); + builder.tcp4(tcp_socket.port()); + + if ipv6 != Ipv6Addr::UNSPECIFIED { + builder.ip6(ipv6); + } + builder.udp6(ipv6_port); + + (ipv6, ipv6_port).into() + } + }; + + let rlpx_ip_mode = if tcp_socket.is_ipv4() { IpMode::Ip4 } else { IpMode::Ip6 }; + + // identifies which network node is on + let network_stack_id = fork.as_ref().map(|(network_stack_id, fork_value)| { + builder.add_value_rlp(network_stack_id, alloy_rlp::encode(fork_value).into()); + *network_stack_id + }); + + // add other data + for (key, value) in other_enr_kv_pairs { + builder.add_value_rlp(key, value.clone().into()); + } + + // enr v4 not to get confused with discv4, independent versioning enr and + // discovery + let enr = builder.build(sk).expect("should build enr v4"); + + // backwards compatible enr + let bc_enr = NodeRecord::from_secret_key(socket, sk); + + (enr, bc_enr, network_stack_id, rlpx_ip_mode) +} + +/// Bootstraps underlying [`discv5::Discv5`] node with configured peers. +pub async fn bootstrap( + bootstrap_nodes: HashSet, + discv5: &Arc, +) -> Result<(), Error> { + trace!(target: "net::discv5", + ?bootstrap_nodes, + "adding bootstrap nodes .." + ); + + let mut enr_requests = vec![]; + for node in bootstrap_nodes { + match node { + BootNode::Enr(node) => { + if let Err(err) = discv5.add_enr(node) { + return Err(Error::AddNodeFailed(err)) + } + } + BootNode::Enode(enode) => { + let discv5 = discv5.clone(); + enr_requests.push(async move { + if let Err(err) = discv5.request_enr(enode.to_string()).await { + debug!(target: "net::discv5", + ?enode, + %err, + "failed adding boot node" + ); + } + }) + } + } + } + + // If a session is established, the ENR is added straight away to discv5 kbuckets + Ok(_ = join_all(enr_requests).await) +} + +/// Backgrounds regular look up queries, in order to keep kbuckets populated. 
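+///
+/// An illustrative call, wired the same way the setup code above does (assumes a running tokio
+/// runtime and an `Arc<discv5::Discv5>` handle named `discv5`):
+///
+/// ```ignore
+/// spawn_populate_kbuckets_bg(
+///     DEFAULT_SECONDS_LOOKUP_INTERVAL,
+///     DEFAULT_SECONDS_BOOTSTRAP_LOOKUP_INTERVAL,
+///     DEFAULT_COUNT_BOOTSTRAP_LOOKUPS,
+///     Discv5Metrics::default(),
+///     discv5.clone(),
+/// );
+/// ```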
+pub fn spawn_populate_kbuckets_bg( + lookup_interval: u64, + bootstrap_lookup_interval: u64, + bootstrap_lookup_countdown: u64, + metrics: Discv5Metrics, + discv5: Arc, +) { + task::spawn({ + let local_node_id = discv5.local_enr().node_id(); + let lookup_interval = Duration::from_secs(lookup_interval); + let metrics = metrics.discovered_peers; + let mut kbucket_index = MAX_KBUCKET_INDEX; + let pulse_lookup_interval = Duration::from_secs(bootstrap_lookup_interval); + // todo: graceful shutdown + + async move { + // make many fast lookup queries at bootstrap, trying to fill kbuckets at furthest + // log2distance from local node + for i in (0..bootstrap_lookup_countdown).rev() { + let target = discv5::enr::NodeId::random(); + + trace!(target: "net::discv5", + %target, + bootstrap_boost_runs_countdown=i, + lookup_interval=format!("{:#?}", pulse_lookup_interval), + "starting bootstrap boost lookup query" + ); + + lookup(target, &discv5, &metrics).await; + + tokio::time::sleep(pulse_lookup_interval).await; + } + + // initiate regular lookups to populate kbuckets + loop { + // make sure node is connected to each subtree in the network by target + // selection (ref kademlia) + let target = get_lookup_target(kbucket_index, local_node_id); + + trace!(target: "net::discv5", + %target, + lookup_interval=format!("{:#?}", lookup_interval), + "starting periodic lookup query" + ); + + lookup(target, &discv5, &metrics).await; + + if kbucket_index > DEFAULT_MIN_TARGET_KBUCKET_INDEX { + // try to populate bucket one step closer + kbucket_index -= 1 + } else { + // start over with bucket furthest away + kbucket_index = MAX_KBUCKET_INDEX + } + + tokio::time::sleep(lookup_interval).await; + } + } + }); +} + /// Gets the next lookup target, based on which bucket is currently being targeted. 
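///
/// For a given `kbucket_index`, the returned target lies at `log2distance == kbucket_index + 1`
/// from the local node, so the query fills the targeted bucket (see the tests below):
///
/// ```ignore
/// let target = get_lookup_target(MAX_KBUCKET_INDEX, local_node_id);
/// assert_eq!(local_node_id.log2_distance(&target), Some(MAX_KBUCKET_INDEX as u64 + 1));
/// ```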
pub fn get_lookup_target( kbucket_index: usize, @@ -600,9 +648,10 @@ pub async fn lookup( } #[cfg(test)] -mod tests { +mod test { use super::*; use ::enr::{CombinedKey, EnrKey}; + use reth_primitives::MAINNET; use secp256k1::rand::thread_rng; use tracing::trace; @@ -617,8 +666,8 @@ mod tests { ) .unwrap(), ), - ip_mode: IpMode::Ip4, - fork_id_key: b"noop", + rlpx_ip_mode: IpMode::Ip4, + fork_key: None, discovered_peer_filter: MustNotIncludeKeys::default(), metrics: Discv5Metrics::default(), } @@ -630,9 +679,10 @@ mod tests { let secret_key = SecretKey::new(&mut thread_rng()); let discv5_addr: SocketAddr = format!("127.0.0.1:{udp_port_discv5}").parse().unwrap(); + let rlpx_addr: SocketAddr = "127.0.0.1:30303".parse().unwrap(); let discv5_listen_config = ListenConfig::from(discv5_addr); - let discv5_config = Config::builder(30303) + let discv5_config = Config::builder(rlpx_addr) .discv5_config(discv5::ConfigBuilder::new(discv5_listen_config).build()) .build(); @@ -653,7 +703,7 @@ mod tests { let (node_2, mut stream_2, _) = start_discovery_node(30355).await; let node_2_enr = node_2.with_discv5(|discv5| discv5.local_enr()); - trace!(target: "net::discovery::tests", + trace!(target: "net::discv5::test", node_1_node_id=format!("{:#}", node_1_enr.node_id()), node_2_node_id=format!("{:#}", node_2_enr.node_id()), "started nodes" @@ -704,7 +754,7 @@ mod tests { let remote_key = CombinedKey::generate_secp256k1(); let remote_enr = Enr::builder().tcp4(REMOTE_RLPX_PORT).build(&remote_key).unwrap(); - let mut discv5 = discv5_noop(); + let discv5 = discv5_noop(); // test let filtered_peer = discv5.on_discovered_peer(&remote_enr, remote_socket); @@ -818,4 +868,26 @@ mod tests { assert_eq!(local_node_id.log2_distance(&target), Some(bucket_index as u64 + 1)); } } + + #[test] + fn build_enr_from_config() { + const TCP_PORT: u16 = 30303; + let fork_id = MAINNET.latest_fork_id(); + + let config = Config::builder((Ipv4Addr::UNSPECIFIED, TCP_PORT).into()) + .fork(NetworkStackId::ETH, fork_id) + .build(); + + let sk = SecretKey::new(&mut thread_rng()); + let (enr, _, _, _) = build_local_enr(&sk, &config); + + let decoded_fork_id = enr + .get_decodable::(NetworkStackId::ETH) + .unwrap() + .map(Into::into) + .unwrap(); + + assert_eq!(fork_id, decoded_fork_id); + assert_eq!(TCP_PORT, enr.tcp4().unwrap()); // listen config is defaulting to ip mode ipv4 + } } diff --git a/crates/net/discv5/src/metrics.rs b/crates/net/discv5/src/metrics.rs index 72ea5fc0e..d58ed66e0 100644 --- a/crates/net/discv5/src/metrics.rs +++ b/crates/net/discv5/src/metrics.rs @@ -2,7 +2,7 @@ use metrics::{Counter, Gauge}; use reth_metrics::Metrics; -use crate::config::{ETH, ETH2, OPSTACK}; +use crate::NetworkStackId; /// Information tracked by [`Discv5`](crate::Discv5). #[derive(Debug, Default, Clone)] @@ -21,7 +21,7 @@ pub struct DiscoveredPeersMetrics { // Kbuckets //////////////////////////////////////////////////////////////////////////////////////////////// /// Total peers currently in [`discv5::Discv5`]'s kbuckets. - total_kbucket_peers_raw: Gauge, + kbucket_peers_raw_total: Gauge, /// Total discovered peers that are inserted into [`discv5::Discv5`]'s kbuckets. /// /// This is a subset of the total established sessions, in which all peers advertise a udp @@ -29,58 +29,72 @@ pub struct DiscoveredPeersMetrics { /// it into [`discv5::Discv5`]'s kbuckets and will hence be included in queries. /// /// Note: the definition of 'discovered' is not exactly synonymous in `reth_discv4::Discv4`. 
- total_inserted_kbucket_peers_raw: Counter, + inserted_kbucket_peers_raw_total: Counter, //////////////////////////////////////////////////////////////////////////////////////////////// // Sessions //////////////////////////////////////////////////////////////////////////////////////////////// /// Total peers currently connected to [`discv5::Discv5`]. - total_sessions_raw: Gauge, + sessions_raw_total: Gauge, /// Total number of sessions established by [`discv5::Discv5`]. - total_established_sessions_raw: Counter, + established_sessions_raw_total: Counter, /// Total number of sessions established by [`discv5::Discv5`], with peers that don't advertise /// a socket which is reachable from the local node in their node record. /// /// These peers can't make it into [`discv5::Discv5`]'s kbuckets, and hence won't be part of /// queries (neither shared with peers in NODES responses, nor queried for peers with FINDNODE /// requests). - total_established_sessions_unreachable_enr: Counter, + established_sessions_unreachable_enr_total: Counter, /// Total number of sessions established by [`discv5::Discv5`], that pass configured /// [`filter`](crate::filter) rules. - total_established_sessions_custom_filtered: Counter, + established_sessions_custom_filtered_total: Counter, + /// Total number of unverifiable ENRs discovered by [`discv5::Discv5`]. + /// + /// These are peers that fail [`discv5::Discv5`] session establishment, because the UDP socket + /// they're making a connection from doesn't match the UDP socket advertised in their ENR. + /// These peers will be denied a session (and hence can't make it into kbuckets) until they + /// have updated their ENR to reflect their actual UDP socket. + unverifiable_enrs_raw_total: Counter, } impl DiscoveredPeersMetrics { /// Sets current total number of peers in [`discv5::Discv5`]'s kbuckets. pub fn set_total_kbucket_peers(&self, num: usize) { - self.total_kbucket_peers_raw.set(num as f64) + self.kbucket_peers_raw_total.set(num as f64) } /// Increments the number of kbucket insertions in [`discv5::Discv5`]. pub fn increment_kbucket_insertions(&self, num: u64) { - self.total_inserted_kbucket_peers_raw.increment(num) + self.inserted_kbucket_peers_raw_total.increment(num) } /// Sets current total number of peers connected to [`discv5::Discv5`]. pub fn set_total_sessions(&self, num: usize) { - self.total_sessions_raw.set(num as f64) + self.sessions_raw_total.set(num as f64) } /// Increments number of sessions established by [`discv5::Discv5`]. pub fn increment_established_sessions_raw(&self, num: u64) { - self.total_established_sessions_raw.increment(num) + self.established_sessions_raw_total.increment(num) } /// Increments number of sessions established by [`discv5::Discv5`], with peers that don't have /// a reachable node record. pub fn increment_established_sessions_unreachable_enr(&self, num: u64) { - self.total_established_sessions_unreachable_enr.increment(num) + self.established_sessions_unreachable_enr_total.increment(num) } /// Increments number of sessions established by [`discv5::Discv5`], that pass configured /// [`filter`](crate::filter) rules. pub fn increment_established_sessions_filtered(&self, num: u64) { - self.total_established_sessions_custom_filtered.increment(num) + self.established_sessions_custom_filtered_total.increment(num) + } + + /// Increments number of unverifiable ENRs discovered by [`discv5::Discv5`].
These are peers + /// that fail session establishment because their advertised UDP socket doesn't match the + /// socket they are making the connection from. + pub fn increment_unverifiable_enrs_raw_total(&self, num: u64) { + self.unverifiable_enrs_raw_total.increment(num) } } @@ -91,26 +105,34 @@ impl DiscoveredPeersMetrics { #[derive(Metrics, Clone)] #[metrics(scope = "discv5")] pub struct AdvertisedChainMetrics { - /// Frequency of node records with a kv-pair with [`OPSTACK`] as key. + /// Frequency of node records with a kv-pair with [`OPEL`](NetworkStackId::OPEL) as + /// key. + opel: Counter, + + /// Frequency of node records with a kv-pair with [`OPSTACK`](NetworkStackId::OPSTACK) as + /// key. opstack: Counter, - /// Frequency of node records with a kv-pair with [`ETH`] as key. + /// Frequency of node records with a kv-pair with [`ETH`](NetworkStackId::ETH) as key. eth: Counter, - /// Frequency of node records with a kv-pair with [`ETH2`] as key. + /// Frequency of node records with a kv-pair with [`ETH2`](NetworkStackId::ETH2) as key. eth2: Counter, } impl AdvertisedChainMetrics { - /// Counts each recognised network type that is advertised on node record, once. + /// Counts each recognised network stack type that is advertised on node record, once. pub fn increment_once_by_network_type(&self, enr: &discv5::Enr) { - if enr.get_raw_rlp(OPSTACK).is_some() { + if enr.get_raw_rlp(NetworkStackId::OPEL).is_some() { + self.opel.increment(1u64) + } + if enr.get_raw_rlp(NetworkStackId::OPSTACK).is_some() { self.opstack.increment(1u64) } - if enr.get_raw_rlp(ETH).is_some() { + if enr.get_raw_rlp(NetworkStackId::ETH).is_some() { self.eth.increment(1u64) } - if enr.get_raw_rlp(ETH2).is_some() { + if enr.get_raw_rlp(NetworkStackId::ETH2).is_some() { self.eth2.increment(1u64) } } diff --git a/crates/net/discv5/src/network_stack_id.rs b/crates/net/discv5/src/network_stack_id.rs new file mode 100644 index 000000000..7bfeff517 --- /dev/null +++ b/crates/net/discv5/src/network_stack_id.rs @@ -0,0 +1,33 @@ +//! Keys of ENR [`ForkId`](reth_primitives::ForkId) kv-pair. Identifies which network stack a node +//! belongs to. + +use reth_primitives::ChainSpec; + +/// Identifies which Ethereum network stack a node belongs to, on the discovery network. +#[derive(Debug)] +pub struct NetworkStackId; + +impl NetworkStackId { + /// ENR fork ID kv-pair key, for an Ethereum L1 EL node. + pub const ETH: &'static [u8] = b"eth"; + + /// ENR fork ID kv-pair key, for an Ethereum L1 CL node. + pub const ETH2: &'static [u8] = b"eth2"; + + /// ENR fork ID kv-pair key, for an Optimism EL node. + pub const OPEL: &'static [u8] = b"opel"; + + /// ENR fork ID kv-pair key, for an Optimism CL node. + pub const OPSTACK: &'static [u8] = b"opstack"; + + /// Returns the [`NetworkStackId`] that matches the given [`ChainSpec`]. 
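+ ///
+ /// A sketch of the expected mapping (assuming `MAINNET.is_eth()` holds):
+ ///
+ /// ```ignore
+ /// assert_eq!(NetworkStackId::id(&MAINNET), Some(NetworkStackId::ETH));
+ /// ```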
+ pub fn id(chain: &ChainSpec) -> Option<&'static [u8]> { + if chain.is_optimism() { + return Some(Self::OPEL) + } else if chain.is_eth() { + return Some(Self::ETH) + } + + None + } +} diff --git a/crates/net/dns/Cargo.toml b/crates/net/dns/Cargo.toml index 003a6cad7..8076bd4e1 100644 --- a/crates/net/dns/Cargo.toml +++ b/crates/net/dns/Cargo.toml @@ -15,11 +15,11 @@ workspace = true # reth reth-primitives.workspace = true reth-net-common.workspace = true +reth-network-types.workspace = true # ethereum -alloy-rlp.workspace = true secp256k1 = { workspace = true, features = ["global-context", "rand-std", "recovery", "serde"] } -enr = { workspace = true, default-features = false, features = ["rust-secp256k1"] } +enr.workspace = true # async/futures tokio = { workspace = true, features = ["io-util", "net", "time"] } @@ -39,8 +39,10 @@ serde = { workspace = true, optional = true } serde_with = { version = "3.3.0", optional = true } [dev-dependencies] +alloy-rlp.workspace = true tokio = { workspace = true, features = ["sync", "rt", "rt-multi-thread"] } reth-tracing.workspace = true +rand.workspace = true [features] default = ["serde"] diff --git a/crates/net/dns/src/lib.rs b/crates/net/dns/src/lib.rs index 6db9c9ee2..5000e524e 100644 --- a/crates/net/dns/src/lib.rs +++ b/crates/net/dns/src/lib.rs @@ -22,7 +22,8 @@ use crate::{ pub use config::DnsDiscoveryConfig; use enr::Enr; use error::ParseDnsEntryError; -use reth_primitives::{pk2id, ForkId, NodeRecord}; +use reth_network_types::pk2id; +use reth_primitives::{EnrForkIdEntry, ForkId, NodeRecord}; use schnellru::{ByLength, LruMap}; use secp256k1::SecretKey; use std::{ @@ -66,13 +67,13 @@ pub struct DnsDiscoveryHandle { impl DnsDiscoveryHandle { /// Starts syncing the given link to a tree. - pub fn sync_tree(&mut self, link: &str) -> Result<(), ParseDnsEntryError> { + pub fn sync_tree(&self, link: &str) -> Result<(), ParseDnsEntryError> { self.sync_tree_with_link(link.parse()?); Ok(()) } /// Starts syncing the given link to a tree. 
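+    ///
+    /// A hypothetical call site, given a handle to a running discovery service and an already
+    /// parsed link (crate and module paths are assumptions):
+    ///
+    /// ```ignore
+    /// # use reth_dns_discovery::{tree::LinkEntry, DnsDiscoveryHandle};
+    /// # fn sync(handle: &DnsDiscoveryHandle, link: LinkEntry) {
+    /// handle.sync_tree_with_link(link);
+    /// # }
+    /// ```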
-    pub fn sync_tree_with_link(&mut self, link: LinkEntry) {
+    pub fn sync_tree_with_link(&self, link: LinkEntry) {
         let _ = self.to_service.send(DnsDiscoveryCommand::SyncTree(link));
     }

@@ -392,8 +393,6 @@ pub enum DnsDiscoveryEvent {

 /// Converts an [Enr] into a [NodeRecord]
 fn convert_enr_node_record(enr: &Enr<SecretKey>) -> Option<DnsNodeRecordUpdate> {
-    use alloy_rlp::Decodable;
-
     let node_record = NodeRecord {
         address: enr.ip4().map(IpAddr::from).or_else(|| enr.ip6().map(IpAddr::from))?,
         tcp_port: enr.tcp4().or_else(|| enr.tcp6())?,
@@ -402,8 +401,8 @@ fn convert_enr_node_record(enr: &Enr<SecretKey>) -> Option<DnsNodeRecordUpdate>
     }
     .into_ipv4_mapped();

-    let mut maybe_fork_id = enr.get(b"eth")?;
-    let fork_id = ForkId::decode(&mut maybe_fork_id).ok();
+    let fork_id =
+        enr.get_decodable::<EnrForkIdEntry>(b"eth").transpose().ok().flatten().map(Into::into);

     Some(DnsNodeRecordUpdate { node_record, fork_id, enr: enr.clone() })
 }
@@ -412,12 +411,63 @@ fn convert_enr_node_record(enr: &Enr<SecretKey>) -> Option<DnsNodeRecordUpdate>
 mod tests {
     use super::*;
     use crate::tree::TreeRootEntry;
-    use alloy_rlp::Encodable;
+    use alloy_rlp::{Decodable, Encodable};
     use enr::EnrKey;
-    use reth_primitives::{Chain, Hardfork, MAINNET};
+    use reth_primitives::{Chain, ForkHash, Hardfork, MAINNET};
     use secp256k1::rand::thread_rng;
     use std::{future::poll_fn, net::Ipv4Addr};

+    #[test]
+    fn test_convert_enr_node_record() {
+        // rig
+        let secret_key = SecretKey::new(&mut secp256k1::rand::thread_rng());
+        let enr = Enr::builder()
+            .ip("127.0.0.1".parse().unwrap())
+            .udp4(9000)
+            .tcp4(30303)
+            .add_value(b"eth", &EnrForkIdEntry::from(MAINNET.latest_fork_id()))
+            .build(&secret_key)
+            .unwrap();
+
+        // test
+        let node_record_update = convert_enr_node_record(&enr).unwrap();
+
+        assert_eq!(node_record_update.node_record.address, "127.0.0.1".parse::<IpAddr>().unwrap());
+        assert_eq!(node_record_update.node_record.tcp_port, 30303);
+        assert_eq!(node_record_update.node_record.udp_port, 9000);
+        assert_eq!(node_record_update.fork_id, Some(MAINNET.latest_fork_id()));
+        assert_eq!(node_record_update.enr, enr);
+    }
+
+    #[test]
+    fn test_decode_and_convert_enr_node_record() {
+        // rig
+
+        let secret_key = SecretKey::new(&mut secp256k1::rand::thread_rng());
+        let enr = Enr::builder()
+            .ip("127.0.0.1".parse().unwrap())
+            .udp4(9000)
+            .tcp4(30303)
+            .add_value(b"eth", &EnrForkIdEntry::from(MAINNET.latest_fork_id()))
+            .add_value(b"opstack", &ForkId { hash: ForkHash(rand::random()), next: rand::random() })
+            .build(&secret_key)
+            .unwrap();
+
+        let mut encoded_enr = vec![];
+        enr.encode(&mut encoded_enr);
+
+        // test
+        let decoded_enr = Enr::decode(&mut &encoded_enr[..]).unwrap();
+
+        let node_record_update = convert_enr_node_record(&decoded_enr).unwrap();
+
+        assert_eq!(node_record_update.node_record.address, "127.0.0.1".parse::<IpAddr>().unwrap());
+        assert_eq!(node_record_update.node_record.tcp_port, 30303);
+        assert_eq!(node_record_update.node_record.udp_port, 9000);
+        assert_eq!(node_record_update.fork_id, Some(MAINNET.latest_fork_id()));
+        assert_eq!(node_record_update.enr, enr);
+    }
+
     #[tokio::test]
     async fn test_start_root_sync() {
         reth_tracing::init_test_tracing();
@@ -461,10 +511,12 @@
         resolver.insert(link.domain.clone(), root.to_string());

         let mut builder = Enr::builder();
-        let mut buf = Vec::new();
         let fork_id = MAINNET.hardfork_fork_id(Hardfork::Frontier).unwrap();
-        fork_id.encode(&mut buf);
-        builder.ip4(Ipv4Addr::LOCALHOST).udp4(30303).tcp4(30303).add_value(b"eth", &buf);
+        builder
+            .ip4(Ipv4Addr::LOCALHOST)
+            .udp4(30303)
+            .tcp4(30303)
+            .add_value(b"eth", &EnrForkIdEntry::from(fork_id));
         let enr = builder.build(&secret_key).unwrap();

         resolver.insert(format!("{}.{}", root.enr_root.clone(), link.domain), enr.to_base64());
diff --git a/crates/net/dns/src/tree.rs b/crates/net/dns/src/tree.rs
index 53220f694..614d5f1d2 100644
--- a/crates/net/dns/src/tree.rs
+++ b/crates/net/dns/src/tree.rs
@@ -22,7 +22,7 @@ use crate::error::{
     ParseEntryResult,
 };
 use data_encoding::{BASE32_NOPAD, BASE64URL_NOPAD};
-use enr::{Enr, EnrError, EnrKey, EnrKeyUnambiguous, EnrPublicKey};
+use enr::{Enr, EnrKey, EnrKeyUnambiguous, EnrPublicKey, Error as EnrError};
 use reth_primitives::{hex, Bytes};
 use secp256k1::SecretKey;
 #[cfg(feature = "serde")]
diff --git a/crates/net/downloaders/Cargo.toml b/crates/net/downloaders/Cargo.toml
index 7ae6db8e6..353956d3b 100644
--- a/crates/net/downloaders/Cargo.toml
+++ b/crates/net/downloaders/Cargo.toml
@@ -18,6 +18,8 @@ reth-primitives.workspace = true
 reth-tasks.workspace = true
 reth-provider.workspace = true
 reth-config.workspace = true
+reth-consensus.workspace = true
+reth-network-types.workspace = true

 # async
 futures.workspace = true
@@ -44,6 +46,7 @@ itertools.workspace = true

 [dev-dependencies]
 reth-db = { workspace = true, features = ["test-utils"] }
+reth-consensus = { workspace = true, features = ["test-utils"] }
 reth-interfaces = { workspace = true, features = ["test-utils"] }
 reth-provider = { workspace = true, features = ["test-utils"] }
 reth-tracing.workspace = true
@@ -57,5 +60,5 @@ rand.workspace = true
 tempfile.workspace = true

 [features]
-test-utils = ["dep:tempfile", "reth-db/test-utils", "reth-interfaces/test-utils"]
+test-utils = ["dep:tempfile", "reth-db/test-utils", "reth-consensus/test-utils", "reth-interfaces/test-utils"]
diff --git a/crates/net/downloaders/src/bodies/bodies.rs b/crates/net/downloaders/src/bodies/bodies.rs
index d45c9b191..8f97e09c7 100644
--- a/crates/net/downloaders/src/bodies/bodies.rs
+++ b/crates/net/downloaders/src/bodies/bodies.rs
@@ -3,16 +3,14 @@ use crate::{bodies::task::TaskDownloader, metrics::BodyDownloaderMetrics};
 use futures::Stream;
 use futures_util::StreamExt;
 use reth_config::BodiesConfig;
-use reth_interfaces::{
-    consensus::Consensus,
-    p2p::{
-        bodies::{
-            client::BodiesClient,
-            downloader::{BodyDownloader, BodyDownloaderResult},
-            response::BlockResponse,
-        },
-        error::{DownloadError, DownloadResult},
+use reth_consensus::Consensus;
+use reth_interfaces::p2p::{
+    bodies::{
+        client::BodiesClient,
+        downloader::{BodyDownloader, BodyDownloaderResult},
+        response::BlockResponse,
     },
+    error::{DownloadError, DownloadResult},
 };
 use reth_primitives::{BlockNumber, SealedHeader};
 use reth_provider::HeaderProvider;
@@ -70,7 +68,7 @@ where
     Provider: HeaderProvider + Unpin + 'static,
 {
     /// Returns the next contiguous request.
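+    ///
+    /// The next request begins one block after the last requested block number, or at the start
+    /// of the configured download range if nothing has been requested yet.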
-    fn next_headers_request(&mut self) -> DownloadResult<Option<HeadersRequest>> {
+    fn next_headers_request(&self) -> DownloadResult<Option<HeadersRequest>> {
         let start_at = match self.in_progress_queue.last_requested_block_number {
             Some(num) => num + 1,
             None => *self.download_range.start(),
@@ -606,8 +604,9 @@ mod tests {
         test_utils::{generate_bodies, TestBodiesClient},
     };
     use assert_matches::assert_matches;
+    use reth_consensus::test_utils::TestConsensus;
     use reth_db::test_utils::{create_test_rw_db, create_test_static_files_dir};
-    use reth_interfaces::test_utils::{generators, generators::random_block_range, TestConsensus};
+    use reth_interfaces::test_utils::{generators, generators::random_block_range};
     use reth_primitives::{BlockBody, B256, MAINNET};
     use reth_provider::ProviderFactory;
     use std::collections::HashMap;
diff --git a/crates/net/downloaders/src/bodies/mod.rs b/crates/net/downloaders/src/bodies/mod.rs
index f8931ea81..d4f613413 100644
--- a/crates/net/downloaders/src/bodies/mod.rs
+++ b/crates/net/downloaders/src/bodies/mod.rs
@@ -2,6 +2,9 @@
 #[allow(clippy::module_inception)]
 pub mod bodies;

+/// A body downloader that does nothing. Useful to build unwind-only pipelines.
+pub mod noop;
+
 /// A downloader implementation that spawns a downloader to a task
 pub mod task;

diff --git a/crates/net/downloaders/src/bodies/noop.rs b/crates/net/downloaders/src/bodies/noop.rs
new file mode 100644
index 000000000..5885a17c1
--- /dev/null
+++ b/crates/net/downloaders/src/bodies/noop.rs
@@ -0,0 +1,29 @@
+use futures::Stream;
+use reth_interfaces::p2p::{
+    bodies::{downloader::BodyDownloader, response::BlockResponse},
+    error::{DownloadError, DownloadResult},
+};
+use reth_primitives::BlockNumber;
+use std::ops::RangeInclusive;
+
+/// A [BodyDownloader] implementation that does nothing.
+#[derive(Debug, Default)]
+#[non_exhaustive]
+pub struct NoopBodiesDownloader;
+
+impl BodyDownloader for NoopBodiesDownloader {
+    fn set_download_range(&mut self, _: RangeInclusive<BlockNumber>) -> DownloadResult<()> {
+        Ok(())
+    }
+}
+
+impl Stream for NoopBodiesDownloader {
+    type Item = Result<Vec<BlockResponse>, DownloadError>;
+
+    fn poll_next(
+        self: std::pin::Pin<&mut Self>,
+        _: &mut std::task::Context<'_>,
+    ) -> std::task::Poll<Option<Self::Item>> {
+        panic!("NoopBodiesDownloader shouldn't be polled.")
+    }
+}
diff --git a/crates/net/downloaders/src/bodies/queue.rs b/crates/net/downloaders/src/bodies/queue.rs
index 0fc9635df..072e059a4 100644
--- a/crates/net/downloaders/src/bodies/queue.rs
+++ b/crates/net/downloaders/src/bodies/queue.rs
@@ -2,12 +2,10 @@ use super::request::BodiesRequestFuture;
 use crate::metrics::BodyDownloaderMetrics;
 use futures::{stream::FuturesUnordered, Stream};
 use futures_util::StreamExt;
-use reth_interfaces::{
-    consensus::Consensus,
-    p2p::{
-        bodies::{client::BodiesClient, response::BlockResponse},
-        error::DownloadResult,
-    },
+use reth_consensus::Consensus;
+use reth_interfaces::p2p::{
+    bodies::{client::BodiesClient, response::BlockResponse},
+    error::DownloadResult,
 };
 use reth_primitives::{BlockNumber, SealedHeader};
 use std::{
diff --git a/crates/net/downloaders/src/bodies/request.rs b/crates/net/downloaders/src/bodies/request.rs
index 302256ef4..dfe877a0b 100644
--- a/crates/net/downloaders/src/bodies/request.rs
+++ b/crates/net/downloaders/src/bodies/request.rs
@@ -1,16 +1,13 @@
 use crate::metrics::{BodyDownloaderMetrics, ResponseMetrics};
 use futures::{Future, FutureExt};
-use reth_interfaces::{
-    consensus::{Consensus as ConsensusTrait, Consensus},
-    p2p::{
-        bodies::{client::BodiesClient, response::BlockResponse},
-        error::{DownloadError, DownloadResult},
-        priority::Priority,
-    },
-};
-use reth_primitives::{
-    BlockBody, GotExpected, PeerId, SealedBlock, SealedHeader, WithPeerId, B256,
+use reth_consensus::Consensus;
+use reth_interfaces::p2p::{
+    bodies::{client::BodiesClient, response::BlockResponse},
+    error::{DownloadError, DownloadResult},
+    priority::Priority,
 };
+use reth_network_types::{PeerId, WithPeerId};
+use reth_primitives::{BlockBody, GotExpected, SealedBlock, SealedHeader, B256};
 use std::{
     collections::VecDeque,
     mem,
@@ -186,8 +183,13 @@ where
             if let Err(error) = self.consensus.validate_block(&block) {
                 // Body is invalid, put the header back and return an error
                 let hash = block.hash();
+                let number = block.number;
                 self.pending_headers.push_front(block.header);
-                return Err(DownloadError::BodyValidation { hash, error: Box::new(error) })
+                return Err(DownloadError::BodyValidation {
+                    hash,
+                    number,
+                    error: Box::new(error),
+                })
             }

             self.buffer.push(BlockResponse::Full(block));
@@ -252,7 +254,8 @@ mod tests {
         bodies::test_utils::zip_blocks,
         test_utils::{generate_bodies, TestBodiesClient},
     };
-    use reth_interfaces::test_utils::{generators, generators::random_header_range, TestConsensus};
+    use reth_consensus::test_utils::TestConsensus;
+    use reth_interfaces::test_utils::{generators, generators::random_header_range};

     /// Check if future returns empty bodies without dispatching any requests.
     #[tokio::test]
diff --git a/crates/net/downloaders/src/bodies/task.rs b/crates/net/downloaders/src/bodies/task.rs
index a57e5e486..f8815bcb0 100644
--- a/crates/net/downloaders/src/bodies/task.rs
+++ b/crates/net/downloaders/src/bodies/task.rs
@@ -42,8 +42,9 @@ impl TaskDownloader {
     /// # Example
     ///
     /// ```
+    /// use reth_consensus::Consensus;
     /// use reth_downloaders::bodies::{bodies::BodiesDownloaderBuilder, task::TaskDownloader};
-    /// use reth_interfaces::{consensus::Consensus, p2p::bodies::client::BodiesClient};
+    /// use reth_interfaces::p2p::bodies::client::BodiesClient;
     /// use reth_provider::HeaderProvider;
     /// use std::sync::Arc;
     ///
@@ -169,7 +170,8 @@ mod tests {
         test_utils::{generate_bodies, TestBodiesClient},
     };
     use assert_matches::assert_matches;
-    use reth_interfaces::{p2p::error::DownloadError, test_utils::TestConsensus};
+    use reth_consensus::test_utils::TestConsensus;
+    use reth_interfaces::p2p::error::DownloadError;
     use reth_provider::test_utils::create_test_provider_factory;
     use std::sync::Arc;

diff --git a/crates/net/downloaders/src/file_client.rs b/crates/net/downloaders/src/file_client.rs
index 7d29cc577..85fac4642 100644
--- a/crates/net/downloaders/src/file_client.rs
+++ b/crates/net/downloaders/src/file_client.rs
@@ -1,4 +1,5 @@
 use super::file_codec::BlockFileCodec;
+use futures::Future;
 use itertools::Either;
 use reth_interfaces::p2p::{
     bodies::client::{BodiesClient, BodiesFut},
@@ -7,11 +8,12 @@ use reth_interfaces::p2p::{
     headers::client::{HeadersClient, HeadersFut, HeadersRequest},
     priority::Priority,
 };
+use reth_network_types::PeerId;
 use reth_primitives::{
     BlockBody, BlockHash, BlockHashOrNumber, BlockNumber, BytesMut, Header, HeadersDirection,
-    PeerId, SealedHeader, B256,
+    SealedHeader, B256,
 };
-use std::{collections::HashMap, path::Path};
+use std::{collections::HashMap, io, path::Path};
 use thiserror::Error;
 use tokio::{fs::File, io::AsyncReadExt};
 use tokio_stream::StreamExt;
@@ -56,6 +58,16 @@ pub enum FileClientError {
     /// An error occurred when decoding blocks, headers, or rlp headers from the file.
     #[error("{0}")]
     Rlp(alloy_rlp::Error, Vec<u8>),
+
+    /// Custom error message.
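+    /// Used e.g. when converting a [`HackReceipt`](crate::file_codec_ovm_receipt::HackReceipt)
+    /// into a [`Receipt`](reth_primitives::Receipt) fails.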
+    #[error("{0}")]
+    Custom(&'static str),
+}
+
+impl From<&'static str> for FileClientError {
+    fn from(value: &'static str) -> Self {
+        Self::Custom(value)
+    }
+}

 impl FileClient {
@@ -77,82 +89,6 @@ impl FileClient {
         Ok(Self::from_reader(&reader[..], file_len).await?.0)
     }

-    /// Initialize the [`FileClient`] from bytes that have been read from file.
-    pub(crate) async fn from_reader<B>(
-        reader: B,
-        num_bytes: u64,
-    ) -> Result<(Self, Vec<u8>), FileClientError>
-    where
-        B: AsyncReadExt + Unpin,
-    {
-        let mut headers = HashMap::new();
-        let mut hash_to_number = HashMap::new();
-        let mut bodies = HashMap::new();
-
-        // use with_capacity to make sure the internal buffer contains the entire chunk
-        let mut stream = FramedRead::with_capacity(reader, BlockFileCodec, num_bytes as usize);
-
-        trace!(target: "downloaders::file",
-            target_num_bytes=num_bytes,
-            capacity=stream.read_buffer().capacity(),
-            "init decode stream"
-        );
-
-        let mut remaining_bytes = vec![];
-
-        let mut log_interval = 0;
-        let mut log_interval_start_block = 0;
-
-        while let Some(block_res) = stream.next().await {
-            let block = match block_res {
-                Ok(block) => block,
-                Err(FileClientError::Rlp(err, bytes)) => {
-                    trace!(target: "downloaders::file",
-                        %err,
-                        bytes_len=bytes.len(),
-                        "partial block returned from decoding chunk"
-                    );
-                    remaining_bytes = bytes;
-                    break
-                }
-                Err(err) => return Err(err),
-            };
-            let block_number = block.header.number;
-            let block_hash = block.header.hash_slow();
-
-            // add to the internal maps
-            headers.insert(block.header.number, block.header.clone());
-            hash_to_number.insert(block_hash, block.header.number);
-            bodies.insert(
-                block_hash,
-                BlockBody {
-                    transactions: block.body,
-                    ommers: block.ommers,
-                    withdrawals: block.withdrawals,
-                },
-            );
-
-            if log_interval == 0 {
-                trace!(target: "downloaders::file",
-                    block_number,
-                    "read first block"
-                );
-                log_interval_start_block = block_number;
-            } else if log_interval % 100_000 == 0 {
-                trace!(target: "downloaders::file",
-                    blocks=?log_interval_start_block..=block_number,
-                    "read blocks from file"
-                );
-                log_interval_start_block = block_number + 1;
-            }
-            log_interval += 1;
-        }
-
-        trace!(target: "downloaders::file", blocks = headers.len(), "Initialized file client");
-
-        Ok((Self { headers, hash_to_number, bodies }, remaining_bytes))
-    }
-
     /// Get the tip hash of the chain.
     pub fn tip(&self) -> Option<B256> {
         self.headers.get(&self.max_block()?).map(|h| h.hash_slow())
@@ -179,12 +115,6 @@ impl FileClient {
         self.headers.get(&self.max_block()?).map(|h| h.clone().seal_slow())
     }

-    /// Clones and returns the lowest header of this client has or `None` if empty. Seals header
-    /// before returning.
-    pub fn start_header(&self) -> Option<SealedHeader> {
-        self.headers.get(&self.min_block()?).map(|h| h.clone().seal_slow())
-    }
-
     /// Returns true if all blocks are canonical (no gaps)
     pub fn has_canonical_blocks(&self) -> bool {
         if self.headers.is_empty() {
@@ -227,6 +157,105 @@ impl FileClient {
     pub fn bodies_len(&self) -> usize {
         self.bodies.len()
     }
+
+    /// Returns an iterator over headers in the client.
+    pub fn headers_iter(&self) -> impl Iterator<Item = &Header> {
+        self.headers.values()
+    }
+
+    /// Returns a mutable iterator over bodies in the client.
+    pub fn bodies_iter_mut(&mut self) -> impl Iterator<Item = (&u64, &mut BlockBody)> {
+        let bodies = &mut self.bodies;
+        let headers = &self.headers;
+        headers.keys().zip(bodies.values_mut())
+    }
+
+    /// Returns the current number of transactions in the client.
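+    ///
+    /// Note: the count is computed on demand by walking every buffered body, rather than kept as
+    /// a cached value.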
+    pub fn total_transactions(&self) -> usize {
+        self.bodies.iter().flat_map(|(_, body)| &body.transactions).count()
+    }
+}
+
+impl FromReader for FileClient {
+    type Error = FileClientError;
+
+    /// Initialize the [`FileClient`] from bytes that have been read from file.
+    fn from_reader<B>(
+        reader: B,
+        num_bytes: u64,
+    ) -> impl Future<Output = Result<(Self, Vec<u8>), Self::Error>>
+    where
+        B: AsyncReadExt + Unpin,
+    {
+        let mut headers = HashMap::new();
+        let mut hash_to_number = HashMap::new();
+        let mut bodies = HashMap::new();
+
+        // use with_capacity to make sure the internal buffer contains the entire chunk
+        let mut stream = FramedRead::with_capacity(reader, BlockFileCodec, num_bytes as usize);
+
+        trace!(target: "downloaders::file",
+            target_num_bytes=num_bytes,
+            capacity=stream.read_buffer().capacity(),
+            "init decode stream"
+        );
+
+        let mut remaining_bytes = vec![];
+
+        let mut log_interval = 0;
+        let mut log_interval_start_block = 0;
+
+        async move {
+            while let Some(block_res) = stream.next().await {
+                let block = match block_res {
+                    Ok(block) => block,
+                    Err(FileClientError::Rlp(err, bytes)) => {
+                        trace!(target: "downloaders::file",
+                            %err,
+                            bytes_len=bytes.len(),
+                            "partial block returned from decoding chunk"
+                        );
+                        remaining_bytes = bytes;
+                        break
+                    }
+                    Err(err) => return Err(err),
+                };
+                let block_number = block.header.number;
+                let block_hash = block.header.hash_slow();
+
+                // add to the internal maps
+                headers.insert(block.header.number, block.header.clone());
+                hash_to_number.insert(block_hash, block.header.number);
+                bodies.insert(
+                    block_hash,
+                    BlockBody {
+                        transactions: block.body,
+                        ommers: block.ommers,
+                        withdrawals: block.withdrawals,
+                    },
+                );
+
+                if log_interval == 0 {
+                    trace!(target: "downloaders::file",
+                        block_number,
+                        "read first block"
+                    );
+                    log_interval_start_block = block_number;
+                } else if log_interval % 100_000 == 0 {
+                    trace!(target: "downloaders::file",
+                        blocks=?log_interval_start_block..=block_number,
+                        "read blocks from file"
+                    );
+                    log_interval_start_block = block_number + 1;
+                }
+                log_interval += 1;
+            }
+
+            trace!(target: "downloaders::file", blocks = headers.len(), "Initialized file client");
+
+            Ok((Self { headers, hash_to_number, bodies }, remaining_bytes))
+        }
+    }
 }

 impl HeadersClient for FileClient {
@@ -329,6 +358,11 @@ pub struct ChunkedFileReader {
 }

 impl ChunkedFileReader {
+    /// Returns the remaining file length.
+    pub fn file_len(&self) -> u64 {
+        self.file_byte_len
+    }
+
     /// Opens the file to import from given path. Returns a new instance. If no chunk byte length
     /// is passed, chunks have [`DEFAULT_BYTE_LEN_CHUNK_CHAIN_FILE`] (one static file).
     pub async fn new<P: AsRef<Path>>(
@@ -365,7 +399,10 @@
     }

     /// Read next chunk from file. Returns [`FileClient`] containing decoded chunk.
-    pub async fn next_chunk(&mut self) -> Result<Option<FileClient>, FileClientError> {
+    pub async fn next_chunk<T>(&mut self) -> Result<Option<T>, T::Error>
+    where
+        T: FromReader,
+    {
         if self.file_byte_len == 0 && self.chunk.is_empty() {
             // eof
             return Ok(None)
@@ -379,6 +416,7 @@

         // read new bytes from file
         let mut reader = BytesMut::zeroed(new_read_bytes_target_len as usize);
+        // actual bytes that have been read
         let new_read_bytes_len = self.file.read_exact(&mut reader).await? as u64;
@@ -404,14 +442,7 @@

         // make new file client from chunk
         let (file_client, bytes) =
-            FileClient::from_reader(&self.chunk[..], next_chunk_byte_len as u64).await?;
-
-        debug!(target: "downloaders::file",
-            headers_len=file_client.headers.len(),
-            bodies_len=file_client.bodies.len(),
-            remaining_bytes_len=bytes.len(),
-            "parsed blocks that were read from file"
-        );
+            T::from_reader(&self.chunk[..], next_chunk_byte_len as u64).await?;

         // save left over bytes
         self.chunk = bytes;
@@ -420,6 +451,20 @@
     }
 }

+/// Constructs a file client from a reader.
+pub trait FromReader {
+    /// Error returned by file client type.
+    type Error: From<io::Error>;
+
+    /// Returns a file client
+    fn from_reader<B>(
+        reader: B,
+        num_bytes: u64,
+    ) -> impl Future<Output = Result<(Self, Vec<u8>), Self::Error>>
+    where
+        Self: Sized,
+        B: AsyncReadExt + Unpin;
+}
+
 #[cfg(test)]
 mod tests {
     use super::*;
@@ -434,12 +479,10 @@ mod tests {
     use assert_matches::assert_matches;
     use futures_util::stream::StreamExt;
     use rand::Rng;
-    use reth_interfaces::{
-        p2p::{
-            bodies::downloader::BodyDownloader,
-            headers::downloader::{HeaderDownloader, SyncTarget},
-        },
-        test_utils::TestConsensus,
+    use reth_consensus::test_utils::TestConsensus;
+    use reth_interfaces::p2p::{
+        bodies::downloader::BodyDownloader,
+        headers::downloader::{HeaderDownloader, SyncTarget},
     };
     use reth_provider::test_utils::create_test_provider_factory;
     use std::{mem, sync::Arc};
@@ -584,7 +627,7 @@

         // test

-        while let Some(client) = reader.next_chunk().await.unwrap() {
+        while let Some(client) = reader.next_chunk::<FileClient>().await.unwrap() {
             let sync_target = client.tip_header().unwrap();

             let sync_target_hash = sync_target.hash();
diff --git a/crates/net/downloaders/src/file_codec_ovm_receipt.rs b/crates/net/downloaders/src/file_codec_ovm_receipt.rs
new file mode 100644
index 000000000..5b3c81a92
--- /dev/null
+++ b/crates/net/downloaders/src/file_codec_ovm_receipt.rs
@@ -0,0 +1,344 @@
+//! Codec for reading raw receipts from a file.
+
+use alloy_rlp::{Decodable, RlpDecodable};
+use reth_primitives::{
+    bytes::{Buf, BytesMut},
+    Address, Bloom, Bytes, Log, Receipt, TxType, B256,
+};
+use tokio_util::codec::Decoder;
+
+use crate::{file_client::FileClientError, receipt_file_client::ReceiptWithBlockNumber};
+
+/// Codec for reading raw receipts from a file.
+///
+/// If using with [`FramedRead`](tokio_util::codec::FramedRead), the user should make sure the
+/// framed reader has capacity for the entire receipts file. Otherwise, the decoder will return
+/// [`InputTooShort`](alloy_rlp::Error::InputTooShort), because RLP receipts can only be
+/// decoded if the internal buffer is large enough to contain the entire receipt.
+///
+/// Without ensuring the framed reader has capacity for the entire file, a receipt is likely to
+/// fall across two read buffers, and the decoder will then be unable to decode it, causing a
+/// failure.
+///
+/// It's recommended to use [`with_capacity`](tokio_util::codec::FramedRead::with_capacity) to set
+/// the capacity of the framed reader to the size of the file.
+#[derive(Debug)]
+pub struct HackReceiptFileCodec;
+
+impl Decoder for HackReceiptFileCodec {
+    type Item = Option<ReceiptWithBlockNumber>;
+    type Error = FileClientError;
+
+    fn decode(&mut self, src: &mut BytesMut) -> Result<Option<Self::Item>, Self::Error> {
+        if src.is_empty() {
+            return Ok(None)
+        }
+
+        let buf_slice = &mut src.as_ref();
+        let receipt = HackReceiptContainer::decode(buf_slice)
+            .map_err(|err| Self::Error::Rlp(err, src.to_vec()))?
+            .0;
+        src.advance(src.len() - buf_slice.len());
+
+        Ok(Some(
+            receipt.map(|receipt| receipt.try_into().map_err(FileClientError::from)).transpose()?,
+        ))
+    }
+}
+
+/// See
+#[derive(Debug, PartialEq, Eq, RlpDecodable)]
+pub struct HackReceipt {
+    tx_type: u8,
+    post_state: Bytes,
+    status: u64,
+    cumulative_gas_used: u64,
+    bloom: Bloom,
+    ///
+    logs: Vec<Log>,
+    tx_hash: B256,
+    contract_address: Address,
+    gas_used: u64,
+    block_hash: B256,
+    block_number: u64,
+    transaction_index: u32,
+    l1_gas_price: u64,
+    l1_gas_used: u64,
+    l1_fee: u64,
+    fee_scalar: String,
+}
+
+#[derive(Debug, PartialEq, Eq, RlpDecodable)]
+#[rlp(trailing)]
+struct HackReceiptContainer(Option<HackReceipt>);
+
+impl TryFrom<HackReceipt> for ReceiptWithBlockNumber {
+    type Error = &'static str;
+    fn try_from(exported_receipt: HackReceipt) -> Result<Self, Self::Error> {
+        let HackReceipt {
+            tx_type, status, cumulative_gas_used, logs, block_number: number, ..
+        } = exported_receipt;
+
+        #[allow(clippy::needless_update)]
+        let receipt = Receipt {
+            tx_type: TxType::try_from(tx_type.to_be_bytes()[0])?,
+            success: status != 0,
+            cumulative_gas_used,
+            logs,
+            ..Default::default()
+        };
+
+        Ok(Self { receipt, number })
+    }
+}
+
+#[cfg(test)]
+pub(super) mod test {
+    use reth_primitives::{alloy_primitives::LogData, hex};
+
+    use super::*;
+
+    pub(crate) const HACK_RECEIPT_ENCODED_BLOCK_1: &[u8] = &hex!("f9030ff9030c8080018303183db9010000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000400000000000100000000000000200000000002000000000000001000000000000000000004000000000000000000000000000040000400000100400000000000000100000000000000000000000000000020000000000000000000000000000000000000000000000001000000000000000000000100000000000000000000000000000000000000000000000000000000000000088000000080000000000010000000000000000000000000000800008000120000000000000000000000000000000002000f90197f89b948ce8c13d816fe6daf12d6fd9e4952e1fc88850aff863a00109fc6f55cf40689f02fbaad7af7fe7bbac8a3d2186600afc7d3e10cac60271a00000000000000000000000000000000000000000000000000000000000014218a000000000000000000000000070b17c0fe982ab4a7ac17a4c25485643151a1f2da000000000000000000000000000000000000000000000000000000000618d8837f89c948ce8c13d816fe6daf12d6fd9e4952e1fc88850aff884a092e98423f8adac6e64d0608e519fd1cefb861498385c6dee70d58fc926ddc68ca000000000000000000000000000000000000000000000000000000000d0e3ebf0a00000000000000000000000000000000000000000000000000000000000014218a000000000000000000000000070b17c0fe982ab4a7ac17a4c25485643151a1f2d80f85a948ce8c13d816fe6daf12d6fd9e4952e1fc88850aff842a0fe25c73e3b9089fac37d55c4c7efcba6f04af04cebd2fc4d6d7dbb07e1e5234fa000000000000000000000000000000000000000000000007edc6ca0bb6834800080a05e77a04531c7c107af1882d76cbff9486d0a9aa53701c30888509d4f5f2b003a9400000000000000000000000000000000000000008303183da0bee7192e575af30420cae0c7776304ac196077ee72b048970549e4f08e8754530180018212c2821c2383312e35");

+    pub(crate) const HACK_RECEIPT_ENCODED_BLOCK_2: &[u8] =
&hex!("f90271f9026e8080018301c60db9010000080000000200000000000000000008000000000000000000000100008000000000000000000000000000000000000000000000000000000000400000000000100000000000000000000000020000000000000000000000000000000000004000000000000000000000000000000000400000000400000000000000100000000000000000000000000000020000000000000000000000000000000000000000100000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000008400000000000000000010000000000000000020000000020000000000000000000000000000000000000000000002000f8faf89c948ce8c13d816fe6daf12d6fd9e4952e1fc88850aff884a092e98423f8adac6e64d0608e519fd1cefb861498385c6dee70d58fc926ddc68ca000000000000000000000000000000000000000000000000000000000d0ea0e40a00000000000000000000000000000000000000000000000000000000000014218a0000000000000000000000000e5e7492282fd1e3bfac337a0beccd29b15b7b24080f85a948ce8c13d816fe6daf12d6fd9e4952e1fc88850aff842a0fe25c73e3b9089fac37d55c4c7efcba6f04af04cebd2fc4d6d7dbb07e1e5234fa000000000000000000000000000000000000000000000007eda7867e0c7d4800080a0af6ed8a6864d44989adc47c84f6fe0aeb1819817505c42cde6cbbcd5e14dd3179400000000000000000000000000000000000000008301c60da045fd6ce41bb8ebb2bccdaa92dd1619e287704cb07722039901a7eba63dea1d130280018212c2821c2383312e35"); + + pub(crate) const HACK_RECEIPT_ENCODED_BLOCK_3: &[u8] = &hex!("f90271f9026e8080018301c60db9010000000000000000000000000000000000000000400000000000000000008000000000000000000000000000000000004000000000000000000000400004000000100000000000000000000000000000000000000000000000000000000000004000000000000000000000040000000000400080000400000000000000100000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000000008100000000000000000000000000000000000004000000000000000000000000008000000000000000000010000000000000000000000000000400000000000000001000000000000000000000000002000f8faf89c948ce8c13d816fe6daf12d6fd9e4952e1fc88850aff884a092e98423f8adac6e64d0608e519fd1cefb861498385c6dee70d58fc926ddc68ca000000000000000000000000000000000000000000000000000000000d101e54ba00000000000000000000000000000000000000000000000000000000000014218a0000000000000000000000000fa011d8d6c26f13abe2cefed38226e401b2b8a9980f85a948ce8c13d816fe6daf12d6fd9e4952e1fc88850aff842a0fe25c73e3b9089fac37d55c4c7efcba6f04af04cebd2fc4d6d7dbb07e1e5234fa000000000000000000000000000000000000000000000007ed8842f062774800080a08fab01dcec1da547e90a77597999e9153ff788fa6451d1cc942064427bd995019400000000000000000000000000000000000000008301c60da0da4509fe0ca03202ddbe4f68692c132d689ee098433691040ece18c3a45d44c50380018212c2821c2383312e35"); + + fn hack_receipt_1() -> HackReceipt { + let receipt = receipt_block_1(); + + HackReceipt { + tx_type: receipt.receipt.tx_type as u8, + post_state: Bytes::default(), + status: receipt.receipt.success as u64, + cumulative_gas_used: receipt.receipt.cumulative_gas_used, + bloom: Bloom::from(hex!("00000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000400000000000100000000000000200000000002000000000000001000000000000000000004000000000000000000000000000040000400000100400000000000000100000000000000000000000000000020000000000000000000000000000000000000000000000001000000000000000000000100000000000000000000000000000000000000000000000000000000000000088000000080000000000010000000000000000000000000000800008000120000000000000000000000000000000002000")), + logs: receipt.receipt.logs, + tx_hash: B256::from(hex!("5e77a04531c7c107af1882d76cbff9486d0a9aa53701c30888509d4f5f2b003a")), contract_address: 
Address::from(hex!("0000000000000000000000000000000000000000")), gas_used: 202813, + block_hash: B256::from(hex!("bee7192e575af30420cae0c7776304ac196077ee72b048970549e4f08e875453")), + block_number: receipt.number, + transaction_index: 0, + l1_gas_price: 1, + l1_gas_used: 4802, + l1_fee: 7203, + fee_scalar: String::from("1.5") + } + } + + pub(crate) fn receipt_block_1() -> ReceiptWithBlockNumber { + let log_1 = Log { + address: Address::from(hex!("8ce8c13d816fe6daf12d6fd9e4952e1fc88850af")), + data: LogData::new( + vec![ + B256::from(hex!( + "0109fc6f55cf40689f02fbaad7af7fe7bbac8a3d2186600afc7d3e10cac60271" + )), + B256::from(hex!( + "0000000000000000000000000000000000000000000000000000000000014218" + )), + B256::from(hex!( + "00000000000000000000000070b17c0fe982ab4a7ac17a4c25485643151a1f2d" + )), + ], + Bytes::from(hex!( + "00000000000000000000000000000000000000000000000000000000618d8837" + )), + ) + .unwrap(), + }; + + let log_2 = Log { + address: Address::from(hex!("8ce8c13d816fe6daf12d6fd9e4952e1fc88850af")), + data: LogData::new( + vec![ + B256::from(hex!( + "92e98423f8adac6e64d0608e519fd1cefb861498385c6dee70d58fc926ddc68c" + )), + B256::from(hex!( + "00000000000000000000000000000000000000000000000000000000d0e3ebf0" + )), + B256::from(hex!( + "0000000000000000000000000000000000000000000000000000000000014218" + )), + B256::from(hex!( + "00000000000000000000000070b17c0fe982ab4a7ac17a4c25485643151a1f2d" + )), + ], + Bytes::default(), + ) + .unwrap(), + }; + + let log_3 = Log { + address: Address::from(hex!("8ce8c13d816fe6daf12d6fd9e4952e1fc88850af")), + data: LogData::new( + vec![ + B256::from(hex!( + "fe25c73e3b9089fac37d55c4c7efcba6f04af04cebd2fc4d6d7dbb07e1e5234f" + )), + B256::from(hex!( + "00000000000000000000000000000000000000000000007edc6ca0bb68348000" + )), + ], + Bytes::default(), + ) + .unwrap(), + }; + + let mut receipt = Receipt { + tx_type: TxType::Legacy, + success: true, + cumulative_gas_used: 202813, + ..Default::default() + }; + // #[allow(clippy::needless_update)] not recognised, ..Default::default() needed so optimism + // feature must not be brought into scope + receipt.logs = vec![log_1, log_2, log_3]; + + ReceiptWithBlockNumber { receipt, number: 1 } + } + + pub(crate) fn receipt_block_2() -> ReceiptWithBlockNumber { + let log_1 = Log { + address: Address::from(hex!("8ce8c13d816fe6daf12d6fd9e4952e1fc88850af")), + data: LogData::new( + vec![ + B256::from(hex!( + "92e98423f8adac6e64d0608e519fd1cefb861498385c6dee70d58fc926ddc68c" + )), + B256::from(hex!( + "00000000000000000000000000000000000000000000000000000000d0ea0e40" + )), + B256::from(hex!( + "0000000000000000000000000000000000000000000000000000000000014218" + )), + B256::from(hex!( + "000000000000000000000000e5e7492282fd1e3bfac337a0beccd29b15b7b240" + )), + ], + Bytes::default(), + ) + .unwrap(), + }; + + let log_2 = Log { + address: Address::from(hex!("8ce8c13d816fe6daf12d6fd9e4952e1fc88850af")), + data: LogData::new( + vec![ + B256::from(hex!( + "fe25c73e3b9089fac37d55c4c7efcba6f04af04cebd2fc4d6d7dbb07e1e5234f" + )), + B256::from(hex!( + "00000000000000000000000000000000000000000000007eda7867e0c7d48000" + )), + ], + Bytes::default(), + ) + .unwrap(), + }; + + let mut receipt = Receipt { + tx_type: TxType::Legacy, + success: true, + cumulative_gas_used: 116237, + ..Default::default() + }; + // #[allow(clippy::needless_update)] not recognised, ..Default::default() needed so optimism + // feature must not be brought into scope + receipt.logs = vec![log_1, log_2]; + + ReceiptWithBlockNumber { receipt, number: 2 } 
+ } + + pub(crate) fn receipt_block_3() -> ReceiptWithBlockNumber { + let log_1 = Log { + address: Address::from(hex!("8ce8c13d816fe6daf12d6fd9e4952e1fc88850af")), + data: LogData::new( + vec![ + B256::from(hex!( + "92e98423f8adac6e64d0608e519fd1cefb861498385c6dee70d58fc926ddc68c" + )), + B256::from(hex!( + "00000000000000000000000000000000000000000000000000000000d101e54b" + )), + B256::from(hex!( + "0000000000000000000000000000000000000000000000000000000000014218" + )), + B256::from(hex!( + "000000000000000000000000fa011d8d6c26f13abe2cefed38226e401b2b8a99" + )), + ], + Bytes::default(), + ) + .unwrap(), + }; + + let log_2 = Log { + address: Address::from(hex!("8ce8c13d816fe6daf12d6fd9e4952e1fc88850af")), + data: LogData::new( + vec![ + B256::from(hex!( + "fe25c73e3b9089fac37d55c4c7efcba6f04af04cebd2fc4d6d7dbb07e1e5234f" + )), + B256::from(hex!( + "00000000000000000000000000000000000000000000007ed8842f0627748000" + )), + ], + Bytes::default(), + ) + .unwrap(), + }; + + let mut receipt = Receipt { + tx_type: TxType::Legacy, + success: true, + cumulative_gas_used: 116237, + ..Default::default() + }; + // #[allow(clippy::needless_update)] not recognised, ..Default::default() needed so optimism + // feature must not be brought into scope + receipt.logs = vec![log_1, log_2]; + + ReceiptWithBlockNumber { receipt, number: 3 } + } + + #[test] + fn decode_hack_receipt() { + let receipt = hack_receipt_1(); + + let decoded = HackReceiptContainer::decode(&mut &HACK_RECEIPT_ENCODED_BLOCK_1[..]) + .unwrap() + .0 + .unwrap(); + + assert_eq!(receipt, decoded); + } + + #[test] + #[allow(clippy::needless_update)] + fn receipts_codec() { + // rig + + let mut receipt_1_to_3 = HACK_RECEIPT_ENCODED_BLOCK_1.to_vec(); + receipt_1_to_3.extend_from_slice(HACK_RECEIPT_ENCODED_BLOCK_2); + receipt_1_to_3.extend_from_slice(HACK_RECEIPT_ENCODED_BLOCK_3); + + let encoded = &mut BytesMut::from(&receipt_1_to_3[..]); + + let mut codec = HackReceiptFileCodec; + + // test + + let first_decoded_receipt = codec.decode(encoded).unwrap().unwrap().unwrap(); + + assert_eq!(receipt_block_1(), first_decoded_receipt); + + let second_decoded_receipt = codec.decode(encoded).unwrap().unwrap().unwrap(); + + assert_eq!(receipt_block_2(), second_decoded_receipt); + + let third_decoded_receipt = codec.decode(encoded).unwrap().unwrap().unwrap(); + + assert_eq!(receipt_block_3(), third_decoded_receipt); + } +} diff --git a/crates/net/downloaders/src/headers/mod.rs b/crates/net/downloaders/src/headers/mod.rs index 4321ef52b..a261f5579 100644 --- a/crates/net/downloaders/src/headers/mod.rs +++ b/crates/net/downloaders/src/headers/mod.rs @@ -1,6 +1,9 @@ /// A Linear downloader implementation. pub mod reverse_headers; +/// A header downloader that does nothing. Useful to build unwind-only pipelines. +pub mod noop; + /// A downloader implementation that spawns a downloader to a task pub mod task; diff --git a/crates/net/downloaders/src/headers/noop.rs b/crates/net/downloaders/src/headers/noop.rs new file mode 100644 index 000000000..8127cc232 --- /dev/null +++ b/crates/net/downloaders/src/headers/noop.rs @@ -0,0 +1,30 @@ +use futures::Stream; +use reth_interfaces::p2p::headers::{ + downloader::{HeaderDownloader, SyncTarget}, + error::HeadersDownloaderError, +}; +use reth_primitives::SealedHeader; + +/// A [HeaderDownloader] implementation that does nothing. 
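+///
+/// Polling it panics by design: it only exists so that unwind-only pipelines can satisfy the
+/// header downloader type parameter without ever driving a forward sync.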
+#[derive(Debug, Default)]
+#[non_exhaustive]
+pub struct NoopHeaderDownloader;
+
+impl HeaderDownloader for NoopHeaderDownloader {
+    fn update_local_head(&mut self, _: SealedHeader) {}
+
+    fn update_sync_target(&mut self, _: SyncTarget) {}
+
+    fn set_batch_size(&mut self, _: usize) {}
+}
+
+impl Stream for NoopHeaderDownloader {
+    type Item = Result<Vec<SealedHeader>, HeadersDownloaderError>;
+
+    fn poll_next(
+        self: std::pin::Pin<&mut Self>,
+        _: &mut std::task::Context<'_>,
+    ) -> std::task::Poll<Option<Self::Item>> {
+        panic!("NoopHeaderDownloader shouldn't be polled.")
+    }
+}
diff --git a/crates/net/downloaders/src/headers/reverse_headers.rs b/crates/net/downloaders/src/headers/reverse_headers.rs
index 3af45c172..a5cdb145b 100644
--- a/crates/net/downloaders/src/headers/reverse_headers.rs
+++ b/crates/net/downloaders/src/headers/reverse_headers.rs
@@ -6,21 +6,19 @@ use futures::{stream::Stream, FutureExt};
 use futures_util::{stream::FuturesUnordered, StreamExt};
 use rayon::prelude::*;
 use reth_config::config::HeadersConfig;
-use reth_interfaces::{
-    consensus::Consensus,
-    p2p::{
-        error::{DownloadError, DownloadResult, PeerRequestResult},
-        headers::{
-            client::{HeadersClient, HeadersRequest},
-            downloader::{validate_header_download, HeaderDownloader, SyncTarget},
-            error::{HeadersDownloaderError, HeadersDownloaderResult},
-        },
-        priority::Priority,
+use reth_consensus::Consensus;
+use reth_interfaces::p2p::{
+    error::{DownloadError, DownloadResult, PeerRequestResult},
+    headers::{
+        client::{HeadersClient, HeadersRequest},
+        downloader::{validate_header_download, HeaderDownloader, SyncTarget},
+        error::{HeadersDownloaderError, HeadersDownloaderResult},
     },
+    priority::Priority,
 };
+use reth_network_types::PeerId;
 use reth_primitives::{
-    BlockHashOrNumber, BlockNumber, GotExpected, Header, HeadersDirection, PeerId, SealedHeader,
-    B256,
+    BlockHashOrNumber, BlockNumber, GotExpected, Header, HeadersDirection, SealedHeader, B256,
 };
 use reth_tasks::{TaskSpawner, TokioTaskExecutor};
 use std::{
@@ -538,7 +536,7 @@ where
     /// Handles the error of a bad response
     ///
     /// This will re-submit the request.
-    fn on_headers_error(&mut self, err: Box<HeadersResponseError>) {
+    fn on_headers_error(&self, err: Box<HeadersResponseError>) {
         let HeadersResponseError { request, peer_id, error } = *err;

         self.penalize_peer(peer_id, &error);
@@ -583,7 +581,7 @@
     }

     /// Starts a request future
-    fn submit_request(&mut self, request: HeadersRequest, priority: Priority) {
+    fn submit_request(&self, request: HeadersRequest, priority: Priority) {
         trace!(target: "downloaders::headers", ?request, "Submitting headers request");
         self.in_progress_queue.push(self.request_fut(request, priority));
         self.metrics.in_flight_requests.increment(1.);
@@ -1225,7 +1223,8 @@ mod tests {

     use crate::headers::test_utils::child_header;
     use assert_matches::assert_matches;
-    use reth_interfaces::test_utils::{TestConsensus, TestHeadersClient};
+    use reth_consensus::test_utils::TestConsensus;
+    use reth_interfaces::test_utils::TestHeadersClient;

     /// Tests that `replace_number` works the same way as Option::replace
     #[test]
diff --git a/crates/net/downloaders/src/headers/task.rs b/crates/net/downloaders/src/headers/task.rs
index 83f00d4f1..aa079dad2 100644
--- a/crates/net/downloaders/src/headers/task.rs
+++ b/crates/net/downloaders/src/headers/task.rs
@@ -44,7 +44,7 @@ impl TaskDownloader {
     /// # use std::sync::Arc;
     /// # use reth_downloaders::headers::reverse_headers::ReverseHeadersDownloader;
     /// # use reth_downloaders::headers::task::TaskDownloader;
-    /// # use reth_interfaces::consensus::Consensus;
+    /// # use reth_consensus::Consensus;
     /// # use reth_interfaces::p2p::headers::client::HeadersClient;
     /// # fn t<H: HeadersClient + 'static>(consensus: Arc<dyn Consensus>, client: Arc<H>) {
     /// let downloader = ReverseHeadersDownloader::<H>::builder().build(
@@ -183,7 +183,8 @@ mod tests {
     use crate::headers::{
         reverse_headers::ReverseHeadersDownloaderBuilder, test_utils::child_header,
     };
-    use reth_interfaces::test_utils::{TestConsensus, TestHeadersClient};
+    use reth_consensus::test_utils::TestConsensus;
+    use reth_interfaces::test_utils::TestHeadersClient;
     use std::sync::Arc;

     #[tokio::test(flavor = "multi_thread")]
diff --git a/crates/net/downloaders/src/lib.rs b/crates/net/downloaders/src/lib.rs
index 37c4a95e3..81e669d88 100644
--- a/crates/net/downloaders/src/lib.rs
+++ b/crates/net/downloaders/src/lib.rs
@@ -27,10 +27,29 @@ pub mod metrics;
 /// efficiently buffering headers and bodies for retrieval.
 pub mod file_client;

+/// Module managing file-based data retrieval and buffering of receipts.
+///
+/// Contains [ReceiptFileClient](receipt_file_client::ReceiptFileClient) to read receipt data from
+/// files, efficiently buffering receipts for retrieval.
+///
+/// Currently configured to use codec [`HackReceipt`](file_codec_ovm_receipt::HackReceipt) based on
+/// export of below Bedrock data using . Codec can
+/// be replaced with regular encoding of receipts for export.
+///
+/// NOTE: receipts can be exported using regular op-geth encoding for `Receipt` type, to fit
+/// reth's needs for importing. However, this would require patching the diff in to export the `Receipt` and not `HackReceipt` type (originally
+/// made for op-erigon's import needs).
+pub mod receipt_file_client;
+
 /// Module with a codec for reading and encoding block bodies in files.
 ///
 /// Enables decoding and encoding `Block` types within file contexts.
 pub mod file_codec;

+/// Module with a codec for reading and encoding receipts in files.
+///
+/// Enables decoding and encoding `HackReceipt` type. See .
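+///
+/// A minimal decoding sketch (hypothetical; assumes `buf` already holds at least one RLP
+/// encoded `HackReceipt` and drives the codec directly instead of through a
+/// [`FramedRead`](tokio_util::codec::FramedRead)):
+///
+/// ```ignore
+/// use reth_downloaders::file_codec_ovm_receipt::HackReceiptFileCodec;
+/// use reth_primitives::bytes::BytesMut;
+/// use tokio_util::codec::Decoder;
+///
+/// fn decode_one(buf: &mut BytesMut) {
+///     let mut codec = HackReceiptFileCodec;
+///     // `Ok(None)` means an empty buffer; `Ok(Some(None))` an empty receipt slot.
+///     if let Ok(Some(Some(receipt))) = codec.decode(buf) {
+///         println!("block {}: {} logs", receipt.number, receipt.receipt.logs.len());
+///     }
+/// }
+/// ```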
+pub mod file_codec_ovm_receipt;
+
 #[cfg(any(test, feature = "test-utils"))]
 pub mod test_utils;
diff --git a/crates/net/downloaders/src/receipt_file_client.rs b/crates/net/downloaders/src/receipt_file_client.rs
new file mode 100644
index 000000000..b6291d0a3
--- /dev/null
+++ b/crates/net/downloaders/src/receipt_file_client.rs
@@ -0,0 +1,268 @@
+use futures::Future;
+use reth_primitives::{Receipt, Receipts};
+use tokio::io::AsyncReadExt;
+use tokio_stream::StreamExt;
+use tokio_util::codec::FramedRead;
+use tracing::trace;
+
+use crate::{
+    file_client::{FileClientError, FromReader},
+    file_codec_ovm_receipt::HackReceiptFileCodec,
+};
+
+/// File client for reading RLP encoded receipts from file. Receipts in file must be in sequential
+/// order w.r.t. block number.
+#[derive(Debug)]
+pub struct ReceiptFileClient {
+    /// The buffered receipts, read from file, as nested lists. One list per block number.
+    pub receipts: Receipts,
+    /// First (lowest) block number read from file.
+    pub first_block: u64,
+    /// Total number of receipts. Count of elements in [`Receipts`] flattened.
+    pub total_receipts: usize,
+}
+
+impl FromReader for ReceiptFileClient {
+    type Error = FileClientError;
+
+    /// Initialize the [`ReceiptFileClient`] from bytes that have been read from file. Caution! If
+    /// the first block has no transactions, it's assumed to be the genesis block.
+    fn from_reader<B>(
+        reader: B,
+        num_bytes: u64,
+    ) -> impl Future<Output = Result<(Self, Vec<u8>), Self::Error>>
+    where
+        B: AsyncReadExt + Unpin,
+    {
+        let mut receipts = Receipts::new();
+
+        // use with_capacity to make sure the internal buffer contains the entire chunk
+        let mut stream =
+            FramedRead::with_capacity(reader, HackReceiptFileCodec, num_bytes as usize);
+
+        trace!(target: "downloaders::file",
+            target_num_bytes=num_bytes,
+            capacity=stream.read_buffer().capacity(),
+            codec=?HackReceiptFileCodec,
+            "init decode stream"
+        );
+
+        let mut remaining_bytes = vec![];
+
+        let mut log_interval = 0;
+        let mut log_interval_start_block = 0;
+
+        let mut block_number = 0;
+        let mut total_receipts = 0;
+        let mut receipts_for_block = vec![];
+        let mut first_block = None;
+
+        async move {
+            while let Some(receipt_res) = stream.next().await {
+                let receipt = match receipt_res {
+                    Ok(receipt) => receipt,
+                    Err(FileClientError::Rlp(err, bytes)) => {
+                        trace!(target: "downloaders::file",
+                            %err,
+                            bytes_len=bytes.len(),
+                            "partial receipt returned from decoding chunk"
+                        );
+
+                        remaining_bytes = bytes;
+
+                        break
+                    }
+                    Err(err) => return Err(err),
+                };
+
+                total_receipts += 1;
+
+                match receipt {
+                    Some(ReceiptWithBlockNumber { receipt, number }) => {
+                        if first_block.is_none() {
+                            first_block = Some(number);
+                            block_number = number;
+                        }
+
+                        if block_number == number {
+                            receipts_for_block.push(Some(receipt));
+                        } else {
+                            receipts.push(receipts_for_block);
+
+                            // next block
+                            block_number = number;
+                            receipts_for_block = vec![Some(receipt)];
+                        }
+                    }
+                    None => {
+                        match first_block {
+                            Some(num) => {
+                                // if there was a block number before this, push receipts for that
+                                // block
+                                receipts.push(receipts_for_block);
+                                // block with no txns
+                                block_number = num + receipts.len() as u64;
+                            }
+                            None => {
+                                // this is the first block and it's empty, assume it's the genesis
+                                // block
+                                first_block = Some(0);
+                                block_number = 0;
+                            }
+                        }
+
+                        receipts_for_block = vec![];
+                    }
+                }
+
+                if log_interval == 0 {
+                    trace!(target: "downloaders::file",
+                        block_number,
+                        total_receipts,
+                        "read first receipt"
+                    );
+                    log_interval_start_block = block_number;
+                } else if log_interval % 100_000 ==
0 { + trace!(target: "downloaders::file", + blocks=?log_interval_start_block..=block_number, + total_receipts, + "read receipts from file" + ); + log_interval_start_block = block_number + 1; + } + log_interval += 1; + } + + trace!(target: "downloaders::file", + blocks=?log_interval_start_block..=block_number, + total_receipts, + "read receipts from file" + ); + + // we need to push the last receipts + receipts.push(receipts_for_block); + + trace!(target: "downloaders::file", + blocks = receipts.len(), + total_receipts, + "Initialized receipt file client" + ); + + Ok(( + Self { receipts, first_block: first_block.unwrap_or_default(), total_receipts }, + remaining_bytes, + )) + } + } +} + +/// [`Receipt`] with block number. +#[derive(Debug, PartialEq, Eq)] +pub struct ReceiptWithBlockNumber { + /// Receipt. + pub receipt: Receipt, + /// Block number. + pub number: u64, +} + +#[cfg(test)] +mod test { + use reth_primitives::hex; + use reth_tracing::init_test_tracing; + + use crate::file_codec_ovm_receipt::test::{ + receipt_block_1 as op_mainnet_receipt_block_1, + receipt_block_2 as op_mainnet_receipt_block_2, + receipt_block_3 as op_mainnet_receipt_block_3, + HACK_RECEIPT_ENCODED_BLOCK_1 as HACK_RECEIPT_ENCODED_BLOCK_1_OP_MAINNET, + HACK_RECEIPT_ENCODED_BLOCK_2 as HACK_RECEIPT_ENCODED_BLOCK_2_OP_MAINNET, + HACK_RECEIPT_ENCODED_BLOCK_3 as HACK_RECEIPT_ENCODED_BLOCK_3_OP_MAINNET, + }; + + use super::*; + + /// No receipts for genesis block + const HACK_RECEIPT_BLOCK_NO_TRANSACTIONS: &[u8] = &hex!("c0"); + + #[tokio::test] + async fn receipt_file_client_ovm_codec() { + init_test_tracing(); + + // genesis block has no hack receipts + let mut encoded_receipts = HACK_RECEIPT_BLOCK_NO_TRANSACTIONS.to_vec(); + // one receipt each for block 1 and 2 + encoded_receipts.extend_from_slice(HACK_RECEIPT_ENCODED_BLOCK_1_OP_MAINNET); + encoded_receipts.extend_from_slice(HACK_RECEIPT_ENCODED_BLOCK_2_OP_MAINNET); + // no receipt for block 4 + encoded_receipts.extend_from_slice(HACK_RECEIPT_BLOCK_NO_TRANSACTIONS); + + let encoded_byte_len = encoded_receipts.len() as u64; + let reader = &mut &encoded_receipts[..]; + + let (ReceiptFileClient { receipts, first_block, total_receipts }, _remaining_bytes) = + ReceiptFileClient::from_reader(reader, encoded_byte_len).await.unwrap(); + + assert_eq!(4, total_receipts); + assert_eq!(0, first_block); + assert!(receipts[0].is_empty()); + assert_eq!(op_mainnet_receipt_block_1().receipt, receipts[1][0].clone().unwrap()); + assert_eq!(op_mainnet_receipt_block_2().receipt, receipts[2][0].clone().unwrap()); + assert!(receipts[3].is_empty()); + } + + #[tokio::test] + async fn no_receipts_middle_block() { + init_test_tracing(); + + // genesis block has no hack receipts + let mut encoded_receipts = HACK_RECEIPT_BLOCK_NO_TRANSACTIONS.to_vec(); + // one receipt each for block 1 + encoded_receipts.extend_from_slice(HACK_RECEIPT_ENCODED_BLOCK_1_OP_MAINNET); + // no receipt for block 2 + encoded_receipts.extend_from_slice(HACK_RECEIPT_BLOCK_NO_TRANSACTIONS); + // one receipt for block 3 + encoded_receipts.extend_from_slice(HACK_RECEIPT_ENCODED_BLOCK_3_OP_MAINNET); + + let encoded_byte_len = encoded_receipts.len() as u64; + let reader = &mut &encoded_receipts[..]; + + let (ReceiptFileClient { receipts, first_block, total_receipts }, _remaining_bytes) = + ReceiptFileClient::from_reader(reader, encoded_byte_len).await.unwrap(); + + assert_eq!(4, total_receipts); + assert_eq!(0, first_block); + assert!(receipts[0].is_empty()); + assert_eq!(op_mainnet_receipt_block_1().receipt, 
receipts[1][0].clone().unwrap()); + assert!(receipts[2].is_empty()); + assert_eq!(op_mainnet_receipt_block_3().receipt, receipts[3][0].clone().unwrap()); + } + + #[tokio::test] + async fn two_receipts_same_block() { + init_test_tracing(); + + // genesis block has no hack receipts + let mut encoded_receipts = HACK_RECEIPT_BLOCK_NO_TRANSACTIONS.to_vec(); + // one receipt each for block 1 + encoded_receipts.extend_from_slice(HACK_RECEIPT_ENCODED_BLOCK_1_OP_MAINNET); + // two receipts for block 2 + encoded_receipts.extend_from_slice(HACK_RECEIPT_ENCODED_BLOCK_2_OP_MAINNET); + encoded_receipts.extend_from_slice(HACK_RECEIPT_ENCODED_BLOCK_2_OP_MAINNET); + // one receipt for block 3 + encoded_receipts.extend_from_slice(HACK_RECEIPT_ENCODED_BLOCK_3_OP_MAINNET); + + let encoded_byte_len = encoded_receipts.len() as u64; + let reader = &mut &encoded_receipts[..]; + + let (ReceiptFileClient { receipts, first_block, total_receipts }, _remaining_bytes) = + ReceiptFileClient::from_reader(reader, encoded_byte_len).await.unwrap(); + + assert_eq!(5, total_receipts); + assert_eq!(0, first_block); + assert!(receipts[0].is_empty()); + assert_eq!(op_mainnet_receipt_block_1().receipt, receipts[1][0].clone().unwrap()); + assert_eq!(op_mainnet_receipt_block_2().receipt, receipts[2][0].clone().unwrap()); + assert_eq!(op_mainnet_receipt_block_2().receipt, receipts[2][1].clone().unwrap()); + assert_eq!(op_mainnet_receipt_block_3().receipt, receipts[3][0].clone().unwrap()); + } +} diff --git a/crates/net/downloaders/src/test_utils/bodies_client.rs b/crates/net/downloaders/src/test_utils/bodies_client.rs index 2f3cf2f29..a7387fa88 100644 --- a/crates/net/downloaders/src/test_utils/bodies_client.rs +++ b/crates/net/downloaders/src/test_utils/bodies_client.rs @@ -3,7 +3,8 @@ use reth_interfaces::p2p::{ download::DownloadClient, priority::Priority, }; -use reth_primitives::{BlockBody, PeerId, B256}; +use reth_network_types::PeerId; +use reth_primitives::{BlockBody, B256}; use std::{ collections::HashMap, fmt::Debug, diff --git a/crates/net/ecies/Cargo.toml b/crates/net/ecies/Cargo.toml index 461aad885..d4a4de32a 100644 --- a/crates/net/ecies/Cargo.toml +++ b/crates/net/ecies/Cargo.toml @@ -13,6 +13,7 @@ workspace = true [dependencies] reth-primitives.workspace = true reth-net-common.workspace = true +reth-network-types.workspace = true alloy-rlp = { workspace = true, features = ["derive"] } futures.workspace = true diff --git a/crates/net/ecies/src/algorithm.rs b/crates/net/ecies/src/algorithm.rs index 5dce7fee6..52398de4f 100644 --- a/crates/net/ecies/src/algorithm.rs +++ b/crates/net/ecies/src/algorithm.rs @@ -13,9 +13,10 @@ use ctr::Ctr64BE; use digest::{crypto_common::KeyIvInit, Digest}; use educe::Educe; use rand::{thread_rng, Rng}; +use reth_network_types::{id2pk, pk2id}; use reth_primitives::{ bytes::{BufMut, Bytes, BytesMut}, - id2pk, pk2id, B128, B256, B512 as PeerId, + B128, B256, B512 as PeerId, }; use secp256k1::{ ecdsa::{RecoverableSignature, RecoveryId}, @@ -399,7 +400,7 @@ impl ECIES { let msg = x ^ self.nonce; let (rec_id, sig) = SECP256K1 .sign_ecdsa_recoverable( - &secp256k1::Message::from_slice(msg.as_slice()).unwrap(), + &secp256k1::Message::from_digest(msg.0), &self.ephemeral_secret_key, ) .serialize_compact(); @@ -473,7 +474,7 @@ impl ECIES { let x = ecdh_x(&self.remote_public_key.unwrap(), &self.secret_key); self.remote_ephemeral_public_key = Some(SECP256K1.recover_ecdsa( - &secp256k1::Message::from_slice((x ^ self.remote_nonce.unwrap()).as_ref()).unwrap(), + &secp256k1::Message::from_digest((x ^ 
self.remote_nonce.unwrap()).0), &signature, )?); self.ephemeral_shared_secret = @@ -631,7 +632,7 @@ impl ECIES { let tag = self.egress_mac.as_mut().unwrap().digest(); out.reserve(ECIES::header_len()); - out.extend_from_slice(&header); + out.extend_from_slice(&header[..]); out.extend_from_slice(tag.as_slice()); } diff --git a/crates/net/ecies/src/error.rs b/crates/net/ecies/src/error.rs index d87545871..64526f16d 100644 --- a/crates/net/ecies/src/error.rs +++ b/crates/net/ecies/src/error.rs @@ -81,8 +81,8 @@ pub enum ECIESErrorImpl { /// a message from the (partially filled) buffer. #[error("stream closed due to not being readable")] UnreadableStream, - // Error when data is not recieved from peer for a prolonged period. - #[error("never recieved data from remote peer")] + // Error when data is not received from peer for a prolonged period. + #[error("never received data from remote peer")] StreamTimeout, } diff --git a/crates/net/ecies/src/stream.rs b/crates/net/ecies/src/stream.rs index 47518aa25..4538fc059 100644 --- a/crates/net/ecies/src/stream.rs +++ b/crates/net/ecies/src/stream.rs @@ -175,7 +175,7 @@ where #[cfg(test)] mod tests { use super::*; - use reth_primitives::pk2id; + use reth_network_types::pk2id; use secp256k1::SECP256K1; use tokio::net::{TcpListener, TcpStream}; diff --git a/crates/net/eth-wire-types/src/blocks.rs b/crates/net/eth-wire-types/src/blocks.rs index fa6365c20..36b8e6e8c 100644 --- a/crates/net/eth-wire-types/src/blocks.rs +++ b/crates/net/eth-wire-types/src/blocks.rs @@ -131,8 +131,8 @@ mod tests { use crate::{message::RequestPair, BlockBodies, BlockHeaders, GetBlockBodies, GetBlockHeaders}; use alloy_rlp::{Decodable, Encodable}; use reth_primitives::{ - hex, BlockHashOrNumber, Header, HeadersDirection, Signature, Transaction, TransactionKind, - TransactionSigned, TxLegacy, U256, + hex, BlockHashOrNumber, Header, HeadersDirection, Signature, Transaction, + TransactionSigned, TxKind, TxLegacy, U256, }; use std::str::FromStr; @@ -383,7 +383,7 @@ mod tests { nonce: 0x8u64, gas_price: 0x4a817c808, gas_limit: 0x2e248u64, - to: TransactionKind::Call(hex!("3535353535353535353535353535353535353535").into()), + to: TxKind::Call(hex!("3535353535353535353535353535353535353535").into()), value: U256::from(0x200u64), input: Default::default(), }), @@ -398,7 +398,7 @@ mod tests { nonce: 0x9u64, gas_price: 0x4a817c809, gas_limit: 0x33450u64, - to: TransactionKind::Call(hex!("3535353535353535353535353535353535353535").into()), + to: TxKind::Call(hex!("3535353535353535353535353535353535353535").into()), value: U256::from(0x2d9u64), input: Default::default(), }), Signature { @@ -455,7 +455,7 @@ mod tests { nonce: 0x8u64, gas_price: 0x4a817c808, gas_limit: 0x2e248u64, - to: TransactionKind::Call(hex!("3535353535353535353535353535353535353535").into()), + to: TxKind::Call(hex!("3535353535353535353535353535353535353535").into()), value: U256::from(0x200u64), input: Default::default(), }), @@ -471,7 +471,7 @@ mod tests { nonce: 0x9u64, gas_price: 0x4a817c809, gas_limit: 0x33450u64, - to: TransactionKind::Call(hex!("3535353535353535353535353535353535353535").into()), + to: TxKind::Call(hex!("3535353535353535353535353535353535353535").into()), value: U256::from(0x2d9u64), input: Default::default(), }), diff --git a/crates/net/eth-wire-types/src/transactions.rs b/crates/net/eth-wire-types/src/transactions.rs index 5d48211be..f19bbdcc7 100644 --- a/crates/net/eth-wire-types/src/transactions.rs +++ b/crates/net/eth-wire-types/src/transactions.rs @@ -80,8 +80,8 @@ mod tests { use 
crate::{message::RequestPair, GetPooledTransactions, PooledTransactions}; use alloy_rlp::{Decodable, Encodable}; use reth_primitives::{ - hex, PooledTransactionsElement, Signature, Transaction, TransactionKind, TransactionSigned, - TxEip1559, TxLegacy, U256, + hex, PooledTransactionsElement, Signature, Transaction, TransactionSigned, TxEip1559, + TxKind, TxLegacy, U256, }; use std::str::FromStr; @@ -130,9 +130,7 @@ mod tests { nonce: 0x8u64, gas_price: 0x4a817c808, gas_limit: 0x2e248u64, - to: TransactionKind::Call( - hex!("3535353535353535353535353535353535353535").into(), - ), + to: TxKind::Call(hex!("3535353535353535353535353535353535353535").into()), value: U256::from(0x200u64), input: Default::default(), }), @@ -154,9 +152,7 @@ mod tests { nonce: 0x09u64, gas_price: 0x4a817c809, gas_limit: 0x33450u64, - to: TransactionKind::Call( - hex!("3535353535353535353535353535353535353535").into(), - ), + to: TxKind::Call(hex!("3535353535353535353535353535353535353535").into()), value: U256::from(0x2d9u64), input: Default::default(), }), @@ -200,9 +196,7 @@ mod tests { nonce: 0x8u64, gas_price: 0x4a817c808, gas_limit: 0x2e248u64, - to: TransactionKind::Call( - hex!("3535353535353535353535353535353535353535").into(), - ), + to: TxKind::Call(hex!("3535353535353535353535353535353535353535").into()), value: U256::from(0x200u64), input: Default::default(), }), @@ -224,9 +218,7 @@ mod tests { nonce: 0x09u64, gas_price: 0x4a817c809, gas_limit: 0x33450u64, - to: TransactionKind::Call( - hex!("3535353535353535353535353535353535353535").into(), - ), + to: TxKind::Call(hex!("3535353535353535353535353535353535353535").into()), value: U256::from(0x2d9u64), input: Default::default(), }), @@ -271,9 +263,7 @@ mod tests { nonce: 15u64, gas_price: 2200000000, gas_limit: 34811u64, - to: TransactionKind::Call( - hex!("cf7f9e66af820a19257a2108375b180b0ec49167").into(), - ), + to: TxKind::Call(hex!("cf7f9e66af820a19257a2108375b180b0ec49167").into()), value: U256::from(1234u64), input: Default::default(), }), @@ -296,9 +286,7 @@ mod tests { max_priority_fee_per_gas: 1500000000, max_fee_per_gas: 1500000013, gas_limit: 21000u64, - to: TransactionKind::Call( - hex!("61815774383099e24810ab832a5b2a5425c154d5").into(), - ), + to: TxKind::Call(hex!("61815774383099e24810ab832a5b2a5425c154d5").into()), value: U256::from(3000000000000000000u64), input: Default::default(), access_list: Default::default(), @@ -321,9 +309,7 @@ mod tests { nonce: 3u64, gas_price: 2000000000, gas_limit: 10000000u64, - to: TransactionKind::Call( - hex!("d3e8763675e4c425df46cc3b5c0f6cbdac396046").into(), - ), + to: TxKind::Call(hex!("d3e8763675e4c425df46cc3b5c0f6cbdac396046").into()), value: U256::from(1000000000000000u64), input: Default::default(), }), @@ -345,9 +331,7 @@ mod tests { nonce: 1u64, gas_price: 1000000000, gas_limit: 100000u64, - to: TransactionKind::Call( - hex!("d3e8763675e4c425df46cc3b5c0f6cbdac396046").into(), - ), + to: TxKind::Call(hex!("d3e8763675e4c425df46cc3b5c0f6cbdac396046").into()), value: U256::from(693361000000000u64), input: Default::default(), }), @@ -369,9 +353,7 @@ mod tests { nonce: 2u64, gas_price: 1000000000, gas_limit: 100000u64, - to: TransactionKind::Call( - hex!("d3e8763675e4c425df46cc3b5c0f6cbdac396046").into(), - ), + to: TxKind::Call(hex!("d3e8763675e4c425df46cc3b5c0f6cbdac396046").into()), value: U256::from(1000000000000000u64), input: Default::default(), }), @@ -420,9 +402,7 @@ mod tests { nonce: 15u64, gas_price: 2200000000, gas_limit: 34811u64, - to: TransactionKind::Call( - 
hex!("cf7f9e66af820a19257a2108375b180b0ec49167").into(), - ), + to: TxKind::Call(hex!("cf7f9e66af820a19257a2108375b180b0ec49167").into()), value: U256::from(1234u64), input: Default::default(), }), @@ -445,9 +425,7 @@ mod tests { max_priority_fee_per_gas: 1500000000, max_fee_per_gas: 1500000013, gas_limit: 21000u64, - to: TransactionKind::Call( - hex!("61815774383099e24810ab832a5b2a5425c154d5").into(), - ), + to: TxKind::Call(hex!("61815774383099e24810ab832a5b2a5425c154d5").into()), value: U256::from(3000000000000000000u64), input: Default::default(), access_list: Default::default(), @@ -470,9 +448,7 @@ mod tests { nonce: 3u64, gas_price: 2000000000, gas_limit: 10000000u64, - to: TransactionKind::Call( - hex!("d3e8763675e4c425df46cc3b5c0f6cbdac396046").into(), - ), + to: TxKind::Call(hex!("d3e8763675e4c425df46cc3b5c0f6cbdac396046").into()), value: U256::from(1000000000000000u64), input: Default::default(), }), @@ -494,9 +470,7 @@ mod tests { nonce: 1u64, gas_price: 1000000000, gas_limit: 100000u64, - to: TransactionKind::Call( - hex!("d3e8763675e4c425df46cc3b5c0f6cbdac396046").into(), - ), + to: TxKind::Call(hex!("d3e8763675e4c425df46cc3b5c0f6cbdac396046").into()), value: U256::from(693361000000000u64), input: Default::default(), }), @@ -518,9 +492,7 @@ mod tests { nonce: 2u64, gas_price: 1000000000, gas_limit: 100000u64, - to: TransactionKind::Call( - hex!("d3e8763675e4c425df46cc3b5c0f6cbdac396046").into(), - ), + to: TxKind::Call(hex!("d3e8763675e4c425df46cc3b5c0f6cbdac396046").into()), value: U256::from(1000000000000000u64), input: Default::default(), }), diff --git a/crates/net/eth-wire/Cargo.toml b/crates/net/eth-wire/Cargo.toml index cddc84cf9..0cfdfef24 100644 --- a/crates/net/eth-wire/Cargo.toml +++ b/crates/net/eth-wire/Cargo.toml @@ -19,6 +19,7 @@ reth-ecies.workspace = true alloy-rlp = { workspace = true, features = ["derive"] } reth-discv4.workspace = true reth-eth-wire-types.workspace = true +reth-network-types.workspace = true # metrics reth-metrics.workspace = true diff --git a/crates/net/eth-wire/src/ethstream.rs b/crates/net/eth-wire/src/ethstream.rs index cbd1e3150..8de509034 100644 --- a/crates/net/eth-wire/src/ethstream.rs +++ b/crates/net/eth-wire/src/ethstream.rs @@ -354,7 +354,8 @@ mod tests { use futures::{SinkExt, StreamExt}; use reth_discv4::DEFAULT_DISCOVERY_PORT; use reth_ecies::stream::ECIESStream; - use reth_primitives::{pk2id, ForkFilter, Head, NamedChain, B256, U256}; + use reth_network_types::pk2id; + use reth_primitives::{ForkFilter, Head, NamedChain, B256, U256}; use secp256k1::{SecretKey, SECP256K1}; use std::time::Duration; use tokio::net::{TcpListener, TcpStream}; diff --git a/crates/net/eth-wire/src/hello.rs b/crates/net/eth-wire/src/hello.rs index 6ca8d9d99..f953c4aae 100644 --- a/crates/net/eth-wire/src/hello.rs +++ b/crates/net/eth-wire/src/hello.rs @@ -2,7 +2,8 @@ use crate::{capability::Capability, EthVersion, ProtocolVersion}; use alloy_rlp::{RlpDecodable, RlpEncodable}; use reth_codecs::derive_arbitrary; use reth_discv4::DEFAULT_DISCOVERY_PORT; -use reth_primitives::{constants::RETH_CLIENT_VERSION, PeerId}; +use reth_network_types::PeerId; +use reth_primitives::constants::RETH_CLIENT_VERSION; use crate::protocol::Protocol; #[cfg(feature = "serde")] @@ -38,7 +39,7 @@ impl HelloMessageWithProtocols { /// /// ``` /// use reth_eth_wire::HelloMessageWithProtocols; - /// use reth_primitives::pk2id; + /// use reth_network_types::pk2id; /// use secp256k1::{SecretKey, SECP256K1}; /// let secret_key = SecretKey::new(&mut rand::thread_rng()); /// let id = 
pk2id(&secret_key.public_key(SECP256K1)); @@ -120,7 +121,7 @@ impl HelloMessage { /// /// ``` /// use reth_eth_wire::HelloMessage; - /// use reth_primitives::pk2id; + /// use reth_network_types::pk2id; /// use secp256k1::{SecretKey, SECP256K1}; /// let secret_key = SecretKey::new(&mut rand::thread_rng()); /// let id = pk2id(&secret_key.public_key(SECP256K1)); @@ -209,7 +210,7 @@ impl HelloMessageBuilder { mod tests { use alloy_rlp::{Decodable, Encodable, EMPTY_STRING_CODE}; use reth_discv4::DEFAULT_DISCOVERY_PORT; - use reth_primitives::pk2id; + use reth_network_types::pk2id; use secp256k1::{SecretKey, SECP256K1}; use crate::{ diff --git a/crates/net/eth-wire/src/multiplex.rs b/crates/net/eth-wire/src/multiplex.rs index 27d0f0a00..04b7cda37 100644 --- a/crates/net/eth-wire/src/multiplex.rs +++ b/crates/net/eth-wire/src/multiplex.rs @@ -12,17 +12,18 @@ use std::{ fmt, future::Future, io, - pin::Pin, + pin::{pin, Pin}, task::{ready, Context, Poll}, }; use crate::{ capability::{Capability, SharedCapabilities, SharedCapability, UnsupportedCapabilityError}, errors::{EthStreamError, P2PStreamError}, + p2pstream::DisconnectP2P, CanDisconnect, DisconnectReason, EthStream, P2PStream, Status, UnauthedEthStream, }; use bytes::{Bytes, BytesMut}; -use futures::{pin_mut, Sink, SinkExt, Stream, StreamExt, TryStream, TryStreamExt}; +use futures::{Sink, SinkExt, Stream, StreamExt, TryStream, TryStreamExt}; use reth_primitives::ForkFilter; use tokio::sync::{mpsc, mpsc::UnboundedSender}; use tokio_stream::wrappers::UnboundedReceiverStream; @@ -158,7 +159,7 @@ impl RlpxProtocolMultiplexer { }; let f = handshake(proxy); - pin_mut!(f); + let mut f = pin!(f); // this polls the connection and the primary stream concurrently until the handshake is // complete @@ -238,7 +239,7 @@ impl MultiplexInner { } /// Delegates a message to the matching protocol. - fn delegate_message(&mut self, cap: &SharedCapability, msg: BytesMut) -> bool { + fn delegate_message(&self, cap: &SharedCapability, msg: BytesMut) -> bool { for proto in &self.protocols { if proto.shared_cap == *cap { proto.send_raw(msg); @@ -465,7 +466,7 @@ where let mut conn_ready = true; loop { match this.inner.conn.poll_ready_unpin(cx) { - Poll::Ready(_) => { + Poll::Ready(Ok(())) => { if let Some(msg) = this.inner.out_buffer.pop_front() { if let Err(err) = this.inner.conn.start_send_unpin(msg) { return Poll::Ready(Some(Err(err.into()))) @@ -474,6 +475,14 @@ where break } } + Poll::Ready(Err(err)) => { + if let Err(disconnect_err) = + this.inner.conn.start_disconnect(DisconnectReason::DisconnectRequested) + { + return Poll::Ready(Some(Err(disconnect_err.into()))); + } + return Poll::Ready(Some(Err(err.into()))); + } Poll::Pending => { conn_ready = false; break diff --git a/crates/net/eth-wire/src/muxdemux.rs b/crates/net/eth-wire/src/muxdemux.rs index 3aa7bc1dd..18112346e 100644 --- a/crates/net/eth-wire/src/muxdemux.rs +++ b/crates/net/eth-wire/src/muxdemux.rs @@ -171,7 +171,7 @@ impl MuxDemuxStream { /// Checks if all clones of this shared stream have been dropped, if true then returns // /// function to drop the stream. 
- fn can_drop(&mut self) -> bool { + fn can_drop(&self) -> bool { for tx in self.demux.values() { if !tx.is_closed() { return false @@ -357,9 +357,10 @@ mod tests { UnauthedEthStream, UnauthedP2PStream, }; use futures::{Future, SinkExt, StreamExt}; + use reth_network_types::pk2id; use reth_primitives::{ bytes::{BufMut, Bytes, BytesMut}, - pk2id, ForkFilter, Hardfork, MAINNET, + ForkFilter, Hardfork, MAINNET, }; use secp256k1::{SecretKey, SECP256K1}; use std::{net::SocketAddr, pin::Pin}; diff --git a/crates/net/eth-wire/src/test_utils.rs b/crates/net/eth-wire/src/test_utils.rs index 1708e1ffa..0783e4dad 100644 --- a/crates/net/eth-wire/src/test_utils.rs +++ b/crates/net/eth-wire/src/test_utils.rs @@ -4,7 +4,8 @@ use crate::{ EthVersion, HelloMessageWithProtocols, P2PStream, ProtocolVersion, Status, UnauthedP2PStream, }; use reth_discv4::DEFAULT_DISCOVERY_PORT; -use reth_primitives::{pk2id, Chain, ForkFilter, Head, B256, U256}; +use reth_network_types::pk2id; +use reth_primitives::{Chain, ForkFilter, Head, B256, U256}; use secp256k1::{SecretKey, SECP256K1}; use std::net::SocketAddr; use tokio::net::TcpStream; diff --git a/crates/net/eth-wire/tests/fuzz_roundtrip.rs b/crates/net/eth-wire/tests/fuzz_roundtrip.rs index 9bd75e3f3..1fc5ea0bf 100644 --- a/crates/net/eth-wire/tests/fuzz_roundtrip.rs +++ b/crates/net/eth-wire/tests/fuzz_roundtrip.rs @@ -48,7 +48,7 @@ macro_rules! fuzz_type_and_name { }; } -#[cfg(any(test, feature = "bench"))] +#[cfg(test)] pub mod fuzz_rlp { use crate::roundtrip_encoding; use alloy_rlp::{RlpDecodableWrapper, RlpEncodableWrapper}; diff --git a/crates/net/network-api/Cargo.toml b/crates/net/network-api/Cargo.toml index dcf4089cd..81536aad9 100644 --- a/crates/net/network-api/Cargo.toml +++ b/crates/net/network-api/Cargo.toml @@ -17,6 +17,7 @@ reth-primitives.workspace = true reth-eth-wire.workspace = true reth-rpc-types.workspace = true reth-discv4.workspace = true +reth-network-types.workspace = true # eth enr = { workspace = true, default-features = false, features = ["rust-secp256k1"] } diff --git a/crates/net/network-api/src/lib.rs b/crates/net/network-api/src/lib.rs index 0c43273cd..6c3040bd9 100644 --- a/crates/net/network-api/src/lib.rs +++ b/crates/net/network-api/src/lib.rs @@ -14,7 +14,8 @@ #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] use reth_eth_wire::{DisconnectReason, EthVersion, Status}; -use reth_primitives::{NodeRecord, PeerId}; +use reth_network_types::PeerId; +use reth_primitives::NodeRecord; use std::{future::Future, net::SocketAddr, sync::Arc, time::Instant}; pub use error::NetworkError; diff --git a/crates/net/network-api/src/noop.rs b/crates/net/network-api/src/noop.rs index b6a0fa846..2ace603e3 100644 --- a/crates/net/network-api/src/noop.rs +++ b/crates/net/network-api/src/noop.rs @@ -10,7 +10,8 @@ use crate::{ use enr::{secp256k1::SecretKey, Enr}; use reth_discv4::DEFAULT_DISCOVERY_PORT; use reth_eth_wire::{DisconnectReason, ProtocolVersion}; -use reth_primitives::{Chain, NodeRecord, PeerId}; +use reth_network_types::PeerId; +use reth_primitives::{Chain, NodeRecord}; use reth_rpc_types::{admin::EthProtocolInfo, NetworkStatus}; use std::net::{IpAddr, SocketAddr}; diff --git a/crates/net/network/Cargo.toml b/crates/net/network/Cargo.toml index 8e4c110bb..aa6da6ea2 100644 --- a/crates/net/network/Cargo.toml +++ b/crates/net/network/Cargo.toml @@ -27,6 +27,8 @@ reth-transaction-pool.workspace = true reth-provider.workspace = true reth-rpc-types.workspace = true reth-tokio-util.workspace = true +reth-consensus.workspace = true 
+reth-network-types.workspace = true # ethereum enr = { workspace = true, features = ["serde", "rust-secp256k1"] } @@ -95,11 +97,7 @@ criterion = { workspace = true, features = ["async_tokio", "html_reports"] } [features] default = ["serde"] serde = ["dep:serde", "dep:humantime-serde", "secp256k1/serde", "enr/serde", "dep:serde_json"] -test-utils = [ - "reth-provider/test-utils", - "dep:tempfile", - "reth-transaction-pool/test-utils", -] +test-utils = ["reth-provider/test-utils", "dep:tempfile", "reth-transaction-pool/test-utils"] geth-tests = [] [[bench]] diff --git a/crates/net/network/src/budget.rs b/crates/net/network/src/budget.rs index 319c8e311..e20d882fe 100644 --- a/crates/net/network/src/budget.rs +++ b/crates/net/network/src/budget.rs @@ -46,6 +46,7 @@ macro_rules! poll_nested_stream_with_budget { loop { match $poll_stream { Poll::Ready(Some(item)) => { + #[allow(unused_mut)] let mut f = $on_ready_some; f(item); diff --git a/crates/net/network/src/config.rs b/crates/net/network/src/config.rs index f9c9212d9..c2a7b3238 100644 --- a/crates/net/network/src/config.rs +++ b/crates/net/network/src/config.rs @@ -9,16 +9,18 @@ use crate::{ NetworkHandle, NetworkManager, }; use reth_discv4::{Discv4Config, Discv4ConfigBuilder, DEFAULT_DISCOVERY_ADDRESS}; -use reth_discv5::config::OPSTACK; +use reth_discv5::NetworkStackId; use reth_dns_discovery::DnsDiscoveryConfig; use reth_eth_wire::{HelloMessage, HelloMessageWithProtocols, Status}; +use reth_network_types::{pk2id, PeerId}; use reth_primitives::{ - mainnet_nodes, pk2id, sepolia_nodes, ChainSpec, ForkFilter, Head, NodeRecord, PeerId, MAINNET, + mainnet_nodes, sepolia_nodes, ChainSpec, ForkFilter, Head, NodeRecord, MAINNET, }; use reth_provider::{BlockReader, HeaderProvider}; use reth_tasks::{TaskSpawner, TokioTaskExecutor}; use secp256k1::SECP256K1; use std::{collections::HashSet, net::SocketAddr, sync::Arc}; + // re-export for convenience use crate::protocol::{IntoRlpxSubProtocol, RlpxSubProtocols}; pub use secp256k1::SecretKey; @@ -119,16 +121,15 @@ impl<C> NetworkConfig<C> { self, f: impl FnOnce(reth_discv5::ConfigBuilder) -> reth_discv5::Config, ) -> Self { - let rlpx_port = self.listener_addr.port(); - let chain = self.chain_spec.chain; - let fork_id = self.status.forkid; + let network_stack_id = NetworkStackId::id(&self.chain_spec); + let fork_id = self.chain_spec.latest_fork_id(); let boot_nodes = self.boot_nodes.clone(); - let mut builder = - reth_discv5::Config::builder(rlpx_port).add_unsigned_boot_nodes(boot_nodes.into_iter()); + let mut builder = reth_discv5::Config::builder(self.listener_addr) + .add_unsigned_boot_nodes(boot_nodes.into_iter()); - if chain.is_optimism() { - builder = builder.fork(OPSTACK, fork_id) + if let Some(id) = network_stack_id { + builder = builder.fork(id, fork_id) } self.set_discovery_v5(f(builder)) @@ -140,11 +141,16 @@ impl<C> NetworkConfig<C> { self } - /// Sets the address for the incoming connection listener. + /// Sets the address for the incoming RLPx connection listener. pub fn set_listener_addr(mut self, listener_addr: SocketAddr) -> Self { self.listener_addr = listener_addr; self } + + /// Returns the address for the incoming RLPx connection listener. + pub fn listener_addr(&self) -> &SocketAddr { + &self.listener_addr + } } impl<C> NetworkConfig<C> where @@ -174,8 +180,6 @@ pub struct NetworkConfigBuilder { dns_discovery_config: Option<DnsDiscoveryConfig>, /// How to set up discovery version 4. discovery_v4_builder: Option<Discv4ConfigBuilder>, /// Whether to enable discovery version 5. Disabled by default.
- enable_discovery_v5: bool, /// All boot nodes to start network discovery with. boot_nodes: HashSet<NodeRecord>, /// Address to use for discovery @@ -218,7 +222,6 @@ impl NetworkConfigBuilder { secret_key, dns_discovery_config: Some(Default::default()), discovery_v4_builder: Some(Default::default()), - enable_discovery_v5: false, boot_nodes: Default::default(), discovery_addr: None, listener_addr: None, @@ -351,12 +354,6 @@ impl NetworkConfigBuilder { self } - /// Allows discv5 discovery. - pub fn discovery_v5(mut self) -> Self { - self.enable_discovery_v5 = true; - self - } - /// Sets the dns discovery config to use. pub fn dns_discovery(mut self, config: DnsDiscoveryConfig) -> Self { self.dns_discovery_config = Some(config); @@ -405,12 +402,6 @@ impl NetworkConfigBuilder { self } - /// Enable the Discv5 discovery. - pub fn enable_discv5_discovery(mut self) -> Self { - self.enable_discovery_v5 = true; - self - } - /// Disable the DNS discovery if the given condition is true. pub fn disable_dns_discovery_if(self, disable: bool) -> Self { if disable { @@ -467,7 +458,6 @@ impl NetworkConfigBuilder { secret_key, mut dns_discovery_config, discovery_v4_builder, - enable_discovery_v5: _, boot_nodes, discovery_addr, listener_addr, diff --git a/crates/net/network/src/discovery.rs b/crates/net/network/src/discovery.rs index b7a1131b4..bb456d4ea 100644 --- a/crates/net/network/src/discovery.rs +++ b/crates/net/network/src/discovery.rs @@ -7,12 +7,13 @@ use crate::{ }; use enr::Enr; use futures::StreamExt; -use reth_discv4::{DiscoveryUpdate, Discv4, Discv4Config, EnrForkIdEntry}; +use reth_discv4::{DiscoveryUpdate, Discv4, Discv4Config}; use reth_discv5::{DiscoveredPeer, Discv5}; use reth_dns_discovery::{ DnsDiscoveryConfig, DnsDiscoveryHandle, DnsDiscoveryService, DnsNodeRecordUpdate, DnsResolver, }; -use reth_primitives::{ForkId, NodeRecord, PeerId}; +use reth_network_types::PeerId; +use reth_primitives::{EnrForkIdEntry, ForkId, NodeRecord}; use secp256k1::SecretKey; use std::{ collections::VecDeque, @@ -368,7 +369,7 @@ mod tests { let discv4_config = Discv4ConfigBuilder::default().external_ip_resolver(None).build(); let discv5_listen_config = discv5::ListenConfig::from(discv5_addr); - let discv5_config = reth_discv5::Config::builder(0) + let discv5_config = reth_discv5::Config::builder(discv5_addr) .discv5_config(discv5::ConfigBuilder::new(discv5_listen_config).build()) .build(); diff --git a/crates/net/network/src/eth_requests.rs b/crates/net/network/src/eth_requests.rs index baa636b93..3268ff898 100644 --- a/crates/net/network/src/eth_requests.rs +++ b/crates/net/network/src/eth_requests.rs @@ -11,7 +11,8 @@ use reth_eth_wire::{ Receipts, }; use reth_interfaces::p2p::error::RequestResult; -use reth_primitives::{BlockBody, BlockHashOrNumber, Header, HeadersDirection, PeerId}; +use reth_network_types::PeerId; +use reth_primitives::{BlockBody, BlockHashOrNumber, Header, HeadersDirection}; use reth_provider::{BlockReader, HeaderProvider, ReceiptProvider}; use std::{ future::Future, @@ -138,7 +139,7 @@ where } fn on_headers_request( - &mut self, + &self, _peer_id: PeerId, request: GetBlockHeaders, response: oneshot::Sender<RequestResult<BlockHeaders>>, @@ -149,7 +150,7 @@ where } fn on_bodies_request( - &mut self, + &self, _peer_id: PeerId, request: GetBlockBodies, response: oneshot::Sender<RequestResult<BlockBodies>>, @@ -186,7 +187,7 @@ where } fn on_receipts_request( - &mut self, + &self, _peer_id: PeerId, request: GetReceipts, response: oneshot::Sender<RequestResult<Receipts>>, diff --git a/crates/net/network/src/fetch/client.rs b/crates/net/network/src/fetch/client.rs index
eab474506..63e22abe0 100644 --- a/crates/net/network/src/fetch/client.rs +++ b/crates/net/network/src/fetch/client.rs @@ -11,7 +11,8 @@ use reth_interfaces::p2p::{ priority::Priority, }; use reth_network_api::ReputationChangeKind; -use reth_primitives::{Header, PeerId, B256}; +use reth_network_types::PeerId; +use reth_primitives::{Header, B256}; use std::sync::{ atomic::{AtomicUsize, Ordering}, Arc, diff --git a/crates/net/network/src/fetch/mod.rs b/crates/net/network/src/fetch/mod.rs index 19c605fb9..9ad50edb0 100644 --- a/crates/net/network/src/fetch/mod.rs +++ b/crates/net/network/src/fetch/mod.rs @@ -9,7 +9,8 @@ use reth_interfaces::p2p::{ priority::Priority, }; use reth_network_api::ReputationChangeKind; -use reth_primitives::{BlockBody, Header, PeerId, B256}; +use reth_network_types::PeerId; +use reth_primitives::{BlockBody, Header, B256}; use std::{ collections::{HashMap, VecDeque}, sync::{ @@ -77,8 +78,16 @@ impl StateFetcher { best_number: u64, timeout: Arc<AtomicU64>, ) { - self.peers - .insert(peer_id, Peer { state: PeerState::Idle, best_hash, best_number, timeout }); + self.peers.insert( + peer_id, + Peer { + state: PeerState::Idle, + best_hash, + best_number, + timeout, + last_response_likely_bad: false, + }, + ); } /// Removes the peer from the peer list, after which it is no longer available for future @@ -119,14 +128,29 @@ impl StateFetcher { } /// Returns the _next_ idle peer that's ready to accept a request, - /// prioritizing those with the lowest timeout/latency. - /// Once a peer has been yielded, it will be moved to the end of the map - fn next_peer(&mut self) -> Option<PeerId> { - self.peers - .iter() - .filter(|(_, peer)| peer.state.is_idle()) - .min_by_key(|(_, peer)| peer.timeout()) - .map(|(id, _)| *id) + /// prioritizing those with the lowest timeout/latency and those that recently responded with + /// adequate data. + fn next_best_peer(&self) -> Option<PeerId> { + let mut idle = self.peers.iter().filter(|(_, peer)| peer.state.is_idle()); + + let mut best_peer = idle.next()?; + + for maybe_better in idle { + // replace best peer if our current best peer sent us a bad response last time + if best_peer.1.last_response_likely_bad && !maybe_better.1.last_response_likely_bad { + best_peer = maybe_better; + continue + } + + // replace best peer if this peer has better rtt + if maybe_better.1.timeout() < best_peer.1.timeout() && + !maybe_better.1.last_response_likely_bad + { + best_peer = maybe_better; + } + } + + Some(*best_peer.0) } /// Returns the next action to return @@ -136,7 +160,7 @@ impl StateFetcher { return PollAction::NoRequests } - let Some(peer_id) = self.next_peer() else { return PollAction::NoPeersAvailable }; + let Some(peer_id) = self.next_best_peer() else { return PollAction::NoPeersAvailable }; let request = self.queued_requests.pop_front().expect("not empty"); let request = self.prepare_block_request(peer_id, request); @@ -249,6 +273,9 @@ impl StateFetcher { } if let Some(peer) = self.peers.get_mut(&peer_id) { + // update the peer's response state + peer.last_response_likely_bad = is_likely_bad_response; + // If the peer is still ready to accept new requests, we try to send a followup // request immediately.
if peer.state.on_request_finished() && !is_error && !is_likely_bad_response { @@ -268,11 +295,16 @@ impl StateFetcher { peer_id: PeerId, res: RequestResult<Vec<BlockBody>>, ) -> Option<BlockResponseOutcome> { + let is_likely_bad_response = res.as_ref().map_or(true, |bodies| bodies.is_empty()); + if let Some(resp) = self.inflight_bodies_requests.remove(&peer_id) { let _ = resp.response.send(res.map(|b| (peer_id, b).into())); } if let Some(peer) = self.peers.get_mut(&peer_id) { - if peer.state.on_request_finished() { + // update the peer's response state + peer.last_response_likely_bad = is_likely_bad_response; + + if peer.state.on_request_finished() && !is_likely_bad_response { return self.followup_request(peer_id) } } @@ -307,6 +339,13 @@ struct Peer { best_number: u64, /// Tracks the current timeout value we use for the peer. timeout: Arc<AtomicU64>, + /// Tracks whether the peer has recently responded with a likely bad response. + /// + /// This is used to de-rank the peer if there are other peers available. + /// This exists because empty responses may not be penalized (e.g. when blocks near the tip are + /// downloaded), but we still want to avoid requesting from the same peer again if it has the + /// lowest timeout. + last_response_likely_bad: bool, } impl Peer { @@ -462,17 +501,17 @@ mod tests { fetcher.new_active_peer(peer1, B256::random(), 1, Arc::new(AtomicU64::new(1))); fetcher.new_active_peer(peer2, B256::random(), 2, Arc::new(AtomicU64::new(1))); - let first_peer = fetcher.next_peer().unwrap(); + let first_peer = fetcher.next_best_peer().unwrap(); assert!(first_peer == peer1 || first_peer == peer2); // Pending disconnect for first_peer fetcher.on_pending_disconnect(&first_peer); // first_peer now isn't idle, so we should get other peer - let second_peer = fetcher.next_peer().unwrap(); + let second_peer = fetcher.next_best_peer().unwrap(); assert!(first_peer == peer1 || first_peer == peer2); assert_ne!(first_peer, second_peer); // without idle peers, returns None fetcher.on_pending_disconnect(&second_peer); - assert_eq!(fetcher.next_peer(), None); + assert_eq!(fetcher.next_best_peer(), None); } #[tokio::test] @@ -491,13 +530,13 @@ mod tests { fetcher.new_active_peer(peer3, B256::random(), 3, Arc::new(AtomicU64::new(50))); // Must always get peer1 (lowest timeout) - assert_eq!(fetcher.next_peer(), Some(peer1)); - assert_eq!(fetcher.next_peer(), Some(peer1)); + assert_eq!(fetcher.next_best_peer(), Some(peer1)); + assert_eq!(fetcher.next_best_peer(), Some(peer1)); // peer2's timeout changes below peer1's peer2_timeout.store(10, Ordering::Relaxed); // Then we get peer 2 always (now lowest) - assert_eq!(fetcher.next_peer(), Some(peer2)); - assert_eq!(fetcher.next_peer(), Some(peer2)); + assert_eq!(fetcher.next_best_peer(), Some(peer2)); + assert_eq!(fetcher.next_best_peer(), Some(peer2)); } #[tokio::test] diff --git a/crates/net/network/src/import.rs b/crates/net/network/src/import.rs index d127dab8f..2d18da9d4 100644 --- a/crates/net/network/src/import.rs +++ b/crates/net/network/src/import.rs @@ -1,7 +1,7 @@ //! This module provides an abstraction over block import in the form of the `BlockImport` trait. use crate::message::NewBlockMessage; -use reth_primitives::PeerId; +use reth_network_types::PeerId; use std::task::{Context, Poll}; /// Abstraction over block import.
@@ -50,7 +50,7 @@ pub enum BlockValidation { pub enum BlockImportError { /// Consensus error #[error(transparent)] - Consensus(#[from] reth_interfaces::consensus::ConsensusError), + Consensus(#[from] reth_consensus::ConsensusError), } /// An implementation of `BlockImport` used in Proof-of-Stake consensus that does nothing. diff --git a/crates/net/network/src/listener.rs b/crates/net/network/src/listener.rs index 1575b3933..4cc219655 100644 --- a/crates/net/network/src/listener.rs +++ b/crates/net/network/src/listener.rs @@ -104,8 +104,10 @@ impl Stream for TcpListenerStream { #[cfg(test)] mod tests { use super::*; - use futures::pin_mut; - use std::net::{Ipv4Addr, SocketAddrV4}; + use std::{ + net::{Ipv4Addr, SocketAddrV4}, + pin::pin, + }; use tokio::macros::support::poll_fn; #[tokio::test(flavor = "multi_thread")] @@ -117,7 +119,7 @@ mod tests { let local_addr = listener.local_address(); tokio::task::spawn(async move { - pin_mut!(listener); + let mut listener = pin!(listener); match poll_fn(|cx| listener.as_mut().poll(cx)).await { ListenerEvent::Incoming { .. } => {} _ => { diff --git a/crates/net/network/src/manager.rs b/crates/net/network/src/manager.rs index 5783c4ebd..d516625c6 100644 --- a/crates/net/network/src/manager.rs +++ b/crates/net/network/src/manager.rs @@ -35,7 +35,7 @@ use crate::{ transactions::NetworkTransactionEvent, FetchClient, NetworkBuilder, }; -use futures::{pin_mut, Future, StreamExt}; +use futures::{Future, StreamExt}; use parking_lot::Mutex; use reth_eth_wire::{ capability::{Capabilities, CapabilityMessage}, @@ -44,7 +44,8 @@ use reth_eth_wire::{ use reth_metrics::common::mpsc::UnboundedMeteredSender; use reth_net_common::bandwidth_meter::BandwidthMeter; use reth_network_api::ReputationChangeKind; -use reth_primitives::{ForkId, NodeRecord, PeerId}; +use reth_network_types::PeerId; +use reth_primitives::{ForkId, NodeRecord}; use reth_provider::{BlockNumReader, BlockReader}; use reth_rpc_types::{admin::EthProtocolInfo, NetworkStatus}; use reth_tasks::shutdown::GracefulShutdown; @@ -52,7 +53,7 @@ use reth_tokio_util::EventListeners; use secp256k1::SecretKey; use std::{ net::SocketAddr, - pin::Pin, + pin::{pin, Pin}, sync::{ atomic::{AtomicU64, AtomicUsize, Ordering}, Arc, @@ -402,7 +403,7 @@ where } /// Handle an incoming request from the peer - fn on_eth_request(&mut self, peer_id: PeerId, req: PeerRequest) { + fn on_eth_request(&self, peer_id: PeerId, req: PeerRequest) { match req { PeerRequest::GetBlockHeaders { request, response } => { self.delegate_eth_request(IncomingEthRequest::GetBlockHeaders { @@ -901,7 +902,7 @@ where shutdown_hook: impl FnOnce(&mut Self), ) { let network = self; - pin_mut!(network, shutdown); + let mut network = pin!(network); let mut graceful_guard = None; tokio::select! 
{ diff --git a/crates/net/network/src/message.rs b/crates/net/network/src/message.rs index b6861267a..2086fd60e 100644 --- a/crates/net/network/src/message.rs +++ b/crates/net/network/src/message.rs @@ -11,8 +11,9 @@ use reth_eth_wire::{ SharedTransactions, Transactions, }; use reth_interfaces::p2p::error::{RequestError, RequestResult}; +use reth_network_types::PeerId; use reth_primitives::{ - BlockBody, Bytes, Header, PeerId, PooledTransactionsElement, ReceiptWithBloom, B256, + BlockBody, Bytes, Header, PooledTransactionsElement, ReceiptWithBloom, B256, }; use std::{ fmt, diff --git a/crates/net/network/src/network.rs b/crates/net/network/src/network.rs index 7104e442e..86669bf19 100644 --- a/crates/net/network/src/network.rs +++ b/crates/net/network/src/network.rs @@ -13,7 +13,8 @@ use reth_network_api::{ NetworkError, NetworkInfo, PeerInfo, PeerKind, Peers, PeersInfo, Reputation, ReputationChangeKind, }; -use reth_primitives::{Head, NodeRecord, PeerId, TransactionSigned, B256}; +use reth_network_types::PeerId; +use reth_primitives::{Head, NodeRecord, TransactionSigned, B256}; use reth_rpc_types::NetworkStatus; use secp256k1::SecretKey; use std::{ diff --git a/crates/net/network/src/peers/manager.rs b/crates/net/network/src/peers/manager.rs index b94c22db7..d6ae9c4da 100644 --- a/crates/net/network/src/peers/manager.rs +++ b/crates/net/network/src/peers/manager.rs @@ -14,7 +14,8 @@ use futures::StreamExt; use reth_eth_wire::{errors::EthStreamError, DisconnectReason}; use reth_net_common::ban_list::BanList; use reth_network_api::{PeerKind, ReputationChangeKind}; -use reth_primitives::{ForkId, NodeRecord, PeerId}; +use reth_network_types::PeerId; +use reth_primitives::{ForkId, NodeRecord}; use std::{ collections::{hash_map::Entry, HashMap, HashSet, VecDeque}, fmt::Display, @@ -1558,7 +1559,8 @@ mod tests { }; use reth_net_common::ban_list::BanList; use reth_network_api::{Direction, ReputationChangeKind}; - use reth_primitives::{PeerId, B512}; + use reth_network_types::PeerId; + use reth_primitives::B512; use std::{ collections::HashSet, future::{poll_fn, Future}, diff --git a/crates/net/network/src/session/active.rs b/crates/net/network/src/session/active.rs index 33c0a66e3..32bfb72ac 100644 --- a/crates/net/network/src/session/active.rs +++ b/crates/net/network/src/session/active.rs @@ -20,7 +20,7 @@ use reth_eth_wire::{ }; use reth_interfaces::p2p::error::RequestError; use reth_metrics::common::mpsc::MeteredPollSender; -use reth_primitives::PeerId; +use reth_network_types::PeerId; use std::{ collections::VecDeque, future::Future, @@ -769,7 +769,8 @@ mod tests { UnauthedEthStream, UnauthedP2PStream, }; use reth_net_common::bandwidth_meter::{BandwidthMeter, MeteredStream}; - use reth_primitives::{pk2id, ForkFilter, Hardfork, MAINNET}; + use reth_network_types::pk2id; + use reth_primitives::{ForkFilter, Hardfork, MAINNET}; use secp256k1::{SecretKey, SECP256K1}; use tokio::{ net::{TcpListener, TcpStream}, diff --git a/crates/net/network/src/session/handle.rs b/crates/net/network/src/session/handle.rs index 80298f324..c48fff618 100644 --- a/crates/net/network/src/session/handle.rs +++ b/crates/net/network/src/session/handle.rs @@ -12,7 +12,7 @@ use reth_eth_wire::{ DisconnectReason, EthVersion, Status, }; use reth_network_api::PeerInfo; -use reth_primitives::PeerId; +use reth_network_types::PeerId; use std::{io, net::SocketAddr, sync::Arc, time::Instant}; use tokio::sync::{ mpsc::{self, error::SendError}, diff --git a/crates/net/network/src/session/mod.rs 
b/crates/net/network/src/session/mod.rs index 94d41226e..95f426c54 100644 --- a/crates/net/network/src/session/mod.rs +++ b/crates/net/network/src/session/mod.rs @@ -19,7 +19,8 @@ use reth_net_common::{ bandwidth_meter::{BandwidthMeter, MeteredStream}, stream::HasRemoteAddr, }; -use reth_primitives::{ForkFilter, ForkId, ForkTransition, Head, PeerId}; +use reth_network_types::PeerId; +use reth_primitives::{ForkFilter, ForkId, ForkTransition, Head}; use reth_tasks::TaskSpawner; use secp256k1::SecretKey; use std::{ diff --git a/crates/net/network/src/state.rs b/crates/net/network/src/state.rs index d75a1aaa5..309184ca3 100644 --- a/crates/net/network/src/state.rs +++ b/crates/net/network/src/state.rs @@ -18,7 +18,8 @@ use reth_eth_wire::{ capability::Capabilities, BlockHashNumber, DisconnectReason, NewBlockHashes, Status, }; use reth_network_api::PeerKind; -use reth_primitives::{ForkId, PeerId, B256}; +use reth_network_types::PeerId; +use reth_primitives::{ForkId, B256}; use reth_provider::BlockNumReader; use std::{ collections::{HashMap, VecDeque}, @@ -233,7 +234,7 @@ where } /// Invoked when a new [`ForkId`] is activated. - pub(crate) fn update_fork_id(&mut self, fork_id: ForkId) { + pub(crate) fn update_fork_id(&self, fork_id: ForkId) { self.discovery.update_fork_id(fork_id) } @@ -537,7 +538,8 @@ mod tests { BlockBodies, EthVersion, }; use reth_interfaces::p2p::{bodies::client::BodiesClient, error::RequestError}; - use reth_primitives::{BlockBody, Header, PeerId, B256}; + use reth_network_types::PeerId; + use reth_primitives::{BlockBody, Header, B256}; use reth_provider::test_utils::NoopProvider; use std::{ future::poll_fn, diff --git a/crates/net/network/src/swarm.rs b/crates/net/network/src/swarm.rs index 136ece0bd..11ac5949a 100644 --- a/crates/net/network/src/swarm.rs +++ b/crates/net/network/src/swarm.rs @@ -12,7 +12,7 @@ use reth_eth_wire::{ errors::EthStreamError, EthVersion, Status, }; -use reth_primitives::PeerId; +use reth_network_types::PeerId; use reth_provider::{BlockNumReader, BlockReader}; use std::{ io, diff --git a/crates/net/network/src/test_utils/init.rs b/crates/net/network/src/test_utils/init.rs index 1419191aa..b72046a7f 100644 --- a/crates/net/network/src/test_utils/init.rs +++ b/crates/net/network/src/test_utils/init.rs @@ -1,5 +1,5 @@ use enr::{k256::ecdsa::SigningKey, Enr, EnrPublicKey}; -use reth_primitives::PeerId; +use reth_network_types::PeerId; use std::{net::SocketAddr, time::Duration}; /// The timeout for tests that create a GethInstance diff --git a/crates/net/network/src/test_utils/testnet.rs b/crates/net/network/src/test_utils/testnet.rs index 9720b7a93..a92934c0c 100644 --- a/crates/net/network/src/test_utils/testnet.rs +++ b/crates/net/network/src/test_utils/testnet.rs @@ -14,7 +14,8 @@ use futures::{FutureExt, StreamExt}; use pin_project::pin_project; use reth_eth_wire::{protocol::Protocol, DisconnectReason, HelloMessageWithProtocols}; use reth_network_api::{NetworkInfo, Peers}; -use reth_primitives::{PeerId, MAINNET}; +use reth_network_types::PeerId; +use reth_primitives::MAINNET; use reth_provider::{ test_utils::NoopProvider, BlockReader, BlockReaderIdExt, HeaderProvider, StateProviderFactory, }; diff --git a/crates/net/network/src/transactions/constants.rs b/crates/net/network/src/transactions/constants.rs index 9e37f0786..107d9758b 100644 --- a/crates/net/network/src/transactions/constants.rs +++ b/crates/net/network/src/transactions/constants.rs @@ -153,19 +153,20 @@ pub mod tx_fetcher { /// search is budget constrained. 
/// /// Default is a sixth of [`DEFAULT_MAX_COUNT_PENDING_FETCH`], which defaults to 12 800 hashes - /// (the breadth of the search), divided by [`DEFAULT_MAX_COUNT_FALLBACK_PEERS`], which - /// defaults to 3 peers (the depth of the search), so the 711 lru hashes in the pending hashes - /// cache. + /// (the ideal max number of hashes pending fetch), divided by + /// [`DEFAULT_MAX_COUNT_FALLBACK_PEERS`], which defaults to 3 peers (the depth of the search), + /// so a search breadth of 711 lru hashes in the pending hashes cache. pub const DEFAULT_BUDGET_FIND_IDLE_FALLBACK_PEER: usize = DEFAULT_MAX_COUNT_PENDING_FETCH / 6 / DEFAULT_MAX_COUNT_FALLBACK_PEERS as usize; /// Default budget for finding hashes in the intersection of transactions announced by a peer /// and in the cache of hashes pending fetch, when said search is budget constrained. /// - /// Default is a sixth of [`DEFAULT_MAX_COUNT_PENDING_FETCH`], which defaults to 12 800 hashes - /// (the breadth of the search), so 2133 lru hashes in the pending hashes cache. + /// Default is an eighth of [`DEFAULT_MAX_COUNT_PENDING_FETCH`], which defaults to 12 800 hashes + /// (the ideal max number of hashes pending fetch), so a search breadth of 1 600 lru hashes in + /// the pending hashes cache. pub const DEFAULT_BUDGET_FIND_INTERSECTION_ANNOUNCED_BY_PEER_AND_PENDING_FETCH: usize = - DEFAULT_MAX_COUNT_PENDING_FETCH / 6; + DEFAULT_MAX_COUNT_PENDING_FETCH / 8; /* ====== SCALARS FOR USE ON FETCH PENDING HASHES ====== */ @@ -209,8 +210,8 @@ pub mod tx_fetcher { /// for the intersection of hashes announced by a peer and hashes pending fetch. The max /// inflight requests is configured in [`TransactionFetcherInfo`]. /// - /// Default is 2 requests. - pub const DEFAULT_DIVISOR_MAX_COUNT_INFLIGHT_REQUESTS_ON_FIND_INTERSECTION: usize = 2; + /// Default is 3 requests. + pub const DEFAULT_DIVISOR_MAX_COUNT_INFLIGHT_REQUESTS_ON_FIND_INTERSECTION: usize = 3; // Default divisor to the max pending pool imports when calculating search breadth of the /// search for any idle peer to which to send a request filled with hashes pending fetch. @@ -225,8 +226,8 @@ pub mod tx_fetcher { /// The max pending pool imports is configured in /// [`PendingPoolImportsInfo`](crate::transactions::PendingPoolImportsInfo). /// - /// Default is 3 requests. - pub const DEFAULT_DIVISOR_MAX_COUNT_PENDING_POOL_IMPORTS_ON_FIND_INTERSECTION: usize = 3; + /// Default is 4 requests. + pub const DEFAULT_DIVISOR_MAX_COUNT_PENDING_POOL_IMPORTS_ON_FIND_INTERSECTION: usize = 4; /* ================== ROUGH MEASURES ================== */ diff --git a/crates/net/network/src/transactions/fetcher.rs b/crates/net/network/src/transactions/fetcher.rs index cbec0f1e6..7c60b5497 100644 --- a/crates/net/network/src/transactions/fetcher.rs +++ b/crates/net/network/src/transactions/fetcher.rs @@ -41,7 +41,8 @@ use reth_eth_wire::{ PartiallyValidData, RequestTxHashes, ValidAnnouncementData, }; use reth_interfaces::p2p::error::{RequestError, RequestResult}; -use reth_primitives::{PeerId, PooledTransactionsElement, TxHash}; +use reth_network_types::PeerId; +use reth_primitives::{PooledTransactionsElement, TxHash}; use schnellru::ByLength; #[cfg(debug_assertions)] use smallvec::{smallvec, SmallVec}; @@ -238,7 +239,7 @@ impl TransactionFetcher { /// /// Returns left over hashes. pub fn pack_request( - &mut self, + &self, hashes_to_request: &mut RequestTxHashes, hashes_from_announcement: ValidAnnouncementData, ) -> RequestTxHashes { @@ -259,7 +260,7 @@ impl TransactionFetcher { /// response.
If no, it's added to surplus hashes. If yes, it's added to hashes to the request /// and expected response size is accumulated. pub fn pack_request_eth68( - &mut self, + &self, hashes_to_request: &mut RequestTxHashes, hashes_from_announcement: impl HandleMempoolData + IntoIterator<Item = (TxHash, Option<(u8, usize)>)>, @@ -327,7 +328,7 @@ impl TransactionFetcher { /// /// Returns left over hashes. pub fn pack_request_eth66( - &mut self, + &self, hashes_to_request: &mut RequestTxHashes, hashes_from_announcement: ValidAnnouncementData, ) -> RequestTxHashes { @@ -1293,27 +1294,26 @@ pub enum VerificationOutcome { /// Tracks stats about the [`TransactionFetcher`]. #[derive(Debug)] pub struct TransactionFetcherInfo { - /// Currently active outgoing [`GetPooledTransactions`] requests. + /// Max inflight [`GetPooledTransactions`] requests. pub max_inflight_requests: usize, - /// Soft limit for the byte size of the expected - /// [`PooledTransactions`] response on packing a - /// [`GetPooledTransactions`] request with hashes. - pub(super) soft_limit_byte_size_pooled_transactions_response_on_pack_request: usize, - /// Soft limit for the byte size of a [`PooledTransactions`] - /// response on assembling a [`GetPooledTransactions`] - /// request. Spec'd at 2 MiB. + /// Soft limit for the byte size of the expected [`PooledTransactions`] response, upon packing + /// a [`GetPooledTransactions`] request with hashes (by default less than 2 MiB worth of + /// transactions is requested). + pub soft_limit_byte_size_pooled_transactions_response_on_pack_request: usize, + /// Soft limit for the byte size of a [`PooledTransactions`] response, upon assembling the + /// response. Spec'd at 2 MiB, but can be adjusted for research purposes. pub soft_limit_byte_size_pooled_transactions_response: usize, } impl TransactionFetcherInfo { /// Creates a new max pub fn new( - max_inflight_transaction_requests: usize, + max_inflight_requests: usize, soft_limit_byte_size_pooled_transactions_response_on_pack_request: usize, soft_limit_byte_size_pooled_transactions_response: usize, ) -> Self { Self { - max_inflight_requests: max_inflight_transaction_requests, + max_inflight_requests, soft_limit_byte_size_pooled_transactions_response_on_pack_request, soft_limit_byte_size_pooled_transactions_response, } @@ -1323,7 +1323,7 @@ impl TransactionFetcherInfo { impl Default for TransactionFetcherInfo { fn default() -> Self { Self::new( - DEFAULT_MAX_COUNT_INFLIGHT_REQUESTS_ON_FETCH_PENDING_HASHES, + DEFAULT_MAX_COUNT_CONCURRENT_REQUESTS as usize * DEFAULT_MAX_COUNT_CONCURRENT_REQUESTS_PER_PEER as usize, DEFAULT_SOFT_LIMIT_BYTE_SIZE_POOLED_TRANSACTIONS_RESP_ON_PACK_GET_POOLED_TRANSACTIONS_REQ, SOFT_LIMIT_BYTE_SIZE_POOLED_TRANSACTIONS_RESPONSE ) diff --git a/crates/net/network/src/transactions/mod.rs b/crates/net/network/src/transactions/mod.rs index 83176c566..ee14e4c82 100644 --- a/crates/net/network/src/transactions/mod.rs +++ b/crates/net/network/src/transactions/mod.rs @@ -26,9 +26,9 @@ use reth_interfaces::{ }; use reth_metrics::common::mpsc::UnboundedMeteredReceiver; use reth_network_api::{Peers, ReputationChangeKind}; +use reth_network_types::PeerId; use reth_primitives::{ - FromRecoveredPooledTransaction, PeerId, PooledTransactionsElement, TransactionSigned, TxHash, - B256, + FromRecoveredPooledTransaction, PooledTransactionsElement, TransactionSigned, TxHash, B256, }; use reth_transaction_pool::{ error::{PoolError, PoolResult}, @@ -945,14 +945,13 @@ where return } + let Some(peer) = self.peers.get_mut(&peer_id) else { return }; let mut transactions =
transactions.0; // mark the transactions as received self.transaction_fetcher .remove_hashes_from_transaction_fetcher(transactions.iter().map(|tx| *tx.hash())); - let Some(peer) = self.peers.get_mut(&peer_id) else { return }; - // track that the peer knows these transaction, but only if this is a new broadcast. // If we received the transactions as the response to our `GetPooledTransactions`` // requests (based on received `NewPooledTransactionHashes`) then we already diff --git a/crates/net/network/src/transactions/validation.rs b/crates/net/network/src/transactions/validation.rs index e508b2b24..9171004bd 100644 --- a/crates/net/network/src/transactions/validation.rs +++ b/crates/net/network/src/transactions/validation.rs @@ -21,7 +21,7 @@ pub const SIGNATURE_DECODED_SIZE_BYTES: usize = mem::size_of::<Signature>(); pub trait ValidateTx68 { /// Validates a [`NewPooledTransactionHashes68`](reth_eth_wire::NewPooledTransactionHashes68) /// entry. Returns [`ValidationOutcome`] which signals to the caller whether to fetch the - /// transaction or wether to drop it, and whether the sender of the announcement should be + /// transaction or to drop it, and whether the sender of the announcement should be /// penalized. fn should_fetch( &self, diff --git a/crates/net/network/tests/it/connect.rs b/crates/net/network/tests/it/connect.rs index 1ff59bf40..7b9c785eb 100644 --- a/crates/net/network/tests/it/connect.rs +++ b/crates/net/network/tests/it/connect.rs @@ -1,7 +1,7 @@ //! Connection tests use alloy_node_bindings::Geth; -use alloy_provider::{admin::AdminApi, ProviderBuilder}; +use alloy_provider::{ext::AdminApi, ProviderBuilder}; use futures::StreamExt; use reth_discv4::Discv4Config; use reth_eth_wire::DisconnectReason; @@ -320,9 +320,8 @@ async fn test_incoming_node_id_blacklist() { let geth = Geth::new().data_dir(temp_dir).disable_discovery().authrpc_port(0).spawn(); let geth_endpoint = SocketAddr::new([127, 0, 0, 1].into(), geth.port()); - let provider = ProviderBuilder::new() - .on_http(format!("http://{geth_endpoint}").parse().unwrap()) - .unwrap(); + let provider = + ProviderBuilder::new().on_http(format!("http://{geth_endpoint}").parse().unwrap()); // get the peer id we should be expecting let enr = provider.node_info().await.unwrap().enr; @@ -375,9 +374,8 @@ async fn test_incoming_connect_with_single_geth() { let temp_dir = tempfile::tempdir().unwrap().into_path(); let geth = Geth::new().data_dir(temp_dir).disable_discovery().authrpc_port(0).spawn(); let geth_endpoint = SocketAddr::new([127, 0, 0, 1].into(), geth.port()); - let provider = ProviderBuilder::new() - .on_http(format!("http://{geth_endpoint}").parse().unwrap()) - .unwrap(); + let provider = + ProviderBuilder::new().on_http(format!("http://{geth_endpoint}").parse().unwrap()); // get the peer id we should be expecting let enr = provider.node_info().await.unwrap().enr; @@ -438,9 +436,8 @@ async fn test_outgoing_connect_with_single_geth() { let geth_socket = SocketAddr::new([127, 0, 0, 1].into(), geth_p2p_port); let geth_endpoint = SocketAddr::new([127, 0, 0, 1].into(), geth.port()).to_string(); - let provider = ProviderBuilder::new() - .on_http(format!("http://{geth_endpoint}").parse().unwrap()) - .unwrap(); + let provider = + ProviderBuilder::new().on_http(format!("http://{geth_endpoint}").parse().unwrap()); // get the peer id we should be expecting let enr = provider.node_info().await.unwrap().enr; @@ -485,9 +482,8 @@ async fn test_geth_disconnect() { let geth_socket = SocketAddr::new([127, 0, 0, 1].into(), geth_p2p_port); let
geth_endpoint = SocketAddr::new([127, 0, 0, 1].into(), geth.port()).to_string(); - let provider = ProviderBuilder::new() - .on_http(format!("http://{geth_endpoint}").parse().unwrap()) - .unwrap(); + let provider = + ProviderBuilder::new().on_http(format!("http://{geth_endpoint}").parse().unwrap()); // get the peer id we should be expecting let enr = provider.node_info().await.unwrap().enr; diff --git a/crates/net/network/tests/it/main.rs b/crates/net/network/tests/it/main.rs index 2bed287d6..1b4494abd 100644 --- a/crates/net/network/tests/it/main.rs +++ b/crates/net/network/tests/it/main.rs @@ -4,7 +4,6 @@ mod multiplex; mod requests; mod session; mod startup; -#[cfg(not(feature = "optimism"))] mod txgossip; fn main() {} diff --git a/crates/net/network/tests/it/multiplex.rs b/crates/net/network/tests/it/multiplex.rs index aac55a982..650b75423 100644 --- a/crates/net/network/tests/it/multiplex.rs +++ b/crates/net/network/tests/it/multiplex.rs @@ -22,7 +22,7 @@ use std::{ use tokio::sync::{mpsc, oneshot}; use tokio_stream::wrappers::UnboundedReceiverStream; -/// A simple Rplx subprotocol for +/// A simple Rlpx subprotocol that sends pings and pongs mod proto { use super::*; use reth_eth_wire::capability::Capability; diff --git a/crates/net/network/tests/it/requests.rs b/crates/net/network/tests/it/requests.rs index decc9ee25..4e36f191c 100644 --- a/crates/net/network/tests/it/requests.rs +++ b/crates/net/network/tests/it/requests.rs @@ -12,8 +12,8 @@ use reth_network::{ }; use reth_network_api::{NetworkInfo, Peers}; use reth_primitives::{ - Block, BlockBody, Bytes, Header, HeadersDirection, Signature, Transaction, TransactionKind, - TransactionSigned, TxEip2930, U256, + Block, BlockBody, Bytes, Header, HeadersDirection, Signature, Transaction, TransactionSigned, + TxEip2930, TxKind, U256, }; use reth_provider::test_utils::MockEthProvider; use std::sync::Arc; @@ -25,7 +25,7 @@ pub fn rng_transaction(rng: &mut impl rand::RngCore) -> TransactionSigned { nonce: rng.gen(), gas_price: rng.gen(), gas_limit: rng.gen(), - to: TransactionKind::Create, + to: TxKind::Create, value: U256::from(rng.gen::<u128>()), input: Bytes::from(vec![1, 2]), access_list: Default::default(), diff --git a/crates/net/types/Cargo.toml b/crates/net/types/Cargo.toml new file mode 100644 index 000000000..9be9a2f3a --- /dev/null +++ b/crates/net/types/Cargo.toml @@ -0,0 +1,33 @@ +[package] +name = "reth-network-types" +version.workspace = true +edition.workspace = true +rust-version.workspace = true +license.workspace = true +homepage.workspace = true +repository.workspace = true +description = "Network types and utils" + +[lints] +workspace = true + +[dependencies] + +# eth +alloy-primitives = { workspace = true, features = ["rlp"] } +alloy-rlp = { workspace = true, features = ["derive"] } +enr.workspace = true + +# crypto
+secp256k1.workspace = true + +# misc +serde_with.workspace = true +thiserror.workspace = true +url.workspace = true + +[dev-dependencies] +alloy-primitives = { workspace = true, features = ["rand"] } +rand.workspace = true +secp256k1 = { workspace = true, features = ["rand"] } +serde_json.workspace = true diff --git a/crates/primitives/src/peer.rs b/crates/net/types/src/lib.rs similarity index 90% rename from crates/primitives/src/peer.rs rename to crates/net/types/src/lib.rs index f66361f39..e4b9f28a4 100644 --- a/crates/primitives/src/peer.rs +++ b/crates/net/types/src/lib.rs @@ -1,10 +1,28 @@ -use enr::Enr; -use reth_rpc_types::NodeRecord; +//! Network Types and Utilities. +//! +//!
This crate manages and converts Ethereum network entities such as node records, peer IDs, and +//! Ethereum Node Records (ENRs). + +#![doc( + html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png", + html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256", + issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" +)] +#![cfg_attr(not(test), warn(unused_crate_dependencies))] +#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] + +use alloy_primitives::B512; use secp256k1::{constants::UNCOMPRESSED_PUBLIC_KEY_SIZE, PublicKey, SecretKey}; use std::{net::IpAddr, str::FromStr}; // Re-export PeerId for ease of use. -pub use reth_rpc_types::PeerId; +pub use enr::Enr; + +/// Alias for a peer identifier. +pub type PeerId = B512; + +pub mod node_record; +pub use node_record::{NodeRecord, NodeRecordParseError}; /// This tag should be set to indicate to libsecp256k1 that the following bytes denote an /// uncompressed pubkey. @@ -114,7 +132,7 @@ impl std::fmt::Display for AnyNode { AnyNode::NodeRecord(record) => write!(f, "{record}"), AnyNode::Enr(enr) => write!(f, "{enr}"), AnyNode::PeerId(peer_id) => { - write!(f, "enode://{}", crate::hex::encode(peer_id.as_slice())) + write!(f, "enode://{}", alloy_primitives::hex::encode(peer_id.as_slice())) } } } diff --git a/crates/net/types/src/node_record.rs b/crates/net/types/src/node_record.rs new file mode 100644 index 000000000..5a6706201 --- /dev/null +++ b/crates/net/types/src/node_record.rs @@ -0,0 +1,362 @@ +//! Commonly used NodeRecord type for peers. + +use std::{ + fmt, + fmt::Write, + net::{IpAddr, Ipv4Addr, SocketAddr}, + num::ParseIntError, + str::FromStr, +}; + +use crate::{pk2id, PeerId}; +use alloy_rlp::{RlpDecodable, RlpEncodable}; +use enr::Enr; +use secp256k1::{SecretKey, SECP256K1}; +use serde_with::{DeserializeFromStr, SerializeDisplay}; + +/// Represents an ENR in discovery. +/// +/// Note: this is only an excerpt of the [`NodeRecord`] data structure. +#[derive( + Clone, + Copy, + Debug, + Eq, + PartialEq, + Hash, + SerializeDisplay, + DeserializeFromStr, + RlpEncodable, + RlpDecodable, +)] +pub struct NodeRecord { + /// The address of a node. + pub address: IpAddr, + /// TCP port of the node that accepts connections. + pub tcp_port: u16, + /// UDP discovery port. + pub udp_port: u16, + /// Public key of the discovery service. + pub id: PeerId, +} + +impl NodeRecord { + /// Derive the [`NodeRecord`] from the secret key and addr. + pub fn from_secret_key(addr: SocketAddr, sk: &SecretKey) -> Self { + let pk = secp256k1::PublicKey::from_secret_key(SECP256K1, sk); + let id = PeerId::from_slice(&pk.serialize_uncompressed()[1..]); + Self::new(addr, id) + } + + /// Converts the `address` into an [`Ipv4Addr`] if the `address` is a mapped + /// [Ipv6Addr](std::net::Ipv6Addr). + /// + /// Returns `true` if the address was converted. + /// + /// See also [std::net::Ipv6Addr::to_ipv4_mapped] + pub fn convert_ipv4_mapped(&mut self) -> bool { + // convert IPv4 mapped IPv6 address + if let IpAddr::V6(v6) = self.address { + if let Some(v4) = v6.to_ipv4_mapped() { + self.address = v4.into(); + return true + } + } + false + } + + /// Same as [Self::convert_ipv4_mapped] but consumes the type + pub fn into_ipv4_mapped(mut self) -> Self { + self.convert_ipv4_mapped(); + self + } + + /// Creates a new record from a socket addr and peer id.
+ #[allow(dead_code)] + pub fn new(addr: SocketAddr, id: PeerId) -> Self { + Self { address: addr.ip(), tcp_port: addr.port(), udp_port: addr.port(), id } + } + + /// The TCP socket address of this node + #[must_use] + pub fn tcp_addr(&self) -> SocketAddr { + SocketAddr::new(self.address, self.tcp_port) + } + + /// The UDP socket address of this node + #[must_use] + pub fn udp_addr(&self) -> SocketAddr { + SocketAddr::new(self.address, self.udp_port) + } +} + +impl fmt::Display for NodeRecord { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.write_str("enode://")?; + alloy_primitives::hex::encode(self.id.as_slice()).fmt(f)?; + f.write_char('@')?; + match self.address { + IpAddr::V4(ip) => { + ip.fmt(f)?; + } + IpAddr::V6(ip) => { + // encapsulate with brackets + f.write_char('[')?; + ip.fmt(f)?; + f.write_char(']')?; + } + } + f.write_char(':')?; + self.tcp_port.fmt(f)?; + if self.tcp_port != self.udp_port { + f.write_str("?discport=")?; + self.udp_port.fmt(f)?; + } + + Ok(()) + } +} + +/// Possible error types when parsing a [`NodeRecord`] +#[derive(Debug, thiserror::Error)] +pub enum NodeRecordParseError { + /// Invalid url + #[error("Failed to parse url: {0}")] + InvalidUrl(String), + /// Invalid id + #[error("Failed to parse id")] + InvalidId(String), + /// Invalid discport + #[error("Failed to parse discport query: {0}")] + Discport(ParseIntError), +} + +impl FromStr for NodeRecord { + type Err = NodeRecordParseError; + + fn from_str(s: &str) -> Result<Self, Self::Err> { + use url::{Host, Url}; + + let url = Url::parse(s).map_err(|e| NodeRecordParseError::InvalidUrl(e.to_string()))?; + + let address = match url.host() { + Some(Host::Ipv4(ip)) => IpAddr::V4(ip), + Some(Host::Ipv6(ip)) => IpAddr::V6(ip), + Some(Host::Domain(ip)) => IpAddr::V4( + Ipv4Addr::from_str(ip) + .map_err(|e| NodeRecordParseError::InvalidUrl(e.to_string()))?, + ), + _ => return Err(NodeRecordParseError::InvalidUrl(format!("invalid host: {url:?}"))), + }; + let port = url + .port() + .ok_or_else(|| NodeRecordParseError::InvalidUrl("no port specified".to_string()))?; + + let udp_port = if let Some(discovery_port) = url + .query_pairs() + .find_map(|(maybe_disc, port)| (maybe_disc.as_ref() == "discport").then_some(port)) + { + discovery_port.parse::<u16>().map_err(NodeRecordParseError::Discport)?
+ } else { + port + }; + + let id = url + .username() + .parse::<PeerId>() + .map_err(|e| NodeRecordParseError::InvalidId(e.to_string()))?; + + Ok(Self { address, id, tcp_port: port, udp_port }) + } +} + +impl TryFrom<&Enr<SecretKey>> for NodeRecord { + type Error = NodeRecordParseError; + + fn try_from(enr: &Enr<SecretKey>) -> Result<Self, Self::Error> { + let Some(address) = enr.ip4().map(IpAddr::from).or_else(|| enr.ip6().map(IpAddr::from)) + else { + return Err(NodeRecordParseError::InvalidUrl("ip missing".to_string())) + }; + + let Some(udp_port) = enr.udp4().or_else(|| enr.udp6()) else { + return Err(NodeRecordParseError::InvalidUrl("udp port missing".to_string())) + }; + + let Some(tcp_port) = enr.tcp4().or_else(|| enr.tcp6()) else { + return Err(NodeRecordParseError::InvalidUrl("tcp port missing".to_string())) + }; + + let id = pk2id(&enr.public_key()); + + Ok(NodeRecord { address, tcp_port, udp_port, id }.into_ipv4_mapped()) + } +} + +#[cfg(test)] +mod tests { + use std::net::Ipv6Addr; + + use alloy_rlp::Decodable; + use rand::{thread_rng, Rng, RngCore}; + + use super::*; + + #[test] + fn test_mapped_ipv6() { + let mut rng = thread_rng(); + + let v4: Ipv4Addr = "0.0.0.0".parse().unwrap(); + let v6 = v4.to_ipv6_mapped(); + + let record = NodeRecord { + address: v6.into(), + tcp_port: rng.gen(), + udp_port: rng.gen(), + id: rng.gen(), + }; + + assert!(record.clone().convert_ipv4_mapped()); + assert_eq!(record.into_ipv4_mapped().address, IpAddr::from(v4)); + } + + #[test] + fn test_mapped_ipv4() { + let mut rng = thread_rng(); + let v4: Ipv4Addr = "0.0.0.0".parse().unwrap(); + + let record = NodeRecord { + address: v4.into(), + tcp_port: rng.gen(), + udp_port: rng.gen(), + id: rng.gen(), + }; + + assert!(!record.clone().convert_ipv4_mapped()); + assert_eq!(record.into_ipv4_mapped().address, IpAddr::from(v4)); + } + + #[test] + fn test_noderecord_codec_ipv4() { + let mut rng = thread_rng(); + for _ in 0..100 { + let mut ip = [0u8; 4]; + rng.fill_bytes(&mut ip); + let record = NodeRecord { + address: IpAddr::V4(ip.into()), + tcp_port: rng.gen(), + udp_port: rng.gen(), + id: rng.gen(), + }; + + let decoded = NodeRecord::decode(&mut alloy_rlp::encode(record).as_slice()).unwrap(); + assert_eq!(record, decoded); + } + } + + #[test] + fn test_noderecord_codec_ipv6() { + let mut rng = thread_rng(); + for _ in 0..100 { + let mut ip = [0u8; 16]; + rng.fill_bytes(&mut ip); + let record = NodeRecord { + address: IpAddr::V6(ip.into()), + tcp_port: rng.gen(), + udp_port: rng.gen(), + id: rng.gen(), + }; + + let decoded = NodeRecord::decode(&mut alloy_rlp::encode(record).as_slice()).unwrap(); + assert_eq!(record, decoded); + } + } + + #[test] + fn test_url_parse() { + let url = "enode://6f8a80d14311c39f35f516fa664deaaaa13e85b2f7493f37f6144d86991ec012937307647bd3b9a82abe2974e1407241d54947bbb39763a4cac9f77166ad92a0@10.3.58.6:30303?discport=30301"; + let node: NodeRecord = url.parse().unwrap(); + assert_eq!(node, NodeRecord { + address: IpAddr::V4([10,3,58,6].into()), + tcp_port: 30303, + udp_port: 30301, + id: "6f8a80d14311c39f35f516fa664deaaaa13e85b2f7493f37f6144d86991ec012937307647bd3b9a82abe2974e1407241d54947bbb39763a4cac9f77166ad92a0".parse().unwrap(), + }) + } + + #[test] + fn test_node_display() { + let url = "enode://6f8a80d14311c39f35f516fa664deaaaa13e85b2f7493f37f6144d86991ec012937307647bd3b9a82abe2974e1407241d54947bbb39763a4cac9f77166ad92a0@10.3.58.6:30303"; + let node: NodeRecord = url.parse().unwrap(); + assert_eq!(url, &format!("{node}")); + }
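The `TryFrom<&Enr>` conversion above finishes with `into_ipv4_mapped`, so an ENR advertising an IPv4-mapped IPv6 address yields a plain IPv4 record, which is what the two `test_mapped_*` tests above pin down. The normalization reduces to `Ipv6Addr::to_ipv4_mapped` from the standard library; a standalone sketch:

use std::net::{IpAddr, Ipv6Addr};

// Mirrors NodeRecord::convert_ipv4_mapped: ::ffff:a.b.c.d becomes a.b.c.d,
// all other addresses pass through unchanged.
fn normalize(addr: IpAddr) -> IpAddr {
    match addr {
        IpAddr::V6(v6) => v6.to_ipv4_mapped().map(IpAddr::V4).unwrap_or(IpAddr::V6(v6)),
        other => other,
    }
}

fn main() {
    let mapped: Ipv6Addr = "::ffff:10.3.58.6".parse().unwrap();
    assert_eq!(normalize(IpAddr::V6(mapped)), "10.3.58.6".parse::<IpAddr>().unwrap());
}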
"enode://6f8a80d14311c39f35f516fa664deaaaa13e85b2f7493f37f6144d86991ec012937307647bd3b9a82abe2974e1407241d54947bbb39763a4cac9f77166ad92a0@10.3.58.6:30303?discport=30301"; + let node: NodeRecord = url.parse().unwrap(); + assert_eq!(url, &format!("{node}")); + } + + #[test] + fn test_node_serialize() { + let cases = vec![ + // IPv4 + ( + NodeRecord { + address: IpAddr::V4([10, 3, 58, 6].into()), + tcp_port: 30303u16, + udp_port: 30301u16, + id: PeerId::from_str("6f8a80d14311c39f35f516fa664deaaaa13e85b2f7493f37f6144d86991ec012937307647bd3b9a82abe2974e1407241d54947bbb39763a4cac9f77166ad92a0").unwrap(), + }, + "\"enode://6f8a80d14311c39f35f516fa664deaaaa13e85b2f7493f37f6144d86991ec012937307647bd3b9a82abe2974e1407241d54947bbb39763a4cac9f77166ad92a0@10.3.58.6:30303?discport=30301\"" + ), + // IPv6 + ( + NodeRecord { + address: Ipv6Addr::new(0x2001, 0xdb8, 0x3c4d, 0x15, 0x0, 0x0, 0xabcd, 0xef12).into(), + tcp_port: 52150u16, + udp_port: 52151u16, + id: PeerId::from_str("1dd9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439").unwrap(), + }, + "\"enode://1dd9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439@[2001:db8:3c4d:15::abcd:ef12]:52150?discport=52151\"", + ) + ]; + + for (node, expected) in cases { + let ser = serde_json::to_string::(&node).expect("couldn't serialize"); + assert_eq!(ser, expected); + } + } + + #[test] + fn test_node_deserialize() { + let cases = vec![ + // IPv4 + ( + "\"enode://6f8a80d14311c39f35f516fa664deaaaa13e85b2f7493f37f6144d86991ec012937307647bd3b9a82abe2974e1407241d54947bbb39763a4cac9f77166ad92a0@10.3.58.6:30303?discport=30301\"", + NodeRecord { + address: IpAddr::V4([10, 3, 58, 6].into()), + tcp_port: 30303u16, + udp_port: 30301u16, + id: PeerId::from_str("6f8a80d14311c39f35f516fa664deaaaa13e85b2f7493f37f6144d86991ec012937307647bd3b9a82abe2974e1407241d54947bbb39763a4cac9f77166ad92a0").unwrap(), + } + ), + // IPv6 + ( + "\"enode://1dd9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439@[2001:db8:3c4d:15::abcd:ef12]:52150?discport=52151\"", + NodeRecord { + address: Ipv6Addr::new(0x2001, 0xdb8, 0x3c4d, 0x15, 0x0, 0x0, 0xabcd, 0xef12).into(), + tcp_port: 52150u16, + udp_port: 52151u16, + id: PeerId::from_str("1dd9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439").unwrap(), + } + ), + ]; + + for (url, expected) in cases { + let node: NodeRecord = serde_json::from_str(url).expect("couldn't deserialize"); + assert_eq!(node, expected); + } + } +} diff --git a/crates/node-builder/src/builder.rs b/crates/node-builder/src/builder.rs deleted file mode 100644 index 49be32b33..000000000 --- a/crates/node-builder/src/builder.rs +++ /dev/null @@ -1,1433 +0,0 @@ -//! Customizable node builder. 
diff --git a/crates/node-builder/src/builder.rs b/crates/node-builder/src/builder.rs deleted file mode 100644 index 49be32b33..000000000 --- a/crates/node-builder/src/builder.rs +++ /dev/null @@ -1,1433 +0,0 @@ -//! Customizable node builder. - -#![allow(clippy::type_complexity, missing_debug_implementations)] - -use crate::{ - components::{ComponentsBuilder, NodeComponents, NodeComponentsBuilder, PoolBuilder}, - exex::BoxedLaunchExEx, - hooks::NodeHooks, - node::FullNode, - rpc::{RethRpcServerHandles, RpcContext, RpcHooks}, - Node, NodeHandle, -}; -use eyre::Context; -use futures::{future, future::Either, stream, stream_select, Future, StreamExt}; -use rayon::ThreadPoolBuilder; -use reth_auto_seal_consensus::{AutoSealConsensus, MiningMode}; -use reth_beacon_consensus::{ - hooks::{EngineHooks, PruneHook, StaticFileHook}, - BeaconConsensus, BeaconConsensusEngine, -}; -use reth_blockchain_tree::{ - BlockchainTree, BlockchainTreeConfig, ShareableBlockchainTree, TreeExternals, -}; -use reth_config::config::EtlConfig; -use reth_db::{ - database::Database, - database_metrics::{DatabaseMetadata, DatabaseMetrics}, - test_utils::{create_test_rw_db, TempDatabase}, - DatabaseEnv, -}; -use reth_exex::{ExExContext, ExExHandle, ExExManager, ExExManagerHandle}; -use reth_interfaces::{consensus::Consensus, p2p::either::EitherDownloader}; -use reth_network::{NetworkBuilder, NetworkConfig, NetworkEvents, NetworkHandle}; -use reth_node_api::{ - FullNodeComponents, FullNodeComponentsAdapter, FullNodeTypes, FullNodeTypesAdapter, NodeTypes, -}; -use reth_node_core::{ - cli::config::{PayloadBuilderConfig, RethRpcConfig, RethTransactionPoolConfig}, - dirs::{ChainPath, DataDirPath, MaybePlatformPath}, - engine_api_store::EngineApiStore, - engine_skip_fcu::EngineApiSkipFcu, - exit::NodeExitFuture, - init::init_genesis, - node_config::NodeConfig, - primitives::{kzg::KzgSettings, Head}, - utils::write_peers_to_file, -}; -use reth_node_events::{cl::ConsensusLayerHealthEvents, node}; -use reth_primitives::{constants::eip4844::MAINNET_KZG_TRUSTED_SETUP, format_ether, ChainSpec}; -use reth_provider::{ - providers::BlockchainProvider, CanonStateSubscriptions, ChainSpecProvider, ProviderFactory, -}; -use reth_prune::PrunerBuilder; -use reth_revm::EvmProcessorFactory; -use reth_rpc_engine_api::EngineApi; -use reth_static_file::StaticFileProducer; -use reth_tasks::TaskExecutor; -use reth_tracing::tracing::{debug, error, info}; -use reth_transaction_pool::{PoolConfig, TransactionPool}; -use std::{cmp::max, str::FromStr, sync::Arc, thread::available_parallelism}; -use tokio::sync::{mpsc::unbounded_channel, oneshot}; - -/// The builtin provider type of the reth node. -// Note: we need to hardcode this because custom components might depend on it in associated types. -type RethFullProviderType<DB, Evm> = - BlockchainProvider<DB, ShareableBlockchainTree<DB, EvmProcessorFactory<Evm>>>; - -type RethFullAdapter<DB, N> = - FullNodeTypesAdapter<N, DB, RethFullProviderType<DB, <N as NodeTypes>::Evm>>; - -#[cfg_attr(doc, aquamarine::aquamarine)] -/// Declaratively construct a node. -/// -/// [`NodeBuilder`] provides a [builder-like interface][builder] for composing -/// components of a node. -/// -/// ## Order -/// -/// Configuring a node starts out with a [`NodeConfig`] (this can be obtained from CLI arguments, for -/// example) and then proceeds to configure the core static types of the node: [NodeTypes]; these -/// include the node's primitive types and the node's engine types.
-/// -/// Next, all stateful components of the node are configured; these include the -/// [ConfigureEvm](reth_node_api::evm::ConfigureEvm), the database [Database], and all the -/// components of the node that are downstream of those types: -/// -/// - The transaction pool: [PoolBuilder] -/// - The network: [NetworkBuilder](crate::components::NetworkBuilder) -/// - The payload builder: [PayloadBuilder](crate::components::PayloadServiceBuilder) -/// -/// Once all the components are configured, the node is ready to be launched. -/// -/// On launch the builder returns a fully type-aware [NodeHandle] that has access to all the -/// configured components and can interact with the node. -/// -/// There are convenience functions for networks that come with a preset of types and components via -/// the [Node] trait; see `reth_node_ethereum::EthereumNode` or `reth_node_optimism::OptimismNode`. -/// -/// The [NodeBuilder::node] function configures the node's types and components in one step. -/// -/// ## Components -/// -/// All components are configured with a [NodeComponentsBuilder] that is responsible for actually -/// creating the node components during the launch process. The [ComponentsBuilder] is a general -/// purpose implementation of the [NodeComponentsBuilder] trait that can be used to configure the -/// network, transaction pool and payload builder of the node. It enforces the correct order of -/// configuration: for example, the network and the payload builder depend on the transaction pool -/// type, which is configured first. -/// -/// All builder traits are generic over the node types and are invoked with the [BuilderContext], -/// which gives access to the internals of the node that are needed to configure the components. This -/// includes the original config, the chain spec, the database provider, and the task executor. -/// -/// ## Hooks -/// -/// Once all the components are configured, the builder can be used to set hooks that are run at -/// specific points in the node's lifecycle. This way custom services can be spawned before the node -/// is launched ([NodeBuilder::on_component_initialized]), or once the rpc server(s) are launched -/// ([NodeBuilder::on_rpc_started]). The [NodeBuilder::extend_rpc_modules] can be used to inject -/// custom rpc modules into the rpc server before it is launched. See also [RpcContext]. -/// All hooks accept a closure that is then invoked at the appropriate time in the node's launch -/// process. -/// -/// ## Flow -/// -/// The [NodeBuilder] is intended to sit behind a CLI that provides the necessary [NodeConfig] -/// input: [NodeBuilder::new] -/// -/// From there the builder is configured with the node's types, components, and hooks, then launched -/// with the [NodeBuilder::launch] method. On launch all the builtin internals, such as the -/// `Database` and its providers ([BlockchainProvider]), are initialized before the configured -/// [NodeComponentsBuilder] is invoked with the [BuilderContext] to create the transaction pool, -/// network, and payload builder components. When the RPC is configured, the corresponding hooks are -/// invoked to allow for custom rpc modules to be injected into the rpc server: -/// [NodeBuilder::extend_rpc_modules] -/// -/// Finally, all components are created, all services are launched, and a [NodeHandle] is returned -/// that can be used to interact with the node: [FullNode] -/// -/// The following diagram shows the flow of the node builder from CLI to a launched node.
-/// -/// include_mmd!("docs/mermaid/builder.mmd") -/// -/// ## Internals -/// -/// The node builder is fully type safe; it uses the [NodeTypes] trait to enforce that all -/// components are configured with the correct types. However, the database types, and with them the -/// provider trait implementations, are currently created by the builder itself during the launch -/// process; hence the database type is not part of the [NodeTypes] trait, and the node's components -/// that depend on the database are configured separately. In order to have a single trait that -/// encapsulates the entire node, the [FullNodeComponents] trait was introduced. This trait has -/// convenient associated types for all the components of the node. After [NodeBuilder::launch] the -/// [NodeHandle] contains an instance of [FullNode] that implements the [FullNodeComponents] trait -/// and has access to all the components of the node. Internally the node builder uses several -/// generic adapter types that are then mapped to traits with associated types for ease of use. -/// -/// ### Limitations -/// -/// Currently the launch process is limited to Ethereum nodes and requires all the components -/// specified above. It also expects beacon consensus with the Ethereum engine API, which is -/// configured by the builder itself during launch. This might change in the future. -/// -/// [builder]: https://doc.rust-lang.org/1.0.0/style/ownership/builders.html -pub struct NodeBuilder<DB, State> { - /// All settings for how the node should be configured. - config: NodeConfig, - /// State of the node builder process. - state: State, - /// The configured database for the node. - database: DB, -} - -impl<DB, State> NodeBuilder<DB, State> { - /// Returns a reference to the node builder's config. - pub fn config(&self) -> &NodeConfig { - &self.config - } - - /// Loads the reth config with the given datadir root - fn load_config(&self, data_dir: &ChainPath<DataDirPath>) -> eyre::Result<reth_config::Config> { - let config_path = self.config.config.clone().unwrap_or_else(|| data_dir.config_path()); - - let mut config = confy::load_path::<reth_config::Config>(&config_path) - .wrap_err_with(|| format!("Could not load config file {config_path:?}"))?; - - info!(target: "reth::cli", path = ?config_path, "Configuration loaded"); - - // Update the config with the command line arguments - config.peers.trusted_nodes_only = self.config.network.trusted_only; - - if !self.config.network.trusted_peers.is_empty() { - info!(target: "reth::cli", "Adding trusted nodes"); - self.config.network.trusted_peers.iter().for_each(|peer| { - config.peers.trusted_nodes.insert(*peer); - }); - } - - Ok(config) - } -} - -impl NodeBuilder<(), InitState> { - /// Create a new [`NodeBuilder`]. - pub fn new(config: NodeConfig) -> Self { - Self { config, database: (), state: InitState::default() } - } -} - -impl NodeBuilder { - /// Configures the underlying database that the node will use. - pub fn with_database<D>(self, database: D) -> NodeBuilder<D, State> { - NodeBuilder { config: self.config, state: self.state, database } - } - - /// Preconfigure the builder with the context to launch the node. - /// - /// This provides the task executor and the data directory for the node. - pub fn with_launch_context( - self, - task_executor: TaskExecutor, - data_dir: ChainPath<DataDirPath>, - ) -> WithLaunchContext<DB, State> { - WithLaunchContext { builder: self, task_executor, data_dir } - }
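Read end to end, the flow the doc comment above describes looks roughly as follows. This is a hedged sketch against the (since-removed) API, where `node_config`, `database`, `executor`, and `data_dir` are assumed to come from the surrounding CLI, and `EthereumNode` stands in for any [Node] implementation that bundles preset types and components:

// Hedged sketch of the builder flow; values are assumed to exist in scope.
let handle = NodeBuilder::new(node_config)
    .with_database(database)
    .with_launch_context(executor, data_dir)
    // Preset types + components (pool, network, payload) in one step.
    .node(EthereumNode::default())
    // Hooks run at fixed points in the launch lifecycle.
    .on_component_initialized(|_components| Ok(()))
    .extend_rpc_modules(|_ctx| Ok(()))
    .launch()
    .await?;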
- - /// Creates an _ephemeral_ preconfigured node for testing purposes. - pub fn testing_node( - self, - task_executor: TaskExecutor, - ) -> WithLaunchContext<Arc<TempDatabase<DatabaseEnv>>, InitState> { - let db = create_test_rw_db(); - let db_path_str = db.path().to_str().expect("Path is not valid unicode"); - let path = - MaybePlatformPath::<DataDirPath>::from_str(db_path_str).expect("Path is not valid"); - let data_dir = path.unwrap_or_chain_default(self.config.chain.chain); - - WithLaunchContext { builder: self.with_database(db), task_executor, data_dir } - } -} - -impl NodeBuilder -where - DB: Database + Unpin + Clone + 'static, -{ - /// Configures the types of the node. - pub fn with_types<T>(self, types: T) -> NodeBuilder<DB, TypesState<T, DB>> - where - T: NodeTypes, - { - NodeBuilder { - config: self.config, - state: TypesState { adapter: FullNodeTypesAdapter::new(types) }, - database: self.database, - } - } - - /// Preconfigures the node with a specific node implementation. - /// - /// This is a convenience method that sets the node's types and components in one call. - pub fn node<N>( - self, - node: N, - ) -> NodeBuilder< - DB, - ComponentsState< - N, - ComponentsBuilder< - RethFullAdapter<DB, N>, - N::PoolBuilder, - N::PayloadBuilder, - N::NetworkBuilder, - >, - FullNodeComponentsAdapter< - RethFullAdapter<DB, N>, - <N::PoolBuilder as PoolBuilder<RethFullAdapter<DB, N>>>::Pool, - >, - >, - > - where - N: Node<FullNodeTypesAdapter<N, DB, RethFullProviderType<DB, <N as NodeTypes>::Evm>>>, - N::PoolBuilder: PoolBuilder<RethFullAdapter<DB, N>>, - N::NetworkBuilder: crate::components::NetworkBuilder< - RethFullAdapter<DB, N>, - <N::PoolBuilder as PoolBuilder<RethFullAdapter<DB, N>>>::Pool, - >, - N::PayloadBuilder: crate::components::PayloadServiceBuilder< - RethFullAdapter<DB, N>, - <N::PoolBuilder as PoolBuilder<RethFullAdapter<DB, N>>>::Pool, - >, - { - self.with_types(node.clone()).with_components(node.components()) - } -} - -impl<DB, Types> NodeBuilder<DB, TypesState<Types, DB>> -where - Types: NodeTypes, - DB: Database + Clone + Unpin + 'static, -{ - /// Configures the node's components. - pub fn with_components<Components>( - self, - components_builder: Components, - ) -> NodeBuilder< - DB, - ComponentsState< - Types, - Components, - FullNodeComponentsAdapter< - FullNodeTypesAdapter<Types, DB, RethFullProviderType<DB, Types::Evm>>, - Components::Pool, - >, - >, - > - where - Components: NodeComponentsBuilder< - FullNodeTypesAdapter<Types, DB, RethFullProviderType<DB, Types::Evm>>, - >, - { - NodeBuilder { - config: self.config, - database: self.database, - state: ComponentsState { - types: self.state.adapter.types, - components_builder, - hooks: NodeHooks::new(), - rpc: RpcHooks::new(), - exexs: Vec::new(), - }, - } - } -} - -impl<DB, Types, Components> - NodeBuilder< - DB, - ComponentsState< - Types, - Components, - FullNodeComponentsAdapter< - FullNodeTypesAdapter<Types, DB, RethFullProviderType<DB, Types::Evm>>, - Components::Pool, - >, - >, - > -where - DB: Database + DatabaseMetrics + DatabaseMetadata + Clone + Unpin + 'static, - Types: NodeTypes, - Components: NodeComponentsBuilder< - FullNodeTypesAdapter<Types, DB, RethFullProviderType<DB, Types::Evm>>, - >, -{ - /// Apply a function to the components builder. - pub fn map_components(self, f: impl FnOnce(Components) -> Components) -> Self { - Self { - config: self.config, - database: self.database, - state: ComponentsState { - types: self.state.types, - components_builder: f(self.state.components_builder), - hooks: self.state.hooks, - rpc: self.state.rpc, - exexs: self.state.exexs, - }, - } - } - - /// Sets the hook that is run once the node's components are initialized. - pub fn on_component_initialized<F>(mut self, hook: F) -> Self - where - F: Fn( - FullNodeComponentsAdapter< - FullNodeTypesAdapter<Types, DB, RethFullProviderType<DB, Types::Evm>>, - Components::Pool, - >, - ) -> eyre::Result<()> - + Send - + 'static, - { - self.state.hooks.set_on_component_initialized(hook); - self - } - - /// Sets the hook that is run once the node has started.
- pub fn on_node_started(mut self, hook: F) -> Self - where - F: Fn( - FullNode< - FullNodeComponentsAdapter< - FullNodeTypesAdapter>, - Components::Pool, - >, - >, - ) -> eyre::Result<()> - + Send - + 'static, - { - self.state.hooks.set_on_node_started(hook); - self - } - - /// Sets the hook that is run once the rpc server is started. - pub fn on_rpc_started(mut self, hook: F) -> Self - where - F: Fn( - RpcContext< - '_, - FullNodeComponentsAdapter< - FullNodeTypesAdapter>, - Components::Pool, - >, - >, - RethRpcServerHandles, - ) -> eyre::Result<()> - + Send - + 'static, - { - self.state.rpc.set_on_rpc_started(hook); - self - } - - /// Sets the hook that is run to configure the rpc modules. - pub fn extend_rpc_modules(mut self, hook: F) -> Self - where - F: Fn( - RpcContext< - '_, - FullNodeComponentsAdapter< - FullNodeTypesAdapter>, - Components::Pool, - >, - >, - ) -> eyre::Result<()> - + Send - + 'static, - { - self.state.rpc.set_extend_rpc_modules(hook); - self - } - - /// Installs an ExEx (Execution Extension) in the node. - /// - /// # Note - /// - /// The ExEx ID must be unique. - pub fn install_exex(mut self, exex_id: impl Into, exex: F) -> Self - where - F: Fn( - ExExContext< - FullNodeComponentsAdapter< - FullNodeTypesAdapter>, - Components::Pool, - >, - >, - ) -> R - + Send - + 'static, - R: Future> + Send, - E: Future> + Send, - { - self.state.exexs.push((exex_id.into(), Box::new(exex))); - self - } - - /// Launches the node and returns a handle to it. - /// - /// This bootstraps the node internals, creates all the components with the provider - /// [NodeComponentsBuilder] and launches the node. - /// - /// Returns a [NodeHandle] that can be used to interact with the node. - pub async fn launch( - self, - executor: TaskExecutor, - data_dir: ChainPath, - ) -> eyre::Result< - NodeHandle< - FullNodeComponentsAdapter< - FullNodeTypesAdapter>, - Components::Pool, - >, - >, - > { - // get config from file - let reth_config = self.load_config(&data_dir)?; - - let Self { - config, - state: ComponentsState { types, components_builder, hooks, rpc, exexs: _ }, - database, - } = self; - - // Raise the fd limit of the process. - // Does not do anything on windows. - fdlimit::raise_fd_limit()?; - - // Limit the global rayon thread pool, reserving 2 cores for the rest of the system - let _ = ThreadPoolBuilder::new() - .num_threads( - available_parallelism().map_or(25, |cpus| max(cpus.get().saturating_sub(2), 2)), - ) - .build_global() - .map_err(|e| error!("Failed to build global thread pool: {:?}", e)); - - let provider_factory = ProviderFactory::new( - database.clone(), - Arc::clone(&config.chain), - data_dir.static_files_path(), - )? 
- .with_static_files_metrics(); - info!(target: "reth::cli", "Database opened"); - - let prometheus_handle = config.install_prometheus_recorder()?; - config - .start_metrics_endpoint( - prometheus_handle, - database.clone(), - provider_factory.static_file_provider(), - executor.clone(), - ) - .await?; - - debug!(target: "reth::cli", chain=%config.chain.chain, genesis=?config.chain.genesis_hash(), "Initializing genesis"); - - let genesis_hash = init_genesis(provider_factory.clone())?; - - info!(target: "reth::cli", "\n{}", config.chain.display_hardforks()); - - // setup the consensus instance - let consensus: Arc = if config.dev.dev { - Arc::new(AutoSealConsensus::new(Arc::clone(&config.chain))) - } else { - Arc::new(BeaconConsensus::new(Arc::clone(&config.chain))) - }; - - debug!(target: "reth::cli", "Spawning stages metrics listener task"); - let (sync_metrics_tx, sync_metrics_rx) = unbounded_channel(); - let sync_metrics_listener = reth_stages::MetricsListener::new(sync_metrics_rx); - executor.spawn_critical("stages metrics listener task", sync_metrics_listener); - - let prune_config = config.prune_config()?.or_else(|| reth_config.prune.clone()); - - // Configure the blockchain tree for the node - let evm_config = types.evm_config(); - let tree_config = BlockchainTreeConfig::default(); - let tree_externals = TreeExternals::new( - provider_factory.clone(), - consensus.clone(), - EvmProcessorFactory::new(config.chain.clone(), evm_config.clone()), - ); - let tree = BlockchainTree::new( - tree_externals, - tree_config, - prune_config.as_ref().map(|config| config.segments.clone()), - )? - .with_sync_metrics_tx(sync_metrics_tx.clone()); - - let canon_state_notification_sender = tree.canon_state_notification_sender(); - let blockchain_tree = ShareableBlockchainTree::new(tree); - debug!(target: "reth::cli", "configured blockchain tree"); - - // fetch the head block from the database - let head = - config.lookup_head(provider_factory.clone()).wrap_err("the head block is missing")?; - - // setup the blockchain provider - let blockchain_db = - BlockchainProvider::new(provider_factory.clone(), blockchain_tree.clone())?; - - let ctx = BuilderContext::new( - head, - blockchain_db, - executor, - data_dir, - config, - reth_config, - evm_config.clone(), - ); - - debug!(target: "reth::cli", "creating components"); - let NodeComponents { transaction_pool, network, payload_builder } = - components_builder.build_components(&ctx).await?; - - let BuilderContext { - provider: blockchain_db, - executor, - data_dir, - mut config, - mut reth_config, - .. - } = ctx; - - let NodeHooks { on_component_initialized, on_node_started, .. 
} = hooks; - - let node_components = FullNodeComponentsAdapter { - evm_config: evm_config.clone(), - pool: transaction_pool.clone(), - network: network.clone(), - provider: blockchain_db.clone(), - payload_builder: payload_builder.clone(), - executor: executor.clone(), - }; - debug!(target: "reth::cli", "calling on_component_initialized hook"); - on_component_initialized.on_event(node_components.clone())?; - - // spawn exexs - let mut exex_handles = Vec::with_capacity(self.state.exexs.len()); - let mut exexs = Vec::with_capacity(self.state.exexs.len()); - for (id, exex) in self.state.exexs { - // create a new exex handle - let (handle, events, notifications) = ExExHandle::new(id.clone()); - exex_handles.push(handle); - - // create the launch context for the exex - let context = ExExContext { - head, - provider: blockchain_db.clone(), - task_executor: executor.clone(), - data_dir: data_dir.clone(), - config: config.clone(), - reth_config: reth_config.clone(), - pool: transaction_pool.clone(), - events, - notifications, - }; - - let executor = executor.clone(); - exexs.push(async move { - debug!(target: "reth::cli", id, "spawning exex"); - let span = reth_tracing::tracing::info_span!("exex", id); - let _enter = span.enter(); - - // init the exex - let exex = exex.launch(context).await.unwrap(); - - // spawn it as a crit task - executor.spawn_critical("exex", async move { - info!(target: "reth::cli", id, "ExEx started"); - match exex.await { - Ok(_) => panic!("ExEx {id} finished. ExEx's should run indefinitely"), - Err(err) => panic!("ExEx {id} crashed: {err}"), - } - }); - }); - } - - future::join_all(exexs).await; - - // spawn exex manager - let exex_manager_handle = if !exex_handles.is_empty() { - debug!(target: "reth::cli", "spawning exex manager"); - // todo(onbjerg): rm magic number - let exex_manager = ExExManager::new(exex_handles, 1024); - let exex_manager_handle = exex_manager.handle(); - executor.spawn_critical("exex manager", async move { - exex_manager.await.expect("exex manager crashed"); - }); - - // send notifications from the blockchain tree to exex manager - let mut canon_state_notifications = blockchain_tree.subscribe_to_canonical_state(); - let mut handle = exex_manager_handle.clone(); - executor.spawn_critical("exex manager blockchain tree notifications", async move { - while let Ok(notification) = canon_state_notifications.recv().await { - handle - .send_async(notification) - .await - .expect("blockchain tree notification could not be sent to exex manager"); - } - }); - - info!(target: "reth::cli", "ExEx Manager started"); - - Some(exex_manager_handle) - } else { - None - }; - - // create pipeline - let network_client = network.fetch_client().await?; - let (consensus_engine_tx, mut consensus_engine_rx) = unbounded_channel(); - - if let Some(skip_fcu_threshold) = config.debug.skip_fcu { - debug!(target: "reth::cli", "spawning skip FCU task"); - let (skip_fcu_tx, skip_fcu_rx) = unbounded_channel(); - let engine_skip_fcu = EngineApiSkipFcu::new(skip_fcu_threshold); - executor.spawn_critical( - "skip FCU interceptor", - engine_skip_fcu.intercept(consensus_engine_rx, skip_fcu_tx), - ); - consensus_engine_rx = skip_fcu_rx; - } - - if let Some(store_path) = config.debug.engine_api_store.clone() { - debug!(target: "reth::cli", "spawning engine API store"); - let (engine_intercept_tx, engine_intercept_rx) = unbounded_channel(); - let engine_api_store = EngineApiStore::new(store_path); - executor.spawn_critical( - "engine api interceptor", - 
engine_api_store.intercept(consensus_engine_rx, engine_intercept_tx), - ); - consensus_engine_rx = engine_intercept_rx; - }; - - let max_block = config.max_block(&network_client, provider_factory.clone()).await?; - let mut hooks = EngineHooks::new(); - - let static_file_producer = StaticFileProducer::new( - provider_factory.clone(), - provider_factory.static_file_provider(), - prune_config.clone().unwrap_or_default().segments, - ); - let static_file_producer_events = static_file_producer.lock().events(); - hooks.add(StaticFileHook::new(static_file_producer.clone(), Box::new(executor.clone()))); - info!(target: "reth::cli", "StaticFileProducer initialized"); - - // Make sure ETL doesn't default to /tmp/, but to whatever datadir is set to - if reth_config.stages.etl.dir.is_none() { - reth_config.stages.etl.dir = Some(EtlConfig::from_datadir(&data_dir.data_dir_path())); - } - - // Configure the pipeline - let pipeline_exex_handle = - exex_manager_handle.clone().unwrap_or_else(ExExManagerHandle::empty); - let (mut pipeline, client) = if config.dev.dev { - info!(target: "reth::cli", "Starting Reth in dev mode"); - - for (idx, (address, alloc)) in config.chain.genesis.alloc.iter().enumerate() { - info!(target: "reth::cli", "Allocated Genesis Account: {:02}. {} ({} ETH)", idx, address.to_string(), format_ether(alloc.balance)); - } - - // install auto-seal - let pending_transactions_listener = transaction_pool.pending_transactions_listener(); - - let mining_mode = if let Some(interval) = config.dev.block_time { - MiningMode::interval(interval) - } else if let Some(max_transactions) = config.dev.block_max_transactions { - MiningMode::instant(max_transactions, pending_transactions_listener) - } else { - info!(target: "reth::cli", "No mining mode specified, defaulting to ReadyTransaction"); - MiningMode::instant(1, pending_transactions_listener) - }; - - let (_, client, mut task) = reth_auto_seal_consensus::AutoSealBuilder::new( - Arc::clone(&config.chain), - blockchain_db.clone(), - transaction_pool.clone(), - consensus_engine_tx.clone(), - canon_state_notification_sender, - mining_mode, - evm_config.clone(), - ) - .build(); - - let mut pipeline = crate::setup::build_networked_pipeline( - &config, - &reth_config.stages, - client.clone(), - Arc::clone(&consensus), - provider_factory.clone(), - &executor, - sync_metrics_tx, - prune_config.clone(), - max_block, - static_file_producer, - evm_config, - pipeline_exex_handle, - ) - .await?; - - let pipeline_events = pipeline.events(); - task.set_pipeline_events(pipeline_events); - debug!(target: "reth::cli", "Spawning auto mine task"); - executor.spawn(Box::pin(task)); - - (pipeline, EitherDownloader::Left(client)) - } else { - let pipeline = crate::setup::build_networked_pipeline( - &config, - &reth_config.stages, - network_client.clone(), - Arc::clone(&consensus), - provider_factory.clone(), - &executor, - sync_metrics_tx, - prune_config.clone(), - max_block, - static_file_producer, - evm_config, - pipeline_exex_handle, - ) - .await?; - - (pipeline, EitherDownloader::Right(network_client)) - }; - - let pipeline_events = pipeline.events(); - - let initial_target = config.initial_pipeline_target(genesis_hash); - - let prune_config = prune_config.unwrap_or_default(); - let mut pruner_builder = PrunerBuilder::new(prune_config.clone()) - .max_reorg_depth(tree_config.max_reorg_depth() as usize) - .prune_delete_limit(config.chain.prune_delete_limit) - .timeout(PrunerBuilder::DEFAULT_TIMEOUT); - if let Some(exex_manager_handle) = &exex_manager_handle { - 
pruner_builder = - pruner_builder.finished_exex_height(exex_manager_handle.finished_height()); - } - - let mut pruner = pruner_builder.build(provider_factory.clone()); - - let pruner_events = pruner.events(); - hooks.add(PruneHook::new(pruner, Box::new(executor.clone()))); - info!(target: "reth::cli", ?prune_config, "Pruner initialized"); - - // Configure the consensus engine - let (beacon_consensus_engine, beacon_engine_handle) = BeaconConsensusEngine::with_channel( - client, - pipeline, - blockchain_db.clone(), - Box::new(executor.clone()), - Box::new(network.clone()), - max_block, - config.debug.continuous, - payload_builder.clone(), - initial_target, - reth_beacon_consensus::MIN_BLOCKS_FOR_PIPELINE_RUN, - consensus_engine_tx, - consensus_engine_rx, - hooks, - )?; - info!(target: "reth::cli", "Consensus engine initialized"); - - let events = stream_select!( - network.event_listener().map(Into::into), - beacon_engine_handle.event_listener().map(Into::into), - pipeline_events.map(Into::into), - if config.debug.tip.is_none() && !config.dev.dev { - Either::Left( - ConsensusLayerHealthEvents::new(Box::new(blockchain_db.clone())) - .map(Into::into), - ) - } else { - Either::Right(stream::empty()) - }, - pruner_events.map(Into::into), - static_file_producer_events.map(Into::into) - ); - executor.spawn_critical( - "events task", - node::handle_events(Some(network.clone()), Some(head.number), events, database.clone()), - ); - - let engine_api = EngineApi::new( - blockchain_db.clone(), - config.chain.clone(), - beacon_engine_handle, - payload_builder.into(), - Box::new(executor.clone()), - ); - info!(target: "reth::cli", "Engine API handler initialized"); - - // extract the jwt secret from the args if possible - let default_jwt_path = data_dir.jwt_path(); - let jwt_secret = config.rpc.auth_jwt_secret(default_jwt_path)?; - - // adjust rpc port numbers based on instance number - config.adjust_instance_ports(); - - // Start RPC servers - - let (rpc_server_handles, mut rpc_registry) = crate::rpc::launch_rpc_servers( - node_components.clone(), - engine_api, - &config, - jwt_secret, - rpc, - ) - .await?; - - // in dev mode we generate 20 random dev-signer accounts - if config.dev.dev { - rpc_registry.eth_api().with_dev_accounts(); - } - - // Run consensus engine to completion - let (tx, rx) = oneshot::channel(); - info!(target: "reth::cli", "Starting consensus engine"); - executor.spawn_critical_blocking("consensus engine", async move { - let res = beacon_consensus_engine.await; - let _ = tx.send(res); - }); - - let FullNodeComponentsAdapter { - evm_config, - pool, - network, - provider, - payload_builder, - executor, - } = node_components; - - let full_node = FullNode { - evm_config, - pool, - network, - provider, - payload_builder, - task_executor: executor, - rpc_server_handles, - rpc_registry, - config, - data_dir, - }; - // Notify on node started - on_node_started.on_event(full_node.clone())?; - - let handle = NodeHandle { - node_exit_future: NodeExitFuture::new(rx, full_node.config.debug.terminate), - node: full_node, - }; - - Ok(handle) - } - - /// Check that the builder can be launched - /// - /// This is useful when writing tests to ensure that the builder is configured correctly. - pub fn check_launch(self) -> Self { - self - } -} - -/// A [NodeBuilder] with it's launch context already configured. 
-/// -/// This exposes the same methods as [NodeBuilder] but with the launch context already configured, -/// See [WithLaunchContext::launch] -pub struct WithLaunchContext { - builder: NodeBuilder, - task_executor: TaskExecutor, - data_dir: ChainPath, -} - -impl WithLaunchContext { - /// Returns a reference to the node builder's config. - pub fn config(&self) -> &NodeConfig { - self.builder.config() - } - - /// Returns a reference to the task executor. - pub fn task_executor(&self) -> &TaskExecutor { - &self.task_executor - } - - /// Returns a reference to the data directory. - pub fn data_dir(&self) -> &ChainPath { - &self.data_dir - } -} - -impl WithLaunchContext -where - DB: Database + Clone + Unpin + 'static, -{ - /// Configures the types of the node. - pub fn with_types(self, types: T) -> WithLaunchContext> - where - T: NodeTypes, - { - WithLaunchContext { - builder: self.builder.with_types(types), - task_executor: self.task_executor, - data_dir: self.data_dir, - } - } - - /// Preconfigures the node with a specific node implementation. - pub fn node( - self, - node: N, - ) -> WithLaunchContext< - DB, - ComponentsState< - N, - ComponentsBuilder< - RethFullAdapter, - N::PoolBuilder, - N::PayloadBuilder, - N::NetworkBuilder, - >, - FullNodeComponentsAdapter< - RethFullAdapter, - >>::Pool, - >, - >, - > - where - N: Node::Evm>>>, - N::PoolBuilder: PoolBuilder>, - N::NetworkBuilder: crate::components::NetworkBuilder< - RethFullAdapter, - >>::Pool, - >, - N::PayloadBuilder: crate::components::PayloadServiceBuilder< - RethFullAdapter, - >>::Pool, - >, - { - self.with_types(node.clone()).with_components(node.components()) - } -} - -impl WithLaunchContext -where - DB: Database + DatabaseMetrics + DatabaseMetadata + Clone + Unpin + 'static, -{ - /// Launches a preconfigured [Node] - /// - /// This bootstraps the node internals, creates all the components with the given [Node] type - /// and launches the node. - /// - /// Returns a [NodeHandle] that can be used to interact with the node. - pub async fn launch_node( - self, - node: N, - ) -> eyre::Result< - NodeHandle< - FullNodeComponentsAdapter< - RethFullAdapter, - >>::Pool, - >, - >, - > - where - N: Node::Evm>>>, - N::PoolBuilder: PoolBuilder>, - N::NetworkBuilder: crate::components::NetworkBuilder< - RethFullAdapter, - >>::Pool, - >, - N::PayloadBuilder: crate::components::PayloadServiceBuilder< - RethFullAdapter, - >>::Pool, - >, - { - self.node(node).launch().await - } -} - -impl WithLaunchContext> -where - Types: NodeTypes, - DB: Database + Clone + Unpin + 'static, -{ - /// Configures the node's components. - /// - /// The given components builder is used to create the components of the node when it is - /// launched. 
- pub fn with_components( - self, - components_builder: Components, - ) -> WithLaunchContext< - DB, - ComponentsState< - Types, - Components, - FullNodeComponentsAdapter< - FullNodeTypesAdapter>, - Components::Pool, - >, - >, - > - where - Components: NodeComponentsBuilder< - FullNodeTypesAdapter>, - >, - { - WithLaunchContext { - builder: self.builder.with_components(components_builder), - task_executor: self.task_executor, - data_dir: self.data_dir, - } - } -} - -impl - WithLaunchContext< - DB, - ComponentsState< - Types, - Components, - FullNodeComponentsAdapter< - FullNodeTypesAdapter>, - Components::Pool, - >, - >, - > -where - DB: Database + DatabaseMetrics + DatabaseMetadata + Clone + Unpin + 'static, - Types: NodeTypes, - Components: NodeComponentsBuilder< - FullNodeTypesAdapter>, - >, -{ - /// Apply a function to the components builder. - pub fn map_components(self, f: impl FnOnce(Components) -> Components) -> Self { - Self { - builder: self.builder.map_components(f), - task_executor: self.task_executor, - data_dir: self.data_dir, - } - } - - /// Sets the hook that is run once the node's components are initialized. - pub fn on_component_initialized(mut self, hook: F) -> Self - where - F: Fn( - FullNodeComponentsAdapter< - FullNodeTypesAdapter>, - Components::Pool, - >, - ) -> eyre::Result<()> - + Send - + 'static, - { - self.builder.state.hooks.set_on_component_initialized(hook); - self - } - - /// Sets the hook that is run once the node has started. - pub fn on_node_started(mut self, hook: F) -> Self - where - F: Fn( - FullNode< - FullNodeComponentsAdapter< - FullNodeTypesAdapter>, - Components::Pool, - >, - >, - ) -> eyre::Result<()> - + Send - + 'static, - { - self.builder.state.hooks.set_on_node_started(hook); - self - } - - /// Sets the hook that is run once the rpc server is started. - pub fn on_rpc_started(mut self, hook: F) -> Self - where - F: Fn( - RpcContext< - '_, - FullNodeComponentsAdapter< - FullNodeTypesAdapter>, - Components::Pool, - >, - >, - RethRpcServerHandles, - ) -> eyre::Result<()> - + Send - + 'static, - { - self.builder.state.rpc.set_on_rpc_started(hook); - self - } - - /// Sets the hook that is run to configure the rpc modules. - pub fn extend_rpc_modules(mut self, hook: F) -> Self - where - F: Fn( - RpcContext< - '_, - FullNodeComponentsAdapter< - FullNodeTypesAdapter>, - Components::Pool, - >, - >, - ) -> eyre::Result<()> - + Send - + 'static, - { - self.builder.state.rpc.set_extend_rpc_modules(hook); - self - } - - /// Installs an ExEx (Execution Extension) in the node. - pub fn install_exex(mut self, exex_id: impl Into, exex: F) -> Self - where - F: Fn( - ExExContext< - FullNodeComponentsAdapter< - FullNodeTypesAdapter>, - Components::Pool, - >, - >, - ) -> R - + Send - + 'static, - R: Future> + Send, - E: Future> + Send, - { - self.builder.state.exexs.push((exex_id.into(), Box::new(exex))); - self - } - - /// Launches the node and returns a handle to it. - pub async fn launch( - self, - ) -> eyre::Result< - NodeHandle< - FullNodeComponentsAdapter< - FullNodeTypesAdapter>, - Components::Pool, - >, - >, - > { - let Self { builder, task_executor, data_dir } = self; - - builder.launch(task_executor, data_dir).await - } - - /// Check that the builder can be launched - /// - /// This is useful when writing tests to ensure that the builder is configured correctly. - pub fn check_launch(self) -> Self { - self - } -} - -/// Captures the necessary context for building the components of the node. 
-pub struct BuilderContext { - /// The current head of the blockchain at launch. - head: Head, - /// The configured provider to interact with the blockchain. - provider: Node::Provider, - /// The executor of the node. - executor: TaskExecutor, - /// The data dir of the node. - data_dir: ChainPath, - /// The config of the node - config: NodeConfig, - /// loaded config - reth_config: reth_config::Config, - /// EVM config of the node - evm_config: Node::Evm, -} - -impl BuilderContext { - /// Create a new instance of [BuilderContext] - pub fn new( - head: Head, - provider: Node::Provider, - executor: TaskExecutor, - data_dir: ChainPath, - config: NodeConfig, - reth_config: reth_config::Config, - evm_config: Node::Evm, - ) -> Self { - Self { head, provider, executor, data_dir, config, reth_config, evm_config } - } - - /// Returns the configured provider to interact with the blockchain. - pub fn provider(&self) -> &Node::Provider { - &self.provider - } - - /// Returns the configured evm. - pub fn evm_config(&self) -> &Node::Evm { - &self.evm_config - } - - /// Returns the current head of the blockchain at launch. - pub fn head(&self) -> Head { - self.head - } - - /// Returns the config of the node. - pub fn config(&self) -> &NodeConfig { - &self.config - } - - /// Returns the data dir of the node. - /// - /// This gives access to all relevant files and directories of the node's datadir. - pub fn data_dir(&self) -> &ChainPath { - &self.data_dir - } - - /// Returns the executor of the node. - /// - /// This can be used to execute async tasks or functions during the setup. - pub fn task_executor(&self) -> &TaskExecutor { - &self.executor - } - - /// Returns the chain spec of the node. - pub fn chain_spec(&self) -> Arc { - self.provider().chain_spec() - } - - /// Returns the transaction pool config of the node. - pub fn pool_config(&self) -> PoolConfig { - self.config().txpool.pool_config() - } - - /// Loads `MAINNET_KZG_TRUSTED_SETUP`. - pub fn kzg_settings(&self) -> eyre::Result> { - Ok(Arc::clone(&MAINNET_KZG_TRUSTED_SETUP)) - } - - /// Returns the config for payload building. - pub fn payload_builder_config(&self) -> impl PayloadBuilderConfig { - self.config.builder.clone() - } - - /// Returns the default network config for the node. - pub fn network_config(&self) -> eyre::Result> { - self.config.network_config( - &self.reth_config, - self.provider.clone(), - self.executor.clone(), - self.head, - self.data_dir(), - ) - } - - /// Creates the [NetworkBuilder] for the node. - pub async fn network_builder(&self) -> eyre::Result> { - self.config - .build_network( - &self.reth_config, - self.provider.clone(), - self.executor.clone(), - self.head, - self.data_dir(), - ) - .await - } - - /// Convenience function to start the network. - /// - /// Spawns the configured network and associated tasks and returns the [NetworkHandle] connected - /// to that network. 
- pub fn start_network( - &self, - builder: NetworkBuilder, - pool: Pool, - ) -> NetworkHandle - where - Pool: TransactionPool + Unpin + 'static, - { - let (handle, network, txpool, eth) = builder - .transactions(pool, Default::default()) - .request_handler(self.provider().clone()) - .split_with_handle(); - - self.executor.spawn_critical("p2p txpool", txpool); - self.executor.spawn_critical("p2p eth request handler", eth); - - let default_peers_path = self.data_dir().known_peers_path(); - let known_peers_file = self.config.network.persistent_peers_file(default_peers_path); - self.executor.spawn_critical_with_graceful_shutdown_signal( - "p2p network task", - |shutdown| { - network.run_until_graceful_shutdown(shutdown, |network| { - write_peers_to_file(network, known_peers_file) - }) - }, - ); - - handle - } -} - -impl std::fmt::Debug for BuilderContext { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - f.debug_struct("BuilderContext") - .field("head", &self.head) - .field("provider", &std::any::type_name::()) - .field("executor", &self.executor) - .field("data_dir", &self.data_dir) - .field("config", &self.config) - .finish() - } -} - -/// The initial state of the node builder process. -#[derive(Debug, Default)] -#[non_exhaustive] -pub struct InitState; - -/// The state after all types of the node have been configured. -#[derive(Debug)] -pub struct TypesState -where - DB: Database + Clone + 'static, - Types: NodeTypes, -{ - adapter: FullNodeTypesAdapter>, -} - -/// The state of the node builder process after the node's components have been configured. -/// -/// With this state all types and components of the node are known and the node can be launched. -/// -/// Additionally, this state captures additional hooks that are called at specific points in the -/// node's launch lifecycle. -pub struct ComponentsState { - /// The types of the node. - types: Types, - /// Type that builds the components of the node. - components_builder: Components, - /// Additional NodeHooks that are called at specific points in the node's launch lifecycle. - hooks: NodeHooks, - /// Additional RPC hooks. - rpc: RpcHooks, - /// The ExExs (execution extensions) of the node. - exexs: Vec<(String, Box>)>, -} - -impl std::fmt::Debug - for ComponentsState -{ - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - f.debug_struct("ComponentsState") - .field("types", &std::any::type_name::()) - .field("components_builder", &std::any::type_name::()) - .field("hooks", &self.hooks) - .field("rpc", &self.rpc) - .field("exexs", &self.exexs.len()) - .finish() - } -} diff --git a/crates/node-builder/src/components/builder.rs b/crates/node-builder/src/components/builder.rs deleted file mode 100644 index 6abdca96c..000000000 --- a/crates/node-builder/src/components/builder.rs +++ /dev/null @@ -1,200 +0,0 @@ -//! A generic [NodeComponentsBuilder] - -use crate::{ - components::{NetworkBuilder, NodeComponents, PayloadServiceBuilder, PoolBuilder}, - BuilderContext, FullNodeTypes, -}; -use reth_transaction_pool::TransactionPool; -use std::marker::PhantomData; - -/// A generic, customizable [`NodeComponentsBuilder`]. -/// -/// This type is stateful and captures the configuration of the node's components. -/// -/// ## Component dependencies: -/// -/// The components of the node depend on each other: -/// - The payload builder service depends on the transaction pool. -/// - The network depends on the transaction pool. 
-/// -/// We distinguish between different kind of components: -/// - Components that are standalone, such as the transaction pool. -/// - Components that are spawned as a service, such as the payload builder service or the network. -/// -/// ## Builder lifecycle: -/// -/// First all standalone components are built. Then the service components are spawned. -/// All component builders are captured in the builder state and will be consumed once the node is -/// launched. -#[derive(Debug)] -pub struct ComponentsBuilder { - pool_builder: PoolB, - payload_builder: PayloadB, - network_builder: NetworkB, - _marker: PhantomData, -} - -impl ComponentsBuilder { - /// Configures the node types. - pub fn node_types(self) -> ComponentsBuilder - where - Types: FullNodeTypes, - { - let Self { pool_builder, payload_builder, network_builder, _marker } = self; - ComponentsBuilder { - pool_builder, - payload_builder, - network_builder, - _marker: Default::default(), - } - } - - /// Apply a function to the pool builder. - pub fn map_pool(self, f: impl FnOnce(PoolB) -> PoolB) -> Self { - Self { - pool_builder: f(self.pool_builder), - payload_builder: self.payload_builder, - network_builder: self.network_builder, - _marker: self._marker, - } - } - - /// Apply a function to the payload builder. - pub fn map_payload(self, f: impl FnOnce(PayloadB) -> PayloadB) -> Self { - Self { - pool_builder: self.pool_builder, - payload_builder: f(self.payload_builder), - network_builder: self.network_builder, - _marker: self._marker, - } - } - - /// Apply a function to the network builder. - pub fn map_network(self, f: impl FnOnce(NetworkB) -> NetworkB) -> Self { - Self { - pool_builder: self.pool_builder, - payload_builder: self.payload_builder, - network_builder: f(self.network_builder), - _marker: self._marker, - } - } -} - -impl ComponentsBuilder -where - Node: FullNodeTypes, -{ - /// Configures the pool builder. - /// - /// This accepts a [PoolBuilder] instance that will be used to create the node's transaction - /// pool. - pub fn pool(self, pool_builder: PB) -> ComponentsBuilder - where - PB: PoolBuilder, - { - let Self { pool_builder: _, payload_builder, network_builder, _marker } = self; - ComponentsBuilder { pool_builder, payload_builder, network_builder, _marker } - } -} - -impl ComponentsBuilder -where - Node: FullNodeTypes, - PoolB: PoolBuilder, -{ - /// Configures the network builder. - /// - /// This accepts a [NetworkBuilder] instance that will be used to create the node's network - /// stack. - pub fn network(self, network_builder: NB) -> ComponentsBuilder - where - NB: NetworkBuilder, - { - let Self { pool_builder, payload_builder, network_builder: _, _marker } = self; - ComponentsBuilder { pool_builder, payload_builder, network_builder, _marker } - } - - /// Configures the payload builder. - /// - /// This accepts a [PayloadServiceBuilder] instance that will be used to create the node's - /// payload builder service. 
- pub fn payload(self, payload_builder: PB) -> ComponentsBuilder - where - PB: PayloadServiceBuilder, - { - let Self { pool_builder, payload_builder: _, network_builder, _marker } = self; - ComponentsBuilder { pool_builder, payload_builder, network_builder, _marker } - } -} - -impl NodeComponentsBuilder - for ComponentsBuilder -where - Node: FullNodeTypes, - PoolB: PoolBuilder, - NetworkB: NetworkBuilder, - PayloadB: PayloadServiceBuilder, -{ - type Pool = PoolB::Pool; - - async fn build_components( - self, - context: &BuilderContext, - ) -> eyre::Result> { - let Self { pool_builder, payload_builder, network_builder, _marker } = self; - - let pool = pool_builder.build_pool(context).await?; - let network = network_builder.build_network(context, pool.clone()).await?; - let payload_builder = payload_builder.spawn_payload_service(context, pool.clone()).await?; - - Ok(NodeComponents { transaction_pool: pool, network, payload_builder }) - } -} - -impl Default for ComponentsBuilder<(), (), (), ()> { - fn default() -> Self { - Self { - pool_builder: (), - payload_builder: (), - network_builder: (), - _marker: Default::default(), - } - } -} - -/// A type that configures all the customizable components of the node and knows how to build them. -/// -/// Implementors of this trait are responsible for building all the components of the node: See -/// [NodeComponents]. -/// -/// The [ComponentsBuilder] is a generic implementation of this trait that can be used to customize -/// certain components of the node using the builder pattern and defaults, e.g. Ethereum and -/// Optimism. -pub trait NodeComponentsBuilder { - /// The transaction pool to use. - type Pool: TransactionPool + Unpin + 'static; - - /// Builds the components of the node. - fn build_components( - self, - context: &BuilderContext, - ) -> impl std::future::Future>> + Send; -} - -impl NodeComponentsBuilder for F -where - Node: FullNodeTypes, - F: FnOnce(&BuilderContext) -> Fut + Send, - Fut: std::future::Future>> + Send, - Pool: TransactionPool + Unpin + 'static, -{ - type Pool = Pool; - - fn build_components( - self, - ctx: &BuilderContext, - ) -> impl std::future::Future>> + Send - { - self(ctx) - } -} diff --git a/crates/node-builder/src/components/mod.rs b/crates/node-builder/src/components/mod.rs deleted file mode 100644 index 4aa73f0ff..000000000 --- a/crates/node-builder/src/components/mod.rs +++ /dev/null @@ -1,41 +0,0 @@ -//! Support for configuring the components of a node. -//! -//! Customizable components of the node include: -//! - The transaction pool. -//! - The network implementation. -//! - The payload builder service. -//! -//! Components depend on a fully type configured node: [FullNodeTypes](crate::node::FullNodeTypes). - -use crate::FullNodeTypes; -pub use builder::*; -pub use network::*; -pub use payload::*; -pub use pool::*; -use reth_network::NetworkHandle; -use reth_payload_builder::PayloadBuilderHandle; - -mod builder; -mod network; -mod payload; -mod pool; - -/// All the components of the node. -/// -/// This provides access to all the components of the node. -#[derive(Debug)] -pub struct NodeComponents { - /// The transaction pool of the node. - pub transaction_pool: Pool, - /// The network implementation of the node. - pub network: NetworkHandle, - /// The handle to the payload builder service. - pub payload_builder: PayloadBuilderHandle, -} - -impl NodeComponents { - /// Returns the handle to the payload builder service. 
- pub fn payload_builder(&self) -> PayloadBuilderHandle { - self.payload_builder.clone() - } -} diff --git a/crates/node-core/Cargo.toml b/crates/node-core/Cargo.toml index d6df37f09..ef5d63b3f 100644 --- a/crates/node-core/Cargo.toml +++ b/crates/node-core/Cargo.toml @@ -27,19 +27,25 @@ reth-transaction-pool.workspace = true reth-tracing.workspace = true reth-config.workspace = true reth-discv4.workspace = true +reth-discv5.workspace = true reth-net-nat.workspace = true reth-network-api.workspace = true reth-evm.workspace = true reth-engine-primitives.workspace = true reth-tasks.workspace = true +reth-trie.workspace = true reth-consensus-common.workspace = true reth-beacon-consensus.workspace = true +reth-etl.workspace = true +reth-codecs.workspace = true # ethereum discv5.workspace = true # async tokio.workspace = true +tokio-util.workspace = true +pin-project.workspace = true # metrics metrics-exporter-prometheus = "0.12.1" @@ -71,7 +77,11 @@ hyper.workspace = true tracing.workspace = true # crypto -secp256k1 = { workspace = true, features = ["global-context", "rand-std", "recovery"] } +secp256k1 = { workspace = true, features = [ + "global-context", + "rand-std", + "recovery", +] } # async futures.workspace = true @@ -97,7 +107,6 @@ optimism = [ "reth-rpc-engine-api/optimism", "reth-provider/optimism", "reth-rpc-types-compat/optimism", - "reth-consensus-common/optimism", "reth-beacon-consensus/optimism", ] diff --git a/crates/node-core/src/args/database_args.rs b/crates/node-core/src/args/database.rs similarity index 100% rename from crates/node-core/src/args/database_args.rs rename to crates/node-core/src/args/database.rs diff --git a/crates/node-core/src/args/debug_args.rs b/crates/node-core/src/args/debug.rs similarity index 66% rename from crates/node-core/src/args/debug_args.rs rename to crates/node-core/src/args/debug.rs index 916b4a1ef..d1c4e9b73 100644 --- a/crates/node-core/src/args/debug_args.rs +++ b/crates/node-core/src/args/debug.rs @@ -1,7 +1,7 @@ //! clap [Args](clap::Args) for debugging purposes use clap::Args; -use reth_primitives::{TxHash, B256}; +use reth_primitives::B256; use std::path::PathBuf; /// Parameters for debugging purposes @@ -28,41 +28,14 @@ pub struct DebugArgs { #[arg(long = "debug.max-block", help_heading = "Debug")] pub max_block: Option, - /// Print opcode level traces directly to console during execution. - #[arg(long = "debug.print-inspector", help_heading = "Debug")] - pub print_inspector: bool, - - /// Hook on a specific block during execution. - #[arg( - long = "debug.hook-block", - help_heading = "Debug", - conflicts_with = "hook_transaction", - conflicts_with = "hook_all" - )] - pub hook_block: Option, - - /// Hook on a specific transaction during execution. - #[arg( - long = "debug.hook-transaction", - help_heading = "Debug", - conflicts_with = "hook_block", - conflicts_with = "hook_all" - )] - pub hook_transaction: Option, - - /// Hook on every transaction in a block. - #[arg( - long = "debug.hook-all", - help_heading = "Debug", - conflicts_with = "hook_block", - conflicts_with = "hook_transaction" - )] - pub hook_all: bool, - /// If provided, the engine will skip `n` consecutive FCUs. #[arg(long = "debug.skip-fcu", help_heading = "Debug")] pub skip_fcu: Option, + /// If provided, the engine will skip `n` consecutive new payloads. + #[arg(long = "debug.skip-new-payload", help_heading = "Debug")] + pub skip_new_payload: Option, + /// The path to store engine API messages at. 
/// If specified, all of the intercepted engine API messages /// will be written to the specified location. diff --git a/crates/node-core/src/args/dev_args.rs b/crates/node-core/src/args/dev.rs similarity index 100% rename from crates/node-core/src/args/dev_args.rs rename to crates/node-core/src/args/dev.rs diff --git a/crates/node-core/src/args/gas_price_oracle_args.rs b/crates/node-core/src/args/gas_price_oracle.rs similarity index 100% rename from crates/node-core/src/args/gas_price_oracle_args.rs rename to crates/node-core/src/args/gas_price_oracle.rs diff --git a/crates/node-core/src/args/log_args.rs b/crates/node-core/src/args/log.rs similarity index 100% rename from crates/node-core/src/args/log_args.rs rename to crates/node-core/src/args/log.rs diff --git a/crates/node-core/src/args/mod.rs b/crates/node-core/src/args/mod.rs index 14b63dd74..bce63917b 100644 --- a/crates/node-core/src/args/mod.rs +++ b/crates/node-core/src/args/mod.rs @@ -1,55 +1,55 @@ //! Parameters for configuring the rpc with more granularity via CLI /// NetworkArg struct for configuring the network -mod network_args; -pub use network_args::{DiscoveryArgs, NetworkArgs}; +mod network; +pub use network::{DiscoveryArgs, NetworkArgs}; /// RpcServerArg struct for configuring the RPC -mod rpc_server_args; -pub use rpc_server_args::RpcServerArgs; +mod rpc_server; +pub use rpc_server::RpcServerArgs; /// RpcStateCacheArgs struct for configuring RPC state cache -mod rpc_state_cache_args; -pub use rpc_state_cache_args::RpcStateCacheArgs; +mod rpc_state_cache; +pub use rpc_state_cache::RpcStateCacheArgs; /// DebugArgs struct for debugging purposes -mod debug_args; -pub use debug_args::DebugArgs; +mod debug; +pub use debug::DebugArgs; /// DatabaseArgs struct for configuring the database -mod database_args; -pub use database_args::DatabaseArgs; +mod database; +pub use database::DatabaseArgs; /// LogArgs struct for configuring the logger -mod log_args; -pub use log_args::{ColorMode, LogArgs}; +mod log; +pub use log::{ColorMode, LogArgs}; mod secret_key; pub use secret_key::{get_secret_key, SecretKeyError}; /// PayloadBuilderArgs struct for configuring the payload builder -mod payload_builder_args; -pub use payload_builder_args::PayloadBuilderArgs; +mod payload_builder; +pub use payload_builder::PayloadBuilderArgs; /// Stage related arguments -mod stage_args; -pub use stage_args::StageEnum; +mod stage; +pub use stage::StageEnum; /// Gas price oracle related arguments -mod gas_price_oracle_args; -pub use gas_price_oracle_args::GasPriceOracleArgs; +mod gas_price_oracle; +pub use gas_price_oracle::GasPriceOracleArgs; /// TxPoolArgs for configuring the transaction pool -mod txpool_args; -pub use txpool_args::TxPoolArgs; +mod txpool; +pub use txpool::TxPoolArgs; /// DevArgs for configuring the dev testnet -mod dev_args; -pub use dev_args::DevArgs; +mod dev; +pub use dev::DevArgs; /// PruneArgs for configuring the pruning and full node -mod pruning_args; -pub use pruning_args::PruningArgs; +mod pruning; +pub use pruning::PruningArgs; pub mod utils; diff --git a/crates/node-core/src/args/network_args.rs b/crates/node-core/src/args/network.rs similarity index 81% rename from crates/node-core/src/args/network_args.rs rename to crates/node-core/src/args/network.rs index 59dc6ceba..9ff93c5a9 100644 --- a/crates/node-core/src/args/network_args.rs +++ b/crates/node-core/src/args/network.rs @@ -3,9 +3,10 @@ use crate::version::P2P_CLIENT_VERSION; use clap::Args; use reth_config::Config; -use reth_discv4::{ - DEFAULT_DISCOVERY_ADDR,
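To see how these renamed argument modules are consumed, here is a sketch of a binary flattening several of the re-exported `Args` groups into one clap parser. This is illustrative only and is not reth's actual CLI definition; it assumes the `reth_node_core::args` re-exports shown above.

```rust
use clap::Parser;
use reth_node_core::args::{DatabaseArgs, DebugArgs, NetworkArgs, PruningArgs};

/// Each group is a `clap::Args` struct that a binary flattens into its
/// top-level parser, so flags like `--debug.skip-fcu` and `--full` land in
/// their own typed sub-structs.
#[derive(Debug, Parser)]
struct Cli {
    #[command(flatten)]
    network: NetworkArgs,
    #[command(flatten)]
    debug: DebugArgs,
    #[command(flatten)]
    database: DatabaseArgs,
    #[command(flatten)]
    pruning: PruningArgs,
}

fn main() {
    // e.g. `mybin --debug.skip-fcu 2 --full` parses into the flattened groups.
    let cli = Cli::parse();
    println!("{cli:?}");
}
```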
DEFAULT_DISCOVERY_PORT, DEFAULT_DISCOVERY_V5_ADDR, - DEFAULT_DISCOVERY_V5_PORT, +use reth_discv4::{DEFAULT_DISCOVERY_ADDR, DEFAULT_DISCOVERY_PORT}; +use reth_discv5::{ + DEFAULT_COUNT_BOOTSTRAP_LOOKUPS, DEFAULT_DISCOVERY_V5_PORT, + DEFAULT_SECONDS_BOOTSTRAP_LOOKUP_INTERVAL, DEFAULT_SECONDS_LOOKUP_INTERVAL, +}; use reth_net_nat::NatResolver; use reth_network::{ @@ -18,7 +19,11 @@ use reth_network::{ }; use reth_primitives::{mainnet_nodes, ChainSpec, NodeRecord}; use secp256k1::SecretKey; -use std::{net::IpAddr, path::PathBuf, sync::Arc}; +use std::{ + net::{IpAddr, Ipv4Addr, Ipv6Addr}, + path::PathBuf, + sync::Arc, +}; /// Parameters for configuring the network with more granularity via CLI #[derive(Debug, Clone, Args, PartialEq, Eq)] @@ -88,7 +93,7 @@ pub struct NetworkArgs { /// `GetPooledTransactions` request. Spec'd at 2 MiB. /// /// <https://github.com/ethereum/devp2p/blob/master/caps/eth.md#protocol-messages>. - #[arg(long = "pooled-tx-response-soft-limit", value_name = "BYTES", default_value_t = SOFT_LIMIT_BYTE_SIZE_POOLED_TRANSACTIONS_RESPONSE, help = "Sets the soft limit for the byte size of pooled transactions response. Specified at 2 MiB by default. This is a spec'd value that should only be set for experimental purposes on a testnet.")] + #[arg(long = "pooled-tx-response-soft-limit", value_name = "BYTES", default_value_t = SOFT_LIMIT_BYTE_SIZE_POOLED_TRANSACTIONS_RESPONSE, help = "Sets the soft limit for the byte size of pooled transactions response. Specified at 2 MiB by default. This is a spec'd value that should only be set for experimental purposes on a testnet.", long_help = None)] pub soft_limit_byte_size_pooled_transactions_response: usize, /// Default soft limit for the byte size of a `PooledTransactions` response on assembling a /// `GetPooledTransactions` request. This defaults to less /// than the [`SOFT_LIMIT_BYTE_SIZE_POOLED_TRANSACTIONS_RESPONSE`], at 2 MiB, used when /// assembling a `PooledTransactions` response. Default /// is 128 KiB. - #[arg(long = "pooled-tx-pack-soft-limit", value_name = "BYTES", default_value_t = DEFAULT_SOFT_LIMIT_BYTE_SIZE_POOLED_TRANSACTIONS_RESP_ON_PACK_GET_POOLED_TRANSACTIONS_REQ)] + #[arg(long = "pooled-tx-pack-soft-limit", value_name = "BYTES", default_value_t = DEFAULT_SOFT_LIMIT_BYTE_SIZE_POOLED_TRANSACTIONS_RESP_ON_PACK_GET_POOLED_TRANSACTIONS_REQ, help = "Sets the soft limit for the byte size of a single pooled transactions response when packing multiple responses into a single packet for a `GetPooledTransactions` request. Specified at 128 KiB by default.", long_help = None)] pub soft_limit_byte_size_pooled_transactions_response_on_pack_request: usize, } @@ -226,15 +231,41 @@ pub struct DiscoveryArgs { #[arg(id = "discovery.port", long = "discovery.port", value_name = "DISCOVERY_PORT", default_value_t = DEFAULT_DISCOVERY_PORT)] pub port: u16, - /// The UDP address to use for devp2p peer discovery version 5. - #[arg(id = "discovery.v5.addr", long = "discovery.v5.addr", value_name = "DISCOVERY_V5_ADDR", - default_value_t = DEFAULT_DISCOVERY_V5_ADDR)] - pub discv5_addr: IpAddr, + /// The UDP IPv4 address to use for devp2p peer discovery version 5. + #[arg(id = "discovery.v5.addr", long = "discovery.v5.addr", value_name = "DISCOVERY_V5_ADDR", default_value = None)] + pub discv5_addr: Option<Ipv4Addr>, + + /// The UDP IPv6 address to use for devp2p peer discovery version 5. + #[arg(id = "discovery.v5.addr.ipv6", long = "discovery.v5.addr.ipv6", value_name = "DISCOVERY_V5_ADDR_IPV6", default_value = None)] + pub discv5_addr_ipv6: Option<Ipv6Addr>, - /// The UDP port to use for devp2p peer discovery version 5.
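The optional `discv5_addr`/`discv5_addr_ipv6` fields above, together with the port fields that follow, feed a dual-stack socket derivation later in this diff (in `node_config.rs`). A self-contained sketch of that selection logic, using only `std` types: an explicit flag wins, otherwise the RLPx listener address is reused when its family matches.

```rust
use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddrV4, SocketAddrV6};

/// Derive the discv5 IPv4/IPv6 sockets: each socket only materializes if an
/// address of the matching family is available, either from an explicit flag
/// or from the RLPx listener address.
fn discv5_sockets(
    rlpx_addr: IpAddr,
    discv5_addr: Option<Ipv4Addr>,
    discv5_addr_ipv6: Option<Ipv6Addr>,
    port_v4: u16,
    port_v6: u16,
) -> (Option<SocketAddrV4>, Option<SocketAddrV6>) {
    let v4 = discv5_addr.or(match rlpx_addr {
        IpAddr::V4(ip) => Some(ip),
        IpAddr::V6(_) => None,
    });
    let v6 = discv5_addr_ipv6.or(match rlpx_addr {
        IpAddr::V4(_) => None,
        IpAddr::V6(ip) => Some(ip),
    });
    (
        v4.map(|ip| SocketAddrV4::new(ip, port_v4)),
        v6.map(|ip| SocketAddrV6::new(ip, port_v6, 0, 0)),
    )
}

fn main() {
    // RLPx listens on IPv4 and no explicit discv5 address is set:
    // only the IPv4 socket is produced.
    let (v4, v6) = discv5_sockets("0.0.0.0".parse().unwrap(), None, None, 9000, 9000);
    assert!(v4.is_some() && v6.is_none());
}
```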
+ /// The UDP IPv4 port to use for devp2p peer discovery version 5. Not used unless `--addr` is + /// IPv4, or `--discovery.v5.addr` is set. #[arg(id = "discovery.v5.port", long = "discovery.v5.port", value_name = "DISCOVERY_V5_PORT", default_value_t = DEFAULT_DISCOVERY_V5_PORT)] pub discv5_port: u16, + + /// The UDP IPv6 port to use for devp2p peer discovery version 5. Not used unless `--addr` is + /// IPv6, or `--discovery.v5.addr.ipv6` is set. + #[arg(id = "discovery.v5.port.ipv6", long = "discovery.v5.port.ipv6", value_name = "DISCOVERY_V5_PORT_IPV6", + default_value_t = DEFAULT_DISCOVERY_V5_PORT)] + pub discv5_port_ipv6: u16, + + /// The interval in seconds at which to carry out periodic lookup queries, for the whole + /// run of the program. + #[arg(id = "discovery.v5.lookup-interval", long = "discovery.v5.lookup-interval", value_name = "DISCOVERY_V5_LOOKUP_INTERVAL", default_value_t = DEFAULT_SECONDS_LOOKUP_INTERVAL)] + pub discv5_lookup_interval: u64, + + /// The interval in seconds at which to carry out boost lookup queries, for a fixed number of + /// times, at bootstrap. + #[arg(id = "discovery.v5.bootstrap.lookup-interval", long = "discovery.v5.bootstrap.lookup-interval", value_name = "DISCOVERY_V5_BOOTSTRAP_LOOKUP_INTERVAL", + default_value_t = DEFAULT_SECONDS_BOOTSTRAP_LOOKUP_INTERVAL)] + pub discv5_bootstrap_lookup_interval: u64, + + /// The number of times to carry out boost lookup queries at bootstrap. + #[arg(id = "discovery.v5.bootstrap.lookup-countdown", long = "discovery.v5.bootstrap.lookup-countdown", value_name = "DISCOVERY_V5_BOOTSTRAP_LOOKUP_COUNTDOWN", + default_value_t = DEFAULT_COUNT_BOOTSTRAP_LOOKUPS)] + pub discv5_bootstrap_lookup_countdown: u64, } impl DiscoveryArgs { @@ -251,11 +282,6 @@ impl DiscoveryArgs { network_config_builder = network_config_builder.disable_discv4_discovery(); } - if !self.disable_discovery && (self.enable_discv5_discovery || cfg!(feature = "optimism")) { - network_config_builder = network_config_builder.disable_discv4_discovery(); - network_config_builder = network_config_builder.enable_discv5_discovery(); - } - network_config_builder } @@ -276,8 +302,13 @@ impl Default for DiscoveryArgs { enable_discv5_discovery: cfg!(feature = "optimism"), addr: DEFAULT_DISCOVERY_ADDR, port: DEFAULT_DISCOVERY_PORT, - discv5_addr: DEFAULT_DISCOVERY_V5_ADDR, + discv5_addr: None, + discv5_addr_ipv6: None, discv5_port: DEFAULT_DISCOVERY_V5_PORT, + discv5_port_ipv6: DEFAULT_DISCOVERY_V5_PORT, + discv5_lookup_interval: DEFAULT_SECONDS_LOOKUP_INTERVAL, + discv5_bootstrap_lookup_interval: DEFAULT_SECONDS_BOOTSTRAP_LOOKUP_INTERVAL, + discv5_bootstrap_lookup_countdown: DEFAULT_COUNT_BOOTSTRAP_LOOKUPS, } } } diff --git a/crates/node-core/src/args/payload_builder_args.rs b/crates/node-core/src/args/payload_builder.rs similarity index 100% rename from crates/node-core/src/args/payload_builder_args.rs rename to crates/node-core/src/args/payload_builder.rs diff --git a/crates/node-core/src/args/pruning_args.rs b/crates/node-core/src/args/pruning.rs similarity index 52% rename from crates/node-core/src/args/pruning_args.rs rename to crates/node-core/src/args/pruning.rs index 52605338e..4adc72158 100644 --- a/crates/node-core/src/args/pruning_args.rs +++ b/crates/node-core/src/args/pruning.rs @@ -5,7 +5,6 @@ use reth_config::config::PruneConfig; use reth_primitives::{ ChainSpec, PruneMode, PruneModes, ReceiptsLogPruneConfig, MINIMUM_PRUNING_DISTANCE, }; -use std::sync::Arc; /// Parameters for pruning and full node #[derive(Debug, Clone, Args, PartialEq,
Eq, Default)] @@ -19,31 +18,30 @@ pub struct PruningArgs { impl PruningArgs { /// Returns pruning configuration. - pub fn prune_config(&self, chain_spec: Arc) -> eyre::Result> { - Ok(if self.full { - Some(PruneConfig { - block_interval: 5, - segments: PruneModes { - sender_recovery: Some(PruneMode::Full), - transaction_lookup: None, - receipts: chain_spec + pub fn prune_config(&self, chain_spec: &ChainSpec) -> Option { + if !self.full { + return None; + } + Some(PruneConfig { + block_interval: 5, + segments: PruneModes { + sender_recovery: Some(PruneMode::Full), + transaction_lookup: None, + receipts: chain_spec + .deposit_contract + .as_ref() + .map(|contract| PruneMode::Before(contract.block)), + account_history: Some(PruneMode::Distance(MINIMUM_PRUNING_DISTANCE)), + storage_history: Some(PruneMode::Distance(MINIMUM_PRUNING_DISTANCE)), + receipts_log_filter: ReceiptsLogPruneConfig( + chain_spec .deposit_contract .as_ref() - .map(|contract| PruneMode::Before(contract.block)), - account_history: Some(PruneMode::Distance(MINIMUM_PRUNING_DISTANCE)), - storage_history: Some(PruneMode::Distance(MINIMUM_PRUNING_DISTANCE)), - receipts_log_filter: ReceiptsLogPruneConfig( - chain_spec - .deposit_contract - .as_ref() - .map(|contract| (contract.address, PruneMode::Before(contract.block))) - .into_iter() - .collect(), - ), - }, - }) - } else { - None + .map(|contract| (contract.address, PruneMode::Before(contract.block))) + .into_iter() + .collect(), + ), + }, }) } } diff --git a/crates/node-core/src/args/rpc_server_args.rs b/crates/node-core/src/args/rpc_server.rs similarity index 98% rename from crates/node-core/src/args/rpc_server_args.rs rename to crates/node-core/src/args/rpc_server.rs index b12f2740a..e19a88737 100644 --- a/crates/node-core/src/args/rpc_server_args.rs +++ b/crates/node-core/src/args/rpc_server.rs @@ -363,7 +363,7 @@ impl RpcServerArgs { impl RethRpcConfig for RpcServerArgs { fn is_ipc_enabled(&self) -> bool { - // By default IPC is enabled therefor it is enabled if the `ipcdisable` is false. + // By default IPC is enabled therefore it is enabled if the `ipcdisable` is false. 
!self.ipcdisable } @@ -437,7 +437,7 @@ impl RethRpcConfig for RpcServerArgs { .max_subscriptions_per_connection(self.rpc_max_subscriptions_per_connection.get()) } - fn ipc_server_builder(&self) -> IpcServerBuilder { + fn ipc_server_builder(&self) -> IpcServerBuilder { IpcServerBuilder::default() .max_subscriptions_per_connection(self.rpc_max_subscriptions_per_connection.get()) .max_request_body_size(self.rpc_max_request_size_bytes()) @@ -475,7 +475,9 @@ impl RethRpcConfig for RpcServerArgs { let mut builder = AuthServerConfig::builder(jwt_secret).socket_addr(address); if self.auth_ipc { - builder = builder.ipc_endpoint(self.auth_ipc_path.clone()); + builder = builder + .ipc_endpoint(self.auth_ipc_path.clone()) + .with_ipc_config(self.ipc_server_builder()); } Ok(builder.build()) } @@ -711,7 +713,7 @@ mod tests { config.ws_address().unwrap(), SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::new(127, 0, 0, 1), 8888)) ); - assert_eq!(config.ipc_endpoint().unwrap().path(), constants::DEFAULT_IPC_ENDPOINT); + assert_eq!(config.ipc_endpoint().unwrap(), constants::DEFAULT_IPC_ENDPOINT); } #[test] diff --git a/crates/node-core/src/args/rpc_state_cache_args.rs b/crates/node-core/src/args/rpc_state_cache.rs similarity index 100% rename from crates/node-core/src/args/rpc_state_cache_args.rs rename to crates/node-core/src/args/rpc_state_cache.rs diff --git a/crates/node-core/src/args/stage_args.rs b/crates/node-core/src/args/stage.rs similarity index 93% rename from crates/node-core/src/args/stage_args.rs rename to crates/node-core/src/args/stage.rs index d90eabcfc..337f5a4a6 100644 --- a/crates/node-core/src/args/stage_args.rs +++ b/crates/node-core/src/args/stage.rs @@ -30,11 +30,11 @@ pub enum StageEnum { /// /// Manages operations related to hashing storage data. StorageHashing, - /// The hashing stage within the pipeline. + /// The account and storage hashing stages within the pipeline. /// /// Covers general data hashing operations. Hashing, - /// The Merkle stage within the pipeline. + /// The merkle stage within the pipeline. /// /// Handles Merkle tree-related computations and data processing. Merkle, diff --git a/crates/node-core/src/args/txpool_args.rs b/crates/node-core/src/args/txpool.rs similarity index 97% rename from crates/node-core/src/args/txpool_args.rs rename to crates/node-core/src/args/txpool.rs index db9e43d82..12fc6bd79 100644 --- a/crates/node-core/src/args/txpool_args.rs +++ b/crates/node-core/src/args/txpool.rs @@ -35,7 +35,7 @@ pub struct TxPoolArgs { pub queued_max_size: usize, /// Max number of executable transaction slots guaranteed per account - #[arg(long = "txpool.max-account-slots", long = "txpool.max_account_slots", default_value_t = TXPOOL_MAX_ACCOUNT_SLOTS_PER_SENDER)] + #[arg(long = "txpool.max-account-slots", alias = "txpool.max_account_slots", default_value_t = TXPOOL_MAX_ACCOUNT_SLOTS_PER_SENDER)] pub max_account_slots: usize, /// Price bump (in %) for the transaction pool underpriced check. diff --git a/crates/node-core/src/cli/config.rs b/crates/node-core/src/cli/config.rs index 988ef34d5..1bce398ef 100644 --- a/crates/node-core/src/cli/config.rs +++ b/crates/node-core/src/cli/config.rs @@ -49,7 +49,7 @@ pub trait RethRpcConfig { fn http_ws_server_builder(&self) -> ServerBuilder; /// Returns the default ipc server builder - fn ipc_server_builder(&self) -> IpcServerBuilder; + fn ipc_server_builder(&self) -> IpcServerBuilder; /// Creates the [RpcServerConfig] from cli args. 
fn rpc_server_config(&self) -> RpcServerConfig; diff --git a/crates/node-core/src/dirs.rs b/crates/node-core/src/dirs.rs index 223e65bb2..75919f6f0 100644 --- a/crates/node-core/src/dirs.rs +++ b/crates/node-core/src/dirs.rs @@ -271,63 +271,65 @@ impl ChainPath { /// Returns the path to the reth data directory for this chain. /// /// `/` - pub fn data_dir_path(&self) -> PathBuf { - self.0.as_ref().into() + pub fn data_dir(&self) -> &Path { + self.0.as_ref() } /// Returns the path to the db directory for this chain. /// /// `//db` - pub fn db_path(&self) -> PathBuf { - self.0.join("db").into() + pub fn db(&self) -> PathBuf { + self.data_dir().join("db") } /// Returns the path to the static_files directory for this chain. - pub fn static_files_path(&self) -> PathBuf { - self.0.join("static_files").into() + /// + /// `//static_files` + pub fn static_files(&self) -> PathBuf { + self.data_dir().join("static_files") } /// Returns the path to the reth p2p secret key for this chain. /// /// `//discovery-secret` - pub fn p2p_secret_path(&self) -> PathBuf { - self.0.join("discovery-secret").into() + pub fn p2p_secret(&self) -> PathBuf { + self.data_dir().join("discovery-secret") } /// Returns the path to the known peers file for this chain. /// /// `//known-peers.json` - pub fn known_peers_path(&self) -> PathBuf { - self.0.join("known-peers.json").into() + pub fn known_peers(&self) -> PathBuf { + self.data_dir().join("known-peers.json") } /// Returns the path to the blobstore directory for this chain where blobs of unfinalized /// transactions are stored. /// /// `//blobstore` - pub fn blobstore_path(&self) -> PathBuf { - self.0.join("blobstore").into() + pub fn blobstore(&self) -> PathBuf { + self.data_dir().join("blobstore") } /// Returns the path to the local transactions backup file /// /// `//txpool-transactions-backup.rlp` - pub fn txpool_transactions_path(&self) -> PathBuf { - self.0.join("txpool-transactions-backup.rlp").into() + pub fn txpool_transactions(&self) -> PathBuf { + self.data_dir().join("txpool-transactions-backup.rlp") } /// Returns the path to the config file for this chain. /// /// `//reth.toml` - pub fn config_path(&self) -> PathBuf { - self.0.join("reth.toml").into() + pub fn config(&self) -> PathBuf { + self.data_dir().join("reth.toml") } /// Returns the path to the jwtsecret file for this chain. /// /// `//jwt.hex` - pub fn jwt_path(&self) -> PathBuf { - self.0.join("jwt.hex").into() + pub fn jwt(&self) -> PathBuf { + self.data_dir().join("jwt.hex") } } @@ -359,7 +361,7 @@ mod tests { let path = path.unwrap_or_chain_default(Chain::mainnet()); assert!(path.as_ref().ends_with("reth/mainnet"), "{path:?}"); - let db_path = path.db_path(); + let db_path = path.db(); assert!(db_path.ends_with("reth/mainnet/db"), "{db_path:?}"); let path = MaybePlatformPath::::from_str("my/path/to/datadir").unwrap(); diff --git a/crates/node-core/src/engine_api_store.rs b/crates/node-core/src/engine/engine_store.rs similarity index 78% rename from crates/node-core/src/engine_api_store.rs rename to crates/node-core/src/engine/engine_store.rs index 5552137f6..524e2c89b 100644 --- a/crates/node-core/src/engine_api_store.rs +++ b/crates/node-core/src/engine/engine_store.rs @@ -1,5 +1,6 @@ //! Stores engine API messages to disk for later inspection and replay. 
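A miniature of the `ChainPath` accessor refactor above, with illustrative names: the base accessor now borrows as `&Path`, and each derived path is a single `join` on it rather than converting through `PathBuf` in every method.

```rust
use std::path::{Path, PathBuf};

/// Stand-in for `ChainPath`: one owned base path, borrowed out on demand.
struct DataDir(PathBuf);

impl DataDir {
    /// Borrowing avoids an allocation when callers only inspect the base path.
    fn data_dir(&self) -> &Path {
        &self.0
    }
    fn db(&self) -> PathBuf {
        self.data_dir().join("db")
    }
    fn jwt(&self) -> PathBuf {
        self.data_dir().join("jwt.hex")
    }
}

fn main() {
    let dir = DataDir(PathBuf::from("/data/reth/mainnet"));
    assert!(dir.data_dir().ends_with("mainnet"));
    assert!(dir.db().ends_with("mainnet/db"));
    assert!(dir.jwt().ends_with("mainnet/jwt.hex"));
}
```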
+use futures::{Stream, StreamExt}; use reth_beacon_consensus::BeaconEngineMessage; use reth_engine_primitives::EngineTypes; use reth_primitives::fs; @@ -8,8 +9,13 @@ use reth_rpc_types::{ ExecutionPayload, }; use serde::{Deserialize, Serialize}; -use std::{collections::BTreeMap, path::PathBuf, time::SystemTime}; -use tokio::sync::mpsc::{UnboundedReceiver, UnboundedSender}; +use std::{ + collections::BTreeMap, + path::PathBuf, + pin::Pin, + task::{ready, Context, Poll}, + time::SystemTime, +}; use tracing::*; /// A message from the engine API that has been stored to disk. @@ -34,13 +40,13 @@ pub enum StoredEngineApiMessage { /// This can read and write engine API messages in a specific directory. #[derive(Debug)] -pub struct EngineApiStore { +pub struct EngineMessageStore { /// The path to the directory that stores the engine API messages. path: PathBuf, } -impl EngineApiStore { - /// Creates a new [EngineApiStore] at the given path. +impl EngineMessageStore { + /// Creates a new [EngineMessageStore] at the given path. /// /// The path is expected to be a directory, where individual message JSON files will be stored. pub fn new(path: PathBuf) -> Self { @@ -108,22 +114,42 @@ impl EngineApiStore { } Ok(filenames_by_ts.into_iter().flat_map(|(_, paths)| paths)) } +} - /// Intercepts an incoming engine API message, storing it to disk and forwarding it to the - /// engine channel. - pub async fn intercept( - self, - mut rx: UnboundedReceiver>, - to_engine: UnboundedSender>, - ) where - Engine: EngineTypes, - BeaconEngineMessage: std::fmt::Debug, - { - while let Some(msg) = rx.recv().await { - if let Err(error) = self.on_message(&msg, SystemTime::now()) { +/// A wrapper stream that stores Engine API messages in +/// the specified directory. +#[derive(Debug)] +#[pin_project::pin_project] +pub struct EngineStoreStream { + /// Inner message stream. + #[pin] + stream: S, + /// Engine message store. + store: EngineMessageStore, +} + +impl EngineStoreStream { + /// Create new engine store stream wrapper. + pub fn new(stream: S, path: PathBuf) -> Self { + Self { stream, store: EngineMessageStore::new(path) } + } +} + +impl Stream for EngineStoreStream +where + Engine: EngineTypes, + S: Stream>, +{ + type Item = S::Item; + + fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + let mut this = self.project(); + let next = ready!(this.stream.poll_next_unpin(cx)); + if let Some(msg) = &next { + if let Err(error) = this.store.on_message(msg, SystemTime::now()) { error!(target: "engine::intercept", ?msg, %error, "Error handling Engine API message"); } - let _ = to_engine.send(msg); } + Poll::Ready(next) } } diff --git a/crates/node-core/src/engine/mod.rs b/crates/node-core/src/engine/mod.rs new file mode 100644 index 000000000..2c4e12e68 --- /dev/null +++ b/crates/node-core/src/engine/mod.rs @@ -0,0 +1,99 @@ +//! Collection of various stream utilities for consensus engine. + +use futures::Stream; +use reth_beacon_consensus::BeaconEngineMessage; +use reth_engine_primitives::EngineTypes; +use std::path::PathBuf; +use tokio_util::either::Either; + +pub mod engine_store; +use engine_store::EngineStoreStream; + +pub mod skip_fcu; +use skip_fcu::EngineSkipFcu; + +pub mod skip_new_payload; +use skip_new_payload::EngineSkipNewPayload; + +/// The collection of stream extensions for engine API message stream. +pub trait EngineMessageStreamExt: + Stream> +{ + /// Skips the specified number of [BeaconEngineMessage::ForkchoiceUpdated] messages from the + /// engine message stream. 
+ fn skip_fcu(self, count: usize) -> EngineSkipFcu + where + Self: Sized, + { + EngineSkipFcu::new(self, count) + } + + /// If the count is [Some], returns the stream that skips the specified number of + /// [BeaconEngineMessage::ForkchoiceUpdated] messages. Otherwise, returns `Self`. + fn maybe_skip_fcu(self, maybe_count: Option) -> Either, Self> + where + Self: Sized, + { + if let Some(count) = maybe_count { + Either::Left(self.skip_fcu(count)) + } else { + Either::Right(self) + } + } + + /// Skips the specified number of [BeaconEngineMessage::NewPayload] messages from the + /// engine message stream. + fn skip_new_payload(self, count: usize) -> EngineSkipNewPayload + where + Self: Sized, + { + EngineSkipNewPayload::new(self, count) + } + + /// If the count is [Some], returns the stream that skips the specified number of + /// [BeaconEngineMessage::NewPayload] messages. Otherwise, returns `Self`. + fn maybe_skip_new_payload( + self, + maybe_count: Option, + ) -> Either, Self> + where + Self: Sized, + { + if let Some(count) = maybe_count { + Either::Left(self.skip_new_payload(count)) + } else { + Either::Right(self) + } + } + + /// Stores engine messages at the specified location. + fn store_messages(self, path: PathBuf) -> EngineStoreStream + where + Self: Sized, + { + EngineStoreStream::new(self, path) + } + + /// If the path is [Some], returns the stream that stores engine messages at the specified + /// location. Otherwise, returns `Self`. + fn maybe_store_messages( + self, + maybe_path: Option, + ) -> Either, Self> + where + Self: Sized, + { + if let Some(path) = maybe_path { + Either::Left(self.store_messages(path)) + } else { + Either::Right(self) + } + } +} + +impl EngineMessageStreamExt for T +where + Engine: EngineTypes, + T: Stream>, +{ +} diff --git a/crates/node-core/src/engine/skip_fcu.rs b/crates/node-core/src/engine/skip_fcu.rs new file mode 100644 index 000000000..6deb34263 --- /dev/null +++ b/crates/node-core/src/engine/skip_fcu.rs @@ -0,0 +1,64 @@ +//! Stream wrapper that skips specified number of FCUs. + +use futures::{Stream, StreamExt}; +use reth_beacon_consensus::{BeaconEngineMessage, OnForkChoiceUpdated}; +use reth_engine_primitives::EngineTypes; +use std::{ + pin::Pin, + task::{ready, Context, Poll}, +}; + +/// Engine API stream wrapper that skips the specified number of forkchoice updated messages. +#[derive(Debug)] +#[pin_project::pin_project] +pub struct EngineSkipFcu { + #[pin] + stream: S, + /// The number of FCUs to skip. + threshold: usize, + /// Current count of skipped FCUs. + skipped: usize, +} + +impl EngineSkipFcu { + /// Creates new [EngineSkipFcu] stream wrapper. + pub fn new(stream: S, threshold: usize) -> Self { + Self { + stream, + threshold, + // Start with `threshold` so that the first FCU goes through. 
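Putting the extension trait together: a sketch of how a launcher might compose these interceptors, using the names introduced in this diff. Each `maybe_*` combinator wraps the stream in `tokio_util::either::Either` and degenerates to a pass-through when its option is `None`, so the plain engine stream is preserved in the default case.

```rust
use futures::Stream;
use reth_beacon_consensus::BeaconEngineMessage;
use reth_engine_primitives::EngineTypes;
use reth_node_core::engine::EngineMessageStreamExt;
use std::path::PathBuf;

/// Compose the interceptors in the order a node launcher might apply them:
/// skip FCUs, skip new payloads, then persist whatever passes through.
fn configure_engine_stream<Engine, S>(
    incoming: S,
    skip_fcu: Option<usize>,
    skip_new_payload: Option<usize>,
    store_path: Option<PathBuf>,
) -> impl Stream<Item = BeaconEngineMessage<Engine>>
where
    Engine: EngineTypes,
    S: Stream<Item = BeaconEngineMessage<Engine>>,
{
    incoming
        .maybe_skip_fcu(skip_fcu)
        .maybe_skip_new_payload(skip_new_payload)
        .maybe_store_messages(store_path)
}
```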
+ skipped: threshold, + } + } +} + +impl Stream for EngineSkipFcu +where + Engine: EngineTypes, + S: Stream>, +{ + type Item = S::Item; + + fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + let mut this = self.project(); + + loop { + let next = ready!(this.stream.poll_next_unpin(cx)); + let item = match next { + Some(BeaconEngineMessage::ForkchoiceUpdated { state, payload_attrs, tx }) => { + if this.skipped < this.threshold { + *this.skipped += 1; + tracing::warn!(target: "engine::intercept", ?state, ?payload_attrs, threshold=this.threshold, skipped=this.skipped, "Skipping FCU"); + let _ = tx.send(Ok(OnForkChoiceUpdated::syncing())); + continue + } else { + *this.skipped = 0; + Some(BeaconEngineMessage::ForkchoiceUpdated { state, payload_attrs, tx }) + } + } + next => next, + }; + return Poll::Ready(item) + } + } +} diff --git a/crates/node-core/src/engine/skip_new_payload.rs b/crates/node-core/src/engine/skip_new_payload.rs new file mode 100644 index 000000000..fdcb4aeec --- /dev/null +++ b/crates/node-core/src/engine/skip_new_payload.rs @@ -0,0 +1,67 @@ +//! Stream wrapper that skips specified number of new payload messages. + +use futures::{Stream, StreamExt}; +use reth_beacon_consensus::BeaconEngineMessage; +use reth_engine_primitives::EngineTypes; +use reth_rpc_types::engine::{PayloadStatus, PayloadStatusEnum}; +use std::{ + pin::Pin, + task::{ready, Context, Poll}, +}; + +/// Engine API stream wrapper that skips the specified number of new payload messages. +#[derive(Debug)] +#[pin_project::pin_project] +pub struct EngineSkipNewPayload { + #[pin] + stream: S, + /// The number of messages to skip. + threshold: usize, + /// Current count of skipped messages. + skipped: usize, +} + +impl EngineSkipNewPayload { + /// Creates new [EngineSkipNewPayload] stream wrapper. + pub fn new(stream: S, threshold: usize) -> Self { + Self { stream, threshold, skipped: 0 } + } +} + +impl Stream for EngineSkipNewPayload +where + Engine: EngineTypes, + S: Stream>, +{ + type Item = S::Item; + + fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + let mut this = self.project(); + + loop { + let next = ready!(this.stream.poll_next_unpin(cx)); + let item = match next { + Some(BeaconEngineMessage::NewPayload { payload, cancun_fields, tx }) => { + if this.skipped < this.threshold { + *this.skipped += 1; + tracing::warn!( + target: "engine::intercept", + block_number = payload.block_number(), + block_hash = %payload.block_hash(), + ?cancun_fields, + threshold=this.threshold, + skipped=this.skipped, "Skipping new payload" + ); + let _ = tx.send(Ok(PayloadStatus::from_status(PayloadStatusEnum::Syncing))); + continue + } else { + *this.skipped = 0; + Some(BeaconEngineMessage::NewPayload { payload, cancun_fields, tx }) + } + } + next => next, + }; + return Poll::Ready(item) + } + } +} diff --git a/crates/node-core/src/engine_skip_fcu.rs b/crates/node-core/src/engine_skip_fcu.rs deleted file mode 100644 index a6e5e1b01..000000000 --- a/crates/node-core/src/engine_skip_fcu.rs +++ /dev/null @@ -1,55 +0,0 @@ -//! Stores engine API messages to disk for later inspection and replay. - -use reth_beacon_consensus::{BeaconEngineMessage, OnForkChoiceUpdated}; -use reth_engine_primitives::EngineTypes; -use tokio::sync::mpsc::{UnboundedReceiver, UnboundedSender}; - -/// Intercept Engine API message and skip FCUs. -#[derive(Debug)] -pub struct EngineApiSkipFcu { - /// The number of FCUs to skip. - threshold: usize, - /// Current count of skipped FCUs. 
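The two skip wrappers above share one technique: a `pin_project` stream that polls its inner stream in a loop and decides per item whether to swallow it or forward it. A self-contained miniature of that pattern, with no reth types, assuming the `futures`, `pin-project` and `tokio` crates:

```rust
use futures::{ready, Stream, StreamExt};
use std::pin::Pin;
use std::task::{Context, Poll};

/// Pinned wrapper that forwards only every `n`-th item of the inner stream.
#[pin_project::pin_project]
struct SkipEvery<S> {
    #[pin]
    stream: S,
    /// Pass one item through for every `n` seen.
    n: usize,
    seen: usize,
}

impl<S: Stream> Stream for SkipEvery<S> {
    type Item = S::Item;

    fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        let mut this = self.project();
        loop {
            // `ready!` returns `Poll::Pending` early if the inner stream is not ready.
            match ready!(this.stream.as_mut().poll_next(cx)) {
                Some(item) => {
                    *this.seen += 1;
                    if *this.seen % *this.n == 0 {
                        return Poll::Ready(Some(item)); // forward
                    }
                    // otherwise swallow the item and poll again
                }
                None => return Poll::Ready(None),
            }
        }
    }
}

#[tokio::main]
async fn main() {
    let wrapped = SkipEvery { stream: futures::stream::iter(1..=6), n: 3, seen: 0 };
    let kept: Vec<_> = wrapped.collect().await;
    assert_eq!(kept, vec![3, 6]); // every third item survives
}
```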
- skipped: usize, -} - -impl EngineApiSkipFcu { - /// Creates new [EngineApiSkipFcu] interceptor. - pub fn new(threshold: usize) -> Self { - Self { - threshold, - // Start with `threshold` so that the first FCU goes through. - skipped: threshold, - } - } - - /// Intercepts an incoming engine API message, skips FCU or forwards it - /// to the engine depending on current number of skipped FCUs. - pub async fn intercept<Engine>( - mut self, - mut rx: UnboundedReceiver<BeaconEngineMessage<Engine>>, - to_engine: UnboundedSender<BeaconEngineMessage<Engine>>, - ) where - Engine: EngineTypes, - BeaconEngineMessage<Engine>: std::fmt::Debug, - { - while let Some(msg) = rx.recv().await { - if let BeaconEngineMessage::ForkchoiceUpdated { state, payload_attrs, tx } = msg { - if self.skipped < self.threshold { - self.skipped += 1; - tracing::warn!(target: "engine::intercept", ?state, ?payload_attrs, threshold=self.threshold, skipped=self.skipped, "Skipping FCU"); - let _ = tx.send(Ok(OnForkChoiceUpdated::syncing())); - } else { - self.skipped = 0; - let _ = to_engine.send(BeaconEngineMessage::ForkchoiceUpdated { - state, - payload_attrs, - tx, - }); - } - } else { - let _ = to_engine.send(msg); - } - } - } -} diff --git a/crates/node-core/src/init.rs b/crates/node-core/src/init.rs index 7f529c2b0..6d924b6b1 100644 --- a/crates/node-core/src/init.rs +++ b/crates/node-core/src/init.rs @@ -1,26 +1,48 @@ //! Reth genesis initialization utility functions. -use reth_db::{ - database::Database, - tables, - transaction::{DbTx, DbTxMut}, -}; +use reth_codecs::Compact; +use reth_config::config::EtlConfig; +use reth_db::{database::Database, tables, transaction::DbTxMut}; +use reth_etl::Collector; use reth_interfaces::{db::DatabaseError, provider::ProviderResult}; use reth_primitives::{ - stage::StageId, Account, Address, Bytecode, ChainSpec, GenesisAccount, Receipts, - StaticFileSegment, StorageEntry, B256, U256, + stage::{StageCheckpoint, StageId}, + Account, Address, Bytecode, ChainSpec, GenesisAccount, Receipts, StaticFileSegment, + StorageEntry, B256, U256, }; use reth_provider::{ bundle_state::{BundleStateInit, RevertsInit}, providers::{StaticFileProvider, StaticFileWriter}, - BlockHashReader, BundleStateWithReceipts, ChainSpecProvider, DatabaseProviderRW, HashingWriter, - HistoryWriter, OriginalValuesKnown, ProviderError, ProviderFactory, + BlockHashReader, BlockNumReader, BundleStateWithReceipts, ChainSpecProvider, + DatabaseProviderRW, HashingWriter, HistoryWriter, OriginalValuesKnown, ProviderError, + ProviderFactory, StageCheckpointWriter, StateWriter, StaticFileProviderFactory, }; +use reth_trie::{IntermediateStateRootState, StateRoot as StateRootComputer, StateRootProgress}; +use serde::{Deserialize, Serialize}; use std::{ collections::{BTreeMap, HashMap}, + io::BufRead, + ops::DerefMut, sync::Arc, }; -use tracing::debug; +use tracing::{debug, error, info, trace}; + +/// Default soft limit for the number of bytes to read from the state dump file before inserting +/// into the database. +/// +/// Default is 1 GB. +pub const DEFAULT_SOFT_LIMIT_BYTE_LEN_ACCOUNTS_CHUNK: usize = 1_000_000_000; + +/// Approximate number of accounts per 1 GB of state dump file: one account is roughly 3.5 KB, +/// so 1 GB holds approximately 285,228 accounts. +// +// (14.05 GB OP mainnet state dump at Bedrock block / 4 007 565 accounts in file ≈ 3.5 KB per +// account) +pub const AVERAGE_COUNT_ACCOUNTS_PER_GB_STATE_DUMP: usize = 285_228; + +/// Soft limit for the number of flushed updates after which to log progress summary.
+const SOFT_LIMIT_COUNT_FLUSHED_UPDATES: usize = 1_000_000; /// Database initialization error type. #[derive(Debug, thiserror::Error, PartialEq, Eq, Clone)] @@ -34,10 +56,19 @@ pub enum InitDatabaseError { /// Actual genesis hash. database_hash: B256, }, - /// Provider error. #[error(transparent)] Provider(#[from] ProviderError), + /// Computed state root doesn't match state root in state dump file. + #[error( + "state root mismatch, state dump: {expected_state_root}, computed: {computed_state_root}" + )] + SateRootMismatch { + /// Expected state root. + expected_state_root: B256, + /// Actual state root. + computed_state_root: B256, + }, } impl From for InitDatabaseError { @@ -80,18 +111,18 @@ pub fn init_genesis(factory: ProviderFactory) -> Result(&tx, &static_file_provider, chain.clone())?; + insert_genesis_header::(tx, &static_file_provider, chain.clone())?; - insert_genesis_state::(&tx, alloc.len(), alloc.iter())?; + insert_genesis_state::(tx, alloc.len(), alloc.iter())?; // insert sync stage - for stage in StageId::ALL.iter() { - tx.put::(stage.to_string(), Default::default())?; + for stage in StageId::ALL { + provider_rw.save_stage_checkpoint(stage, Default::default())?; } - tx.commit()?; + provider_rw.commit()?; static_file_provider.commit()?; Ok(hash) @@ -102,6 +133,16 @@ pub fn insert_genesis_state<'a, 'b, DB: Database>( tx: &::TXMut, capacity: usize, alloc: impl Iterator, +) -> ProviderResult<()> { + insert_state::(tx, capacity, alloc, 0) +} + +/// Inserts state at given block into database. +pub fn insert_state<'a, 'b, DB: Database>( + tx: &::TXMut, + capacity: usize, + alloc: impl Iterator, + block: u64, ) -> ProviderResult<()> { let mut state_init: BundleStateInit = HashMap::with_capacity(capacity); let mut reverts_init = HashMap::with_capacity(capacity); @@ -149,18 +190,20 @@ pub fn insert_genesis_state<'a, 'b, DB: Database>( ), ); } - let all_reverts_init: RevertsInit = HashMap::from([(0, reverts_init)]); + let all_reverts_init: RevertsInit = HashMap::from([(block, reverts_init)]); let bundle = BundleStateWithReceipts::new_init( state_init, all_reverts_init, contracts.into_iter().collect(), Receipts::new(), - 0, + block, ); bundle.write_to_storage(tx, None, OriginalValuesKnown::Yes)?; + trace!(target: "reth::cli", "Inserted state"); + Ok(()) } @@ -174,6 +217,8 @@ pub fn insert_genesis_hashes<'a, 'b, DB: Database>( alloc.clone().map(|(addr, account)| (*addr, Some(Account::from_genesis_account(account)))); provider.insert_account_for_hashing(alloc_accounts)?; + trace!(target: "reth::cli", "Inserted account hashes"); + let alloc_storage = alloc.filter_map(|(addr, account)| { // only return Some if there is storage account.storage.as_ref().map(|storage| { @@ -188,6 +233,8 @@ pub fn insert_genesis_hashes<'a, 'b, DB: Database>( }); provider.insert_storage_for_hashing(alloc_storage)?; + trace!(target: "reth::cli", "Inserted storage hashes"); + Ok(()) } @@ -195,17 +242,30 @@ pub fn insert_genesis_hashes<'a, 'b, DB: Database>( pub fn insert_genesis_history<'a, 'b, DB: Database>( provider: &DatabaseProviderRW, alloc: impl Iterator + Clone, +) -> ProviderResult<()> { + insert_history::(provider, alloc, 0) +} + +/// Inserts history indices for genesis accounts and storage. 
+pub fn insert_history<'a, 'b, DB: Database>( + provider: &DatabaseProviderRW, + alloc: impl Iterator + Clone, + block: u64, ) -> ProviderResult<()> { let account_transitions = - alloc.clone().map(|(addr, _)| (*addr, vec![0])).collect::>(); + alloc.clone().map(|(addr, _)| (*addr, vec![block])).collect::>(); provider.insert_account_history_index(account_transitions)?; + trace!(target: "reth::cli", "Inserted account history"); + let storage_transitions = alloc .filter_map(|(addr, account)| account.storage.as_ref().map(|storage| (addr, storage))) - .flat_map(|(addr, storage)| storage.iter().map(|(key, _)| ((*addr, *key), vec![0]))) + .flat_map(|(addr, storage)| storage.iter().map(|(key, _)| ((*addr, *key), vec![block]))) .collect::>(); provider.insert_storage_history_index(storage_transitions)?; + trace!(target: "reth::cli", "Inserted storage history"); + Ok(()) } @@ -233,6 +293,232 @@ pub fn insert_genesis_header( Ok(()) } +/// Reads account state from a [`BufRead`] reader and initializes it at the highest block that can +/// be found on database. +/// +/// It's similar to [`init_genesis`] but supports importing state too big to fit in memory, and can +/// be set to the highest block present. One practical usecase is to import OP mainnet state at +/// bedrock transition block. +pub fn init_from_state_dump( + mut reader: impl BufRead, + factory: ProviderFactory, + etl_config: EtlConfig, +) -> eyre::Result { + let block = factory.last_block_number()?; + let hash = factory.block_hash(block)?.unwrap(); + + debug!(target: "reth::cli", + block, + chain=%factory.chain_spec().chain, + "Initializing state at block" + ); + + // first line can be state root, then it can be used for verifying against computed state root + let expected_state_root = parse_state_root(&mut reader)?; + + // remaining lines are accounts + let collector = parse_accounts(&mut reader, etl_config)?; + + // write state to db + let mut provider_rw = factory.provider_rw()?; + dump_state(collector, &mut provider_rw, block)?; + + // compute and compare state root. this advances the stage checkpoints. + let computed_state_root = compute_state_root(&provider_rw)?; + if computed_state_root != expected_state_root { + error!(target: "reth::cli", + ?computed_state_root, + ?expected_state_root, + "Computed state root does not match state root in state dump" + ); + + Err(InitDatabaseError::SateRootMismatch { expected_state_root, computed_state_root })? + } else { + info!(target: "reth::cli", + ?computed_state_root, + "Computed state root matches state root in state dump" + ); + } + + // insert sync stages for stages that require state + for stage in StageId::STATE_REQUIRED { + provider_rw.save_stage_checkpoint(stage, StageCheckpoint::new(block))?; + } + + provider_rw.commit()?; + + Ok(hash) +} + +/// Parses and returns expected state root. +fn parse_state_root(reader: &mut impl BufRead) -> eyre::Result { + let mut line = String::new(); + reader.read_line(&mut line)?; + + let expected_state_root = serde_json::from_str::(&line)?.root; + trace!(target: "reth::cli", + root=%expected_state_root, + "Read state root from file" + ); + Ok(expected_state_root) +} + +/// Parses accounts and pushes them to a [`Collector`]. 
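For orientation, the dump layout that `init_from_state_dump` expects is newline-delimited JSON: a state-root header line, then one account per line with the address flattened alongside the account fields. A minimal parsing sketch with stand-in structs (field values are fabricated for illustration; the real file carries a full `GenesisAccount`):

```rust
use serde::Deserialize;

/// First line of the dump: the expected state root.
#[derive(Deserialize)]
struct StateRootLine {
    root: String,
}

/// Every following line: one account, its fields next to `address`.
/// Unknown fields are ignored by default, so extra account fields are fine.
#[derive(Debug, Deserialize)]
struct AccountLine {
    address: String,
    balance: String,
    #[serde(default)]
    nonce: Option<u64>,
}

fn main() -> Result<(), serde_json::Error> {
    // Two-line miniature of the dump layout: root header, then accounts.
    let dump = r#"{"root":"0x1111111111111111111111111111111111111111111111111111111111111111"}
{"address":"0x0000000000000000000000000000000000000042","balance":"0x0","nonce":1}"#;

    let mut lines = dump.lines();
    let root: StateRootLine = serde_json::from_str(lines.next().unwrap())?;
    println!("expected state root: {}", root.root);

    for line in lines {
        let account: AccountLine = serde_json::from_str(line)?;
        println!("{account:?}");
    }
    Ok(())
}
```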
+fn parse_accounts( + mut reader: impl BufRead, + etl_config: EtlConfig, +) -> Result, eyre::Error> { + let mut line = String::new(); + let mut collector = Collector::new(etl_config.file_size, etl_config.dir); + + while let Ok(n) = reader.read_line(&mut line) { + if n == 0 { + break; + } + + let GenesisAccountWithAddress { genesis_account, address } = serde_json::from_str(&line)?; + collector.insert(address, genesis_account)?; + + if !collector.is_empty() && collector.len() % AVERAGE_COUNT_ACCOUNTS_PER_GB_STATE_DUMP == 0 + { + info!(target: "reth::cli", + parsed_new_accounts=collector.len(), + ); + } + + line.clear(); + } + + Ok(collector) +} + +/// Takes a [`Collector`] and processes all accounts. +fn dump_state( + mut collector: Collector, + provider_rw: &mut DatabaseProviderRW, + block: u64, +) -> Result<(), eyre::Error> { + let accounts_len = collector.len(); + let mut accounts = Vec::with_capacity(AVERAGE_COUNT_ACCOUNTS_PER_GB_STATE_DUMP); + let mut total_inserted_accounts = 0; + + for (index, entry) in collector.iter()?.enumerate() { + let (address, account) = entry?; + let (address, _) = Address::from_compact(address.as_slice(), address.len()); + let (account, _) = GenesisAccount::from_compact(account.as_slice(), account.len()); + + accounts.push((address, account)); + + if (index > 0 && index % AVERAGE_COUNT_ACCOUNTS_PER_GB_STATE_DUMP == 0) || + index == accounts_len - 1 + { + total_inserted_accounts += accounts.len(); + + info!(target: "reth::cli", + total_inserted_accounts, + "Writing accounts to db" + ); + + // use transaction to insert genesis header + insert_genesis_hashes( + provider_rw, + accounts.iter().map(|(address, account)| (address, account)), + )?; + + insert_history( + provider_rw, + accounts.iter().map(|(address, account)| (address, account)), + block, + )?; + + // block is already written to static files + let tx = provider_rw.deref_mut().tx_mut(); + insert_state::( + tx, + accounts.len(), + accounts.iter().map(|(address, account)| (address, account)), + block, + )?; + + accounts.clear(); + } + } + Ok(()) +} + +/// Computes the state root (from scratch) based on the accounts and storages present in the +/// database. +fn compute_state_root(provider: &DatabaseProviderRW) -> eyre::Result { + trace!(target: "reth::cli", "Computing state root"); + + let tx = provider.tx_ref(); + let mut intermediate_state: Option = None; + let mut total_flushed_updates = 0; + + loop { + match StateRootComputer::from_tx(tx) + .with_intermediate_state(intermediate_state) + .root_with_progress()? + { + StateRootProgress::Progress(state, _, updates) => { + let updates_len = updates.len(); + + trace!(target: "reth::cli", + last_account_key = %state.last_account_key, + updates_len, + total_flushed_updates, + "Flushing trie updates" + ); + + intermediate_state = Some(*state); + updates.flush(tx)?; + + total_flushed_updates += updates_len; + + if total_flushed_updates % SOFT_LIMIT_COUNT_FLUSHED_UPDATES == 0 { + info!(target: "reth::cli", + total_flushed_updates, + "Flushing trie updates" + ); + } + } + StateRootProgress::Complete(root, _, updates) => { + let updates_len = updates.len(); + + updates.flush(tx)?; + + total_flushed_updates += updates_len; + + trace!(target: "reth::cli", + %root, + updates_len = updates_len, + total_flushed_updates, + "State root has been computed" + ); + + return Ok(root) + } + } + } +} + +/// Type to deserialize state root from state dump file. 
+#[derive(Debug, Serialize, Deserialize, PartialEq, Eq)] +struct StateRoot { + root: B256, +} + +/// An account as in the state dump file. This contains a [`GenesisAccount`] and the account's +/// address. +#[derive(Debug, Serialize, Deserialize)] +struct GenesisAccountWithAddress { + /// The account's balance, nonce, code, and storage. + #[serde(flatten)] + genesis_account: GenesisAccount, + /// The account's address. + address: Address, +} + #[cfg(test)] mod tests { use super::*; @@ -240,11 +526,12 @@ mod tests { cursor::DbCursorRO, models::{storage_sharded_key::StorageShardedKey, ShardedKey}, table::{Table, TableRow}, + transaction::DbTx, DatabaseEnv, }; use reth_primitives::{ - Chain, ForkTimestamps, Genesis, IntegerList, GOERLI, GOERLI_GENESIS_HASH, MAINNET, - MAINNET_GENESIS_HASH, SEPOLIA, SEPOLIA_GENESIS_HASH, + Chain, Genesis, IntegerList, GOERLI, GOERLI_GENESIS_HASH, MAINNET, MAINNET_GENESIS_HASH, + SEPOLIA, SEPOLIA_GENESIS_HASH, }; use reth_provider::test_utils::create_test_provider_factory_with_chain_spec; @@ -334,7 +621,6 @@ mod tests { ..Default::default() }, hardforks: BTreeMap::default(), - fork_timestamps: ForkTimestamps::default(), genesis_hash: None, paris_block_and_final_difficulty: None, deposit_contract: None, diff --git a/crates/node-core/src/lib.rs b/crates/node-core/src/lib.rs index 3d73e0e61..024467ab1 100644 --- a/crates/node-core/src/lib.rs +++ b/crates/node-core/src/lib.rs @@ -11,8 +11,7 @@ pub mod args; pub mod cli; pub mod dirs; -pub mod engine_api_store; -pub mod engine_skip_fcu; +pub mod engine; pub mod exit; pub mod init; pub mod metrics; diff --git a/crates/node-core/src/node_config.rs b/crates/node-core/src/node_config.rs index 2b186b19c..5cb28c873 100644 --- a/crates/node-core/src/node_config.rs +++ b/crates/node-core/src/node_config.rs @@ -26,7 +26,11 @@ use reth_provider::{ }; use reth_tasks::TaskExecutor; use secp256k1::SecretKey; -use std::{net::SocketAddr, path::PathBuf, sync::Arc}; +use std::{ + net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr, SocketAddrV4, SocketAddrV6}, + path::PathBuf, + sync::Arc, +}; use tracing::*; /// The default prometheus recorder handle. We use a global static to ensure that it is only @@ -234,7 +238,7 @@ impl NodeConfig { /// Get the network secret from the given data dir pub fn network_secret(&self, data_dir: &ChainPath) -> eyre::Result { let network_secret_path = - self.network.p2p_secret_key.clone().unwrap_or_else(|| data_dir.p2p_secret_path()); + self.network.p2p_secret_key.clone().unwrap_or_else(|| data_dir.p2p_secret()); debug!(target: "reth::cli", ?network_secret_path, "Loading p2p key file"); let secret_key = get_secret_key(&network_secret_path)?; Ok(secret_key) @@ -262,15 +266,15 @@ impl NodeConfig { } /// Returns pruning configuration. 
- pub fn prune_config(&self) -> eyre::Result> { - self.pruning.prune_config(Arc::clone(&self.chain)) + pub fn prune_config(&self) -> Option { + self.pruning.prune_config(&self.chain) } /// Returns the max block that the node should run to, looking it up from the network if /// necessary pub async fn max_block( &self, - network_client: &Client, + network_client: Client, provider: Provider, ) -> eyre::Result> where @@ -299,7 +303,7 @@ impl NodeConfig { ) -> eyre::Result> { info!(target: "reth::cli", "Connecting to P2P network"); let secret_key = self.network_secret(data_dir)?; - let default_peers_path = data_dir.known_peers_path(); + let default_peers_path = data_dir.known_peers(); Ok(self.load_network_config(config, client, executor, head, secret_key, default_peers_path)) } @@ -425,6 +429,7 @@ impl NodeConfig { Client: HeadersClient, { info!(target: "reth::cli", ?tip, "Fetching tip block from the network."); + let mut fetch_failures = 0; loop { match get_single_header(&client, tip).await { Ok(tip_header) => { @@ -432,7 +437,10 @@ impl NodeConfig { return Ok(tip_header); } Err(error) => { - error!(target: "reth::cli", %error, "Failed to fetch the tip. Retrying..."); + fetch_failures += 1; + if fetch_failures % 20 == 0 { + error!(target: "reth::cli", ?fetch_failures, %error, "Failed to fetch the tip. Retrying..."); + } } } } @@ -458,6 +466,7 @@ impl NodeConfig { // set discovery port based on instance number self.network.port + self.instance - 1, )) + .disable_discv4_discovery_if(self.chain.chain.is_optimism()) .discovery_addr(SocketAddr::new( self.network.discovery.addr, // set discovery port based on instance number @@ -466,21 +475,45 @@ impl NodeConfig { let config = cfg_builder.build(client); - if !self.network.discovery.enable_discv5_discovery { + if self.network.discovery.disable_discovery || + !self.network.discovery.enable_discv5_discovery && + !config.chain_spec.chain.is_optimism() + { return config } + + let rlpx_addr = config.listener_addr().ip(); // work around since discv5 config builder can't be integrated into network config builder // due to unsatisfied trait bounds config.discovery_v5_with_config_builder(|builder| { - let DiscoveryArgs { discv5_addr, discv5_port, .. } = self.network.discovery; + let DiscoveryArgs { + discv5_addr, + discv5_addr_ipv6, + discv5_port, + discv5_port_ipv6, + discv5_lookup_interval, + discv5_bootstrap_lookup_interval, + discv5_bootstrap_lookup_countdown, + .. + } = self.network.discovery; + + let discv5_addr_ipv4 = discv5_addr.or_else(|| ipv4(rlpx_addr)); + let discv5_addr_ipv6 = discv5_addr_ipv6.or_else(|| ipv6(rlpx_addr)); + let discv5_port_ipv4 = discv5_port + self.instance - 1; + let discv5_port_ipv6 = discv5_port_ipv6 + self.instance - 1; + builder .discv5_config( - discv5::ConfigBuilder::new(ListenConfig::from(Into::::into(( - discv5_addr, - discv5_port + self.instance - 1, - )))) + discv5::ConfigBuilder::new(ListenConfig::from_two_sockets( + discv5_addr_ipv4.map(|addr| SocketAddrV4::new(addr, discv5_port_ipv4)), + discv5_addr_ipv6 + .map(|addr| SocketAddrV6::new(addr, discv5_port_ipv6, 0, 0)), + )) .build(), ) + .lookup_interval(discv5_lookup_interval) + .bootstrap_lookup_interval(discv5_bootstrap_lookup_interval) + .bootstrap_lookup_countdown(discv5_bootstrap_lookup_countdown) .build() }) } @@ -518,3 +551,19 @@ impl Default for NodeConfig { } } } + +/// Returns the address if this is an [`Ipv4Addr`]. 
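The tip-fetch change above keeps retrying indefinitely but only logs every 20th consecutive failure, so a flaky peer does not flood the logs. A self-contained sketch of that throttling pattern (`fetch_header` is a stand-in for the real headers-client call):

```rust
/// Retry forever, logging only every `LOG_EVERY`-th consecutive failure.
fn fetch_with_throttled_logging<T, E: std::fmt::Display>(
    mut fetch_header: impl FnMut() -> Result<T, E>,
) -> T {
    const LOG_EVERY: u64 = 20;
    let mut fetch_failures = 0u64;
    loop {
        match fetch_header() {
            Ok(header) => return header,
            Err(error) => {
                fetch_failures += 1;
                if fetch_failures % LOG_EVERY == 0 {
                    eprintln!("failed to fetch the tip ({fetch_failures} attempts): {error}");
                }
            }
        }
    }
}

fn main() {
    let mut attempts = 0;
    // Succeeds on the 45th try; only attempts 20 and 40 produce log lines.
    let value = fetch_with_throttled_logging(|| {
        attempts += 1;
        if attempts < 45 { Err("timed out") } else { Ok(attempts) }
    });
    assert_eq!(value, 45);
}
```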
+pub fn ipv4(ip: IpAddr) -> Option { + match ip { + IpAddr::V4(ip) => Some(ip), + IpAddr::V6(_) => None, + } +} + +/// Returns the address if this is an [`Ipv6Addr`]. +pub fn ipv6(ip: IpAddr) -> Option { + match ip { + IpAddr::V4(_) => None, + IpAddr::V6(ip) => Some(ip), + } +} diff --git a/crates/node-ethereum/tests/e2e/p2p.rs b/crates/node-ethereum/tests/e2e/p2p.rs deleted file mode 100644 index 940096e18..000000000 --- a/crates/node-ethereum/tests/e2e/p2p.rs +++ /dev/null @@ -1,77 +0,0 @@ -use std::sync::Arc; - -use crate::utils::eth_payload_attributes; -use reth::{ - args::{DiscoveryArgs, NetworkArgs, RpcServerArgs}, - builder::{NodeBuilder, NodeConfig, NodeHandle}, - tasks::TaskManager, -}; -use reth_e2e_test_utils::{node::NodeHelper, wallet::Wallet}; -use reth_node_ethereum::EthereumNode; -use reth_primitives::{ChainSpecBuilder, Genesis, MAINNET}; - -#[tokio::test] -async fn can_sync() -> eyre::Result<()> { - reth_tracing::init_test_tracing(); - - let tasks = TaskManager::current(); - let exec = tasks.executor(); - - let genesis: Genesis = serde_json::from_str(include_str!("../assets/genesis.json")).unwrap(); - let chain_spec = Arc::new( - ChainSpecBuilder::default() - .chain(MAINNET.chain) - .genesis(genesis) - .cancun_activated() - .build(), - ); - - let network_config = NetworkArgs { - discovery: DiscoveryArgs { disable_discovery: true, ..DiscoveryArgs::default() }, - ..NetworkArgs::default() - }; - - let node_config = NodeConfig::test() - .with_chain(chain_spec) - .with_network(network_config) - .with_unused_ports() - .with_rpc(RpcServerArgs::default().with_unused_ports().with_http()); - - let NodeHandle { node, node_exit_future: _ } = NodeBuilder::new(node_config.clone()) - .testing_node(exec.clone()) - .node(EthereumNode::default()) - .launch() - .await?; - - let mut first_node = NodeHelper::new(node.clone()).await?; - - let NodeHandle { node, node_exit_future: _ } = NodeBuilder::new(node_config) - .testing_node(exec) - .node(EthereumNode::default()) - .launch() - .await?; - - let mut second_node = NodeHelper::new(node).await?; - - let mut wallet = Wallet::default(); - let raw_tx = wallet.transfer_tx().await; - - // Make them peer - first_node.network.add_peer(second_node.network.record()).await; - second_node.network.add_peer(first_node.network.record()).await; - - // Make sure they establish a new session - first_node.network.expect_session().await; - second_node.network.expect_session().await; - - // Make the first node advance - let (block_hash, tx_hash) = first_node.advance(raw_tx.clone(), eth_payload_attributes).await?; - - // only send forkchoice update to second node - second_node.engine_api.update_forkchoice(block_hash).await?; - - // expect second node advanced via p2p gossip - second_node.assert_new_block(tx_hash, block_hash, 1).await?; - - Ok(()) -} diff --git a/crates/node/api/src/node.rs b/crates/node/api/src/node.rs index 1304d77d1..355a7ecab 100644 --- a/crates/node/api/src/node.rs +++ b/crates/node/api/src/node.rs @@ -1,7 +1,11 @@ -//! Traits for configuring a node +//! Traits for configuring a node. 
use crate::{primitives::NodePrimitives, ConfigureEvm, EngineTypes}; -use reth_db::database::Database; +use reth_db::{ + database::Database, + database_metrics::{DatabaseMetadata, DatabaseMetrics}, +}; +use reth_evm::execute::BlockExecutorProvider; use reth_network::NetworkHandle; use reth_payload_builder::PayloadBuilderHandle; use reth_provider::FullProvider; @@ -12,24 +16,23 @@ use std::marker::PhantomData; /// The type that configures the essential types of an ethereum like node. /// /// This includes the primitive types of a node, the engine API types for communication with the -/// consensus layer, and the EVM configuration type for setting up the Ethereum Virtual Machine. +/// consensus layer. +/// +/// This trait is intended to be stateless and only define the types of the node. pub trait NodeTypes: Send + Sync + 'static { /// The node's primitive types, defining basic operations and structures. type Primitives: NodePrimitives; /// The node's engine types, defining the interaction with the consensus engine. type Engine: EngineTypes; - /// The node's EVM configuration, defining settings for the Ethereum Virtual Machine. - type Evm: ConfigureEvm; - - /// Returns the node's evm config. - fn evm_config(&self) -> Self::Evm; } -/// A helper type that is downstream of the [NodeTypes] trait and adds stateful components to the +/// A helper trait that is downstream of the [NodeTypes] trait and adds stateful components to the /// node. +/// +/// Its types are configured by node internally and are not intended to be user configurable. pub trait FullNodeTypes: NodeTypes + 'static { - /// Underlying database type. - type DB: Database + Clone + 'static; + /// Underlying database type used by the node to store and retrieve data. + type DB: Database + DatabaseMetrics + DatabaseMetadata + Clone + Unpin + 'static; /// The provider type used to interact with the node. type Provider: FullProvider; } @@ -38,7 +41,7 @@ pub trait FullNodeTypes: NodeTypes + 'static { #[derive(Debug)] pub struct FullNodeTypesAdapter { /// An instance of the user configured node types. - pub types: Types, + pub types: PhantomData, /// The database type used by the node. pub db: PhantomData, /// The provider type used by the node. @@ -46,9 +49,15 @@ pub struct FullNodeTypesAdapter { } impl FullNodeTypesAdapter { - /// Create a new adapter from the given node types. - pub fn new(types: Types) -> Self { - Self { types, db: Default::default(), provider: Default::default() } + /// Create a new adapter with the configured types. + pub fn new() -> Self { + Self { types: Default::default(), db: Default::default(), provider: Default::default() } + } +} + +impl Default for FullNodeTypesAdapter { + fn default() -> Self { + Self::new() } } @@ -60,18 +69,13 @@ where { type Primitives = Types::Primitives; type Engine = Types::Engine; - type Evm = Types::Evm; - - fn evm_config(&self) -> Self::Evm { - self.types.evm_config() - } } impl FullNodeTypes for FullNodeTypesAdapter where Types: NodeTypes, Provider: FullProvider, - DB: Database + Clone + 'static, + DB: Database + DatabaseMetrics + DatabaseMetadata + Clone + Unpin + 'static, { type DB = DB; type Provider = Provider; @@ -80,11 +84,23 @@ where /// Encapsulates all types and components of the node. pub trait FullNodeComponents: FullNodeTypes + 'static { /// The transaction pool of the node. - type Pool: TransactionPool; + type Pool: TransactionPool + Unpin; + + /// The node's EVM configuration, defining settings for the Ethereum Virtual Machine. 
+ type Evm: ConfigureEvm; + + /// The type that knows how to execute blocks. + type Executor: BlockExecutorProvider; /// Returns the transaction pool of the node. fn pool(&self) -> &Self::Pool; + /// Returns the node's evm config. + fn evm_config(&self) -> &Self::Evm; + + /// Returns the node's executor type. + fn block_executor(&self) -> &Self::Executor; + /// Returns the provider of the node. fn provider(&self) -> &Self::Provider; @@ -97,87 +113,3 @@ pub trait FullNodeComponents: FullNodeTypes + 'static { /// Returns the task executor. fn task_executor(&self) -> &TaskExecutor; } - -/// A type that encapsulates all the components of the node. -#[derive(Debug)] -pub struct FullNodeComponentsAdapter { - /// The EVM configuration of the node. - pub evm_config: Node::Evm, - /// The transaction pool of the node. - pub pool: Pool, - /// The network handle of the node. - pub network: NetworkHandle, - /// The provider of the node. - pub provider: Node::Provider, - /// The payload builder service handle of the node. - pub payload_builder: PayloadBuilderHandle, - /// The task executor of the node. - pub executor: TaskExecutor, -} - -impl FullNodeTypes for FullNodeComponentsAdapter -where - Node: FullNodeTypes, - Pool: TransactionPool + 'static, -{ - type DB = Node::DB; - type Provider = Node::Provider; -} - -impl NodeTypes for FullNodeComponentsAdapter -where - Node: FullNodeTypes, - Pool: TransactionPool + 'static, -{ - type Primitives = Node::Primitives; - type Engine = Node::Engine; - type Evm = Node::Evm; - - fn evm_config(&self) -> Self::Evm { - self.evm_config.clone() - } -} - -impl FullNodeComponents for FullNodeComponentsAdapter -where - Node: FullNodeTypes, - Pool: TransactionPool + 'static, -{ - type Pool = Pool; - - fn pool(&self) -> &Self::Pool { - &self.pool - } - - fn provider(&self) -> &Self::Provider { - &self.provider - } - - fn network(&self) -> &NetworkHandle { - &self.network - } - - fn payload_builder(&self) -> &PayloadBuilderHandle { - &self.payload_builder - } - - fn task_executor(&self) -> &TaskExecutor { - &self.executor - } -} - -impl Clone for FullNodeComponentsAdapter -where - Pool: Clone, -{ - fn clone(&self) -> Self { - Self { - evm_config: self.evm_config.clone(), - pool: self.pool.clone(), - network: self.network.clone(), - provider: self.provider.clone(), - payload_builder: self.payload_builder.clone(), - executor: self.executor.clone(), - } - } -} diff --git a/crates/node-builder/Cargo.toml b/crates/node/builder/Cargo.toml similarity index 88% rename from crates/node-builder/Cargo.toml rename to crates/node/builder/Cargo.toml index c245203ca..26635e536 100644 --- a/crates/node-builder/Cargo.toml +++ b/crates/node/builder/Cargo.toml @@ -17,8 +17,8 @@ reth-auto-seal-consensus.workspace = true reth-beacon-consensus.workspace = true reth-blockchain-tree.workspace = true reth-exex.workspace = true +reth-evm.workspace = true reth-provider.workspace = true -reth-revm.workspace = true reth-db.workspace = true reth-rpc-engine-api.workspace = true reth-rpc.workspace = true @@ -37,6 +37,7 @@ reth-stages.workspace = true reth-config.workspace = true reth-downloaders.workspace = true reth-node-events.workspace = true +reth-consensus.workspace = true ## async futures.workspace = true @@ -46,10 +47,14 @@ tokio = { workspace = true, features = [ "time", "rt-multi-thread", ] } +tokio-stream.workspace = true ## misc aquamarine.workspace = true eyre.workspace = true -fdlimit = "0.3.0" +fdlimit.workspace = true confy.workspace = true rayon.workspace = true + +[dev-dependencies] 
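With the adapter struct removed, downstream extensions stay generic over the node through the accessors on the reshaped `FullNodeComponents` trait. A sketch of such a consumer, using only accessor names that appear in this diff (the function body is illustrative):

```rust
use reth_node_api::FullNodeComponents;

/// An extension (e.g. an ExEx or a custom RPC module) can take the whole node
/// as a single generic parameter; every stateful part hangs off one object,
/// so no adapter struct like the deleted `FullNodeComponentsAdapter` is needed.
fn inspect_components<Node: FullNodeComponents>(node: &Node) {
    let _pool = node.pool();
    let _evm = node.evm_config();
    let _executor = node.block_executor();
    let _provider = node.provider();
    let _network = node.network();
}
```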
+tempfile.workspace = true diff --git a/crates/node-builder/README.md b/crates/node/builder/README.md similarity index 100% rename from crates/node-builder/README.md rename to crates/node/builder/README.md diff --git a/crates/node-builder/docs/mermaid/builder.mmd b/crates/node/builder/docs/mermaid/builder.mmd similarity index 100% rename from crates/node-builder/docs/mermaid/builder.mmd rename to crates/node/builder/docs/mermaid/builder.mmd diff --git a/crates/node/builder/src/builder/mod.rs b/crates/node/builder/src/builder/mod.rs new file mode 100644 index 000000000..b6f0a191e --- /dev/null +++ b/crates/node/builder/src/builder/mod.rs @@ -0,0 +1,566 @@ +//! Customizable node builder. + +#![allow(clippy::type_complexity, missing_debug_implementations)] + +use crate::{ + components::NodeComponentsBuilder, + node::FullNode, + rpc::{RethRpcServerHandles, RpcContext}, + DefaultNodeLauncher, Node, NodeHandle, +}; +use futures::Future; +use reth_db::{ + database::Database, + database_metrics::{DatabaseMetadata, DatabaseMetrics}, + test_utils::{create_test_rw_db, TempDatabase}, + DatabaseEnv, +}; +use reth_exex::ExExContext; +use reth_network::{NetworkBuilder, NetworkConfig, NetworkHandle}; +use reth_node_api::{FullNodeTypes, FullNodeTypesAdapter, NodeTypes}; +use reth_node_core::{ + cli::config::{PayloadBuilderConfig, RethTransactionPoolConfig}, + dirs::{ChainPath, DataDirPath, MaybePlatformPath}, + node_config::NodeConfig, + primitives::{kzg::KzgSettings, Head}, + utils::write_peers_to_file, +}; +use reth_primitives::{constants::eip4844::MAINNET_KZG_TRUSTED_SETUP, ChainSpec}; +use reth_provider::{providers::BlockchainProvider, ChainSpecProvider}; +use reth_tasks::TaskExecutor; +use reth_transaction_pool::{PoolConfig, TransactionPool}; +pub use states::*; +use std::{str::FromStr, sync::Arc}; + +mod states; + +/// The adapter type for a reth node with the builtin provider type. +// Note: we need to hardcode this because custom components might depend on it in associated types. +pub type RethFullAdapter = FullNodeTypesAdapter>; + +#[cfg_attr(doc, aquamarine::aquamarine)] +/// Declaratively construct a node. +/// +/// [`NodeBuilder`] provides a [builder-like interface][builder] for composing +/// components of a node. +/// +/// ## Order +/// +/// Configuring a node starts out with a [`NodeConfig`] (this can be obtained from CLI arguments, +/// for example) and then proceeds to configure the core static types of the node: [NodeTypes]. +/// These include the node's primitive types and the node's engine types. +/// +/// Next, all stateful components of the node are configured. These are the components that are +/// downstream of those types: +/// +/// - The EVM and Executor configuration: [ExecutorBuilder](crate::components::ExecutorBuilder) +/// - The transaction pool: [PoolBuilder] +/// - The network: [NetworkBuilder](crate::components::NetworkBuilder) +/// - The payload builder: [PayloadBuilder](crate::components::PayloadServiceBuilder) +/// +/// Once all the components are configured, the node is ready to be launched. +/// +/// On launch the builder returns a fully type-aware [NodeHandle] that has access to all the +/// configured components and can interact with the node. +/// +/// There are convenience functions for networks that come with a preset of types and components via +/// the [Node] trait; see `reth_node_ethereum::EthereumNode` or `reth_node_optimism::OptimismNode`.
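+///
+/// For example, launching a preset node could look like the following (a
+/// hypothetical sketch: the config, database, task executor and data dir are
+/// assumed to already exist, and `EthereumNode` comes from `reth_node_ethereum`):
+///
+/// ```ignore
+/// let handle = NodeBuilder::new(config)
+///     .with_database(db)
+///     .with_launch_context(task_executor, data_dir)
+///     .launch_node(EthereumNode::default())
+///     .await?;
+/// ```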
+/// +/// The [NodeBuilder::node] function configures the node's types and components in one step. +/// +/// ## Components +/// +/// All components are configured with a [NodeComponentsBuilder] that is responsible for actually +/// creating the node components during the launch process. The +/// [ComponentsBuilder](crate::components::ComponentsBuilder) is a general purpose implementation of +/// the [NodeComponentsBuilder] trait that can be used to configure the executor, network, +/// transaction pool and payload builder of the node. It enforces the correct order of +/// configuration; for example, the network and the payload builder depend on the transaction pool +/// type that is configured first. +/// +/// All builder traits are generic over the node types and are invoked with the [BuilderContext] +/// that gives access to internals of the node that are needed to configure the components. These +/// include the original config, the chain spec, the database provider, and the task executor. +/// +/// ## Hooks +/// +/// Once all the components are configured, the builder can be used to set hooks that are run at +/// specific points in the node's lifecycle. This way custom services can be spawned before the node +/// is launched ([NodeBuilder::on_component_initialized]) or once the rpc server(s) are launched +/// ([NodeBuilder::on_rpc_started]). The [NodeBuilder::extend_rpc_modules] hook can be used to inject +/// custom rpc modules into the rpc server before it is launched; see also [RpcContext]. +/// All hooks accept a closure that is then invoked at the appropriate time in the node's launch +/// process. +/// +/// ## Flow +/// +/// The [NodeBuilder] is intended to sit behind a CLI that provides the necessary [NodeConfig] +/// input: [NodeBuilder::new] +/// +/// From there the builder is configured with the node's types, components, and hooks, then launched +/// with the [NodeBuilder::launch] method. On launch all the builtin internals, such as the +/// `Database` and its providers [BlockchainProvider], are initialized before the configured +/// [NodeComponentsBuilder] is invoked with the [BuilderContext] to create the transaction pool, +/// network, and payload builder components. When the RPC is configured, the corresponding hooks are +/// invoked to allow for custom rpc modules to be injected into the rpc server: +/// [NodeBuilder::extend_rpc_modules] +/// +/// Finally, all components are created, all services are launched, and a [NodeHandle] is returned +/// that can be used to interact with the node: [FullNode] +/// +/// The following diagram shows the flow of the node builder from CLI to a launched node. +/// +/// include_mmd!("docs/mermaid/builder.mmd") +/// +/// ## Internals +/// +/// The node builder is fully type-safe: it uses the [NodeTypes] trait to enforce that all +/// components are configured with the correct types. However, the database type, and with it the +/// provider trait implementations, are currently created by the builder itself during the launch +/// process; hence the database type is not part of the [NodeTypes] trait, and the node's components +/// that depend on the database are configured separately. In order to have a single trait that +/// encapsulates the entire node, the [FullNodeComponents] trait was introduced. This trait has +/// convenient associated types for all the components of the node.
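+///
+/// For instance, downstream code can stay generic over the entire node (a
+/// sketch; `use_components` is a hypothetical helper, not part of this crate):
+///
+/// ```ignore
+/// fn use_components<N: FullNodeComponents>(node: &N) {
+///     let _pool = node.pool();
+///     let _evm = node.evm_config();
+///     let _executor = node.block_executor();
+///     let _provider = node.provider();
+/// }
+/// ```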
+/// After [NodeBuilder::launch] the [NodeHandle] contains an instance of [FullNode] that implements +/// the [FullNodeComponents] trait and has access to all the components of the node. Internally the +/// node builder uses several generic adapter types that are then mapped to traits with associated +/// types for ease of use. +/// +/// ### Limitations +/// +/// Currently the launch process is limited to Ethereum nodes and requires all the components +/// specified above. It also expects beacon consensus with the Ethereum engine API that is +/// configured by the builder itself during launch. This might change in the future. +/// +/// [builder]: https://doc.rust-lang.org/1.0.0/style/ownership/builders.html +pub struct NodeBuilder { + /// All settings for how the node should be configured. + config: NodeConfig, + /// The configured database for the node. + database: DB, +} + +impl NodeBuilder<()> { + /// Create a new [`NodeBuilder`]. + pub fn new(config: NodeConfig) -> Self { + Self { config, database: () } + } +} + +impl NodeBuilder { + /// Returns a reference to the node builder's config. + pub fn config(&self) -> &NodeConfig { + &self.config + } + + /// Configures the underlying database that the node will use. + pub fn with_database(self, database: D) -> NodeBuilder { + NodeBuilder { config: self.config, database } + } + + /// Preconfigure the builder with the context to launch the node. + /// + /// This provides the task executor and the data directory for the node. + pub fn with_launch_context( + self, + task_executor: TaskExecutor, + data_dir: ChainPath, + ) -> WithLaunchContext> { + WithLaunchContext { builder: self, task_executor, data_dir } + } + + /// Creates an _ephemeral_ preconfigured node for testing purposes. + pub fn testing_node( + self, + task_executor: TaskExecutor, + ) -> WithLaunchContext>>> { + let db = create_test_rw_db(); + let db_path_str = db.path().to_str().expect("Path is not valid unicode"); + let path = + MaybePlatformPath::::from_str(db_path_str).expect("Path is not valid"); + let data_dir = path.unwrap_or_chain_default(self.config.chain.chain); + + WithLaunchContext { builder: self.with_database(db), task_executor, data_dir } + } +} + +impl NodeBuilder +where + DB: Database + DatabaseMetrics + DatabaseMetadata + Clone + Unpin + 'static, +{ + /// Configures the types of the node. + pub fn with_types(self) -> NodeBuilderWithTypes> + where + T: NodeTypes, + { + NodeBuilderWithTypes::new(self.config, self.database) + } + + /// Preconfigures the node with a specific node implementation. + /// + /// This is a convenience method that sets the node's types and components in one call. + pub fn node( + self, + node: N, + ) -> NodeBuilderWithComponents, N::ComponentsBuilder> + where + N: Node>, + { + self.with_types().with_components(node.components_builder()) + } +} + +/// A [NodeBuilder] with its launch context already configured. +/// +/// This exposes the same methods as [NodeBuilder] but with the launch context already configured; +/// see [WithLaunchContext::launch]. +pub struct WithLaunchContext { + builder: Builder, + task_executor: TaskExecutor, + data_dir: ChainPath, +} + +impl WithLaunchContext { + /// Returns a reference to the task executor. + pub fn task_executor(&self) -> &TaskExecutor { + &self.task_executor + } + + /// Returns a reference to the data directory.
+ pub fn data_dir(&self) -> &ChainPath { + &self.data_dir + } +} + +impl WithLaunchContext> +where + DB: Database + DatabaseMetrics + DatabaseMetadata + Clone + Unpin + 'static, +{ + /// Returns a reference to the node builder's config. + pub fn config(&self) -> &NodeConfig { + self.builder.config() + } + + /// Configures the types of the node. + pub fn with_types(self) -> WithLaunchContext>> + where + T: NodeTypes, + { + WithLaunchContext { + builder: self.builder.with_types(), + task_executor: self.task_executor, + data_dir: self.data_dir, + } + } + + /// Preconfigures the node with a specific node implementation. + /// + /// This is a convenience method that sets the node's types and components in one call. + pub fn node( + self, + node: N, + ) -> WithLaunchContext, N::ComponentsBuilder>> + where + N: Node>, + { + self.with_types().with_components(node.components_builder()) + } + + /// Launches a preconfigured [Node] + /// + /// This bootstraps the node internals, creates all the components with the given [Node] + /// + /// Returns a [NodeHandle] that can be used to interact with the node. + pub async fn launch_node( + self, + node: N, + ) -> eyre::Result< + NodeHandle< + NodeAdapter< + RethFullAdapter, + >>::Components, + >, + >, + > + where + N: Node>, + { + self.node(node).launch().await + } +} + +impl WithLaunchContext>> +where + DB: Database + DatabaseMetrics + DatabaseMetadata + Clone + Unpin + 'static, + T: NodeTypes, +{ + /// Advances the state of the node builder to the next state where all components are configured + pub fn with_components( + self, + components_builder: CB, + ) -> WithLaunchContext, CB>> + where + CB: NodeComponentsBuilder>, + { + WithLaunchContext { + builder: self.builder.with_components(components_builder), + task_executor: self.task_executor, + data_dir: self.data_dir, + } + } +} + +impl WithLaunchContext, CB>> +where + DB: Database + DatabaseMetrics + DatabaseMetadata + Clone + Unpin + 'static, + T: NodeTypes, + CB: NodeComponentsBuilder>, +{ + /// Sets the hook that is run once the node's components are initialized. + pub fn on_component_initialized(self, hook: F) -> Self + where + F: FnOnce(NodeAdapter, CB::Components>) -> eyre::Result<()> + + Send + + 'static, + { + Self { + builder: self.builder.on_component_initialized(hook), + task_executor: self.task_executor, + data_dir: self.data_dir, + } + } + + /// Sets the hook that is run once the node has started. + pub fn on_node_started(self, hook: F) -> Self + where + F: FnOnce( + FullNode, CB::Components>>, + ) -> eyre::Result<()> + + Send + + 'static, + { + Self { + builder: self.builder.on_node_started(hook), + task_executor: self.task_executor, + data_dir: self.data_dir, + } + } + + /// Sets the hook that is run once the rpc server is started. + pub fn on_rpc_started(self, hook: F) -> Self + where + F: FnOnce( + RpcContext<'_, NodeAdapter, CB::Components>>, + RethRpcServerHandles, + ) -> eyre::Result<()> + + Send + + 'static, + { + Self { + builder: self.builder.on_rpc_started(hook), + task_executor: self.task_executor, + data_dir: self.data_dir, + } + } + + /// Sets the hook that is run to configure the rpc modules. + pub fn extend_rpc_modules(self, hook: F) -> Self + where + F: FnOnce( + RpcContext<'_, NodeAdapter, CB::Components>>, + ) -> eyre::Result<()> + + Send + + 'static, + { + Self { + builder: self.builder.extend_rpc_modules(hook), + task_executor: self.task_executor, + data_dir: self.data_dir, + } + } + + /// Installs an ExEx (Execution Extension) in the node. 
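+ ///
+ /// A minimal sketch of installing one (the closure body and the `my_exex`
+ /// function are assumptions for illustration, not part of this crate):
+ ///
+ /// ```ignore
+ /// let builder = builder.install_exex("my-exex", |ctx| async move {
+ ///     // `my_exex(ctx)` is a hypothetical async task that runs for the
+ ///     // lifetime of the node and processes chain notifications.
+ ///     Ok(my_exex(ctx))
+ /// });
+ /// ```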
+ /// + /// # Note + /// + /// The ExEx ID must be unique. + pub fn install_exex(self, exex_id: impl Into, exex: F) -> Self + where + F: FnOnce(ExExContext, CB::Components>>) -> R + + Send + + 'static, + R: Future> + Send, + E: Future> + Send, + { + Self { + builder: self.builder.install_exex(exex_id, exex), + task_executor: self.task_executor, + data_dir: self.data_dir, + } + } + + /// Launches the node and returns a handle to it. + pub async fn launch( + self, + ) -> eyre::Result, CB::Components>>> { + let Self { builder, task_executor, data_dir } = self; + + let launcher = DefaultNodeLauncher::new(task_executor, data_dir); + builder.launch_with(launcher).await + } + + /// Check that the builder can be launched + /// + /// This is useful when writing tests to ensure that the builder is configured correctly. + pub fn check_launch(self) -> Self { + self + } +} + +/// Captures the necessary context for building the components of the node. +pub struct BuilderContext { + /// The current head of the blockchain at launch. + pub(crate) head: Head, + /// The configured provider to interact with the blockchain. + pub(crate) provider: Node::Provider, + /// The executor of the node. + pub(crate) executor: TaskExecutor, + /// The data dir of the node. + pub(crate) data_dir: ChainPath, + /// The config of the node + pub(crate) config: NodeConfig, + /// loaded config + pub(crate) reth_config: reth_config::Config, +} + +impl BuilderContext { + /// Create a new instance of [BuilderContext] + pub fn new( + head: Head, + provider: Node::Provider, + executor: TaskExecutor, + data_dir: ChainPath, + config: NodeConfig, + reth_config: reth_config::Config, + ) -> Self { + Self { head, provider, executor, data_dir, config, reth_config } + } + + /// Returns the configured provider to interact with the blockchain. + pub fn provider(&self) -> &Node::Provider { + &self.provider + } + + /// Returns the current head of the blockchain at launch. + pub fn head(&self) -> Head { + self.head + } + + /// Returns the config of the node. + pub fn config(&self) -> &NodeConfig { + &self.config + } + + /// Returns the data dir of the node. + /// + /// This gives access to all relevant files and directories of the node's datadir. + pub fn data_dir(&self) -> &ChainPath { + &self.data_dir + } + + /// Returns the executor of the node. + /// + /// This can be used to execute async tasks or functions during the setup. + pub fn task_executor(&self) -> &TaskExecutor { + &self.executor + } + + /// Returns the chain spec of the node. + pub fn chain_spec(&self) -> Arc { + self.provider().chain_spec() + } + + /// Returns the transaction pool config of the node. + pub fn pool_config(&self) -> PoolConfig { + self.config().txpool.pool_config() + } + + /// Loads `MAINNET_KZG_TRUSTED_SETUP`. + pub fn kzg_settings(&self) -> eyre::Result> { + Ok(Arc::clone(&MAINNET_KZG_TRUSTED_SETUP)) + } + + /// Returns the config for payload building. + pub fn payload_builder_config(&self) -> impl PayloadBuilderConfig { + self.config.builder.clone() + } + + /// Returns the default network config for the node. + pub fn network_config(&self) -> eyre::Result> { + self.config.network_config( + &self.reth_config, + self.provider.clone(), + self.executor.clone(), + self.head, + self.data_dir(), + ) + } + + /// Creates the [NetworkBuilder] for the node. 
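+ ///
+ /// Together with [BuilderContext::start_network] this wires up the p2p stack
+ /// (a sketch; `pool` is assumed to be an already-built transaction pool):
+ ///
+ /// ```ignore
+ /// let network_builder = ctx.network_builder().await?;
+ /// let handle = ctx.start_network(network_builder, pool.clone());
+ /// ```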
+ pub async fn network_builder(&self) -> eyre::Result> { + self.config + .build_network( + &self.reth_config, + self.provider.clone(), + self.executor.clone(), + self.head, + self.data_dir(), + ) + .await + } + + /// Convenience function to start the network. + /// + /// Spawns the configured network and associated tasks and returns the [NetworkHandle] connected + /// to that network. + pub fn start_network( + &self, + builder: NetworkBuilder, + pool: Pool, + ) -> NetworkHandle + where + Pool: TransactionPool + Unpin + 'static, + { + let (handle, network, txpool, eth) = builder + .transactions(pool, Default::default()) + .request_handler(self.provider().clone()) + .split_with_handle(); + + self.executor.spawn_critical("p2p txpool", txpool); + self.executor.spawn_critical("p2p eth request handler", eth); + + let default_peers_path = self.data_dir().known_peers(); + let known_peers_file = self.config.network.persistent_peers_file(default_peers_path); + self.executor.spawn_critical_with_graceful_shutdown_signal( + "p2p network task", + |shutdown| { + network.run_until_graceful_shutdown(shutdown, |network| { + write_peers_to_file(network, known_peers_file) + }) + }, + ); + + handle + } +} + +impl std::fmt::Debug for BuilderContext { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("BuilderContext") + .field("head", &self.head) + .field("provider", &std::any::type_name::()) + .field("executor", &self.executor) + .field("data_dir", &self.data_dir) + .field("config", &self.config) + .finish() + } +} diff --git a/crates/node/builder/src/builder/states.rs b/crates/node/builder/src/builder/states.rs new file mode 100644 index 000000000..103e4f174 --- /dev/null +++ b/crates/node/builder/src/builder/states.rs @@ -0,0 +1,239 @@ +//! Node builder states and helper traits. +//! +//! Keeps track of the current state of the node builder. +//! +//! The node builder process is essentially a state machine that transitions through various states +//! before the node can be launched. + +use crate::{ + components::{NodeComponents, NodeComponentsBuilder}, + exex::BoxedLaunchExEx, + hooks::NodeHooks, + launch::LaunchNode, + rpc::{RethRpcServerHandles, RpcContext, RpcHooks}, + FullNode, +}; +use reth_exex::ExExContext; +use reth_network::NetworkHandle; +use reth_node_api::{FullNodeComponents, FullNodeTypes, NodeTypes}; +use reth_node_core::node_config::NodeConfig; +use reth_payload_builder::PayloadBuilderHandle; +use reth_tasks::TaskExecutor; +use std::{fmt, future::Future}; + +/// A node builder that also has the configured types. +pub struct NodeBuilderWithTypes { + /// All settings for how the node should be configured. + config: NodeConfig, + /// The configured database for the node. + adapter: NodeTypesAdapter, +} + +impl NodeBuilderWithTypes { + /// Creates a new instance of the node builder with the given configuration and types. + pub fn new(config: NodeConfig, database: T::DB) -> Self { + Self { config, adapter: NodeTypesAdapter::new(database) } + } + + /// Advances the state of the node builder to the next state where all components are configured + pub fn with_components(self, components_builder: CB) -> NodeBuilderWithComponents + where + CB: NodeComponentsBuilder, + { + let Self { config, adapter } = self; + + NodeBuilderWithComponents { + config, + adapter, + components_builder, + add_ons: NodeAddOns { + hooks: NodeHooks::default(), + rpc: RpcHooks::new(), + exexs: Vec::new(), + }, + } + } +} + +/// Container for the node's types and the database the node uses. 
+pub(crate) struct NodeTypesAdapter { + /// The database type used by the node. + pub(crate) database: T::DB, +} + +impl NodeTypesAdapter { + /// Create a new adapter from the given node types. + pub(crate) fn new(database: T::DB) -> Self { + Self { database } + } +} + +impl fmt::Debug for NodeTypesAdapter { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("NodeTypesAdapter").field("db", &"...").field("types", &"...").finish() + } +} + +/// Container for the node's types and the components and other internals that can be used by addons +/// of the node. +pub struct NodeAdapter> { + /// The components of the node. + pub components: C, + /// The task executor for the node. + pub task_executor: TaskExecutor, + /// The provider of the node. + pub provider: T::Provider, +} + +impl> NodeTypes for NodeAdapter { + type Primitives = T::Primitives; + type Engine = T::Engine; +} + +impl> FullNodeTypes for NodeAdapter { + type DB = T::DB; + type Provider = T::Provider; +} + +impl> FullNodeComponents for NodeAdapter { + type Pool = C::Pool; + type Evm = C::Evm; + type Executor = C::Executor; + + fn pool(&self) -> &Self::Pool { + self.components.pool() + } + + fn evm_config(&self) -> &Self::Evm { + self.components.evm_config() + } + + fn block_executor(&self) -> &Self::Executor { + self.components.block_executor() + } + + fn provider(&self) -> &Self::Provider { + &self.provider + } + + fn network(&self) -> &NetworkHandle { + self.components.network() + } + + fn payload_builder(&self) -> &PayloadBuilderHandle { + self.components.payload_builder() + } + + fn task_executor(&self) -> &TaskExecutor { + &self.task_executor + } +} + +impl> Clone for NodeAdapter { + fn clone(&self) -> Self { + Self { + components: self.components.clone(), + task_executor: self.task_executor.clone(), + provider: self.provider.clone(), + } + } +} + +/// A fully type configured node builder. +/// +/// Supports adding additional addons to the node. +pub struct NodeBuilderWithComponents> { + /// All settings for how the node should be configured. + pub(crate) config: NodeConfig, + /// Adapter for the underlying node types and database + pub(crate) adapter: NodeTypesAdapter, + /// container for type specific components + pub(crate) components_builder: CB, + /// Additional node extensions. + pub(crate) add_ons: NodeAddOns>, +} + +impl> NodeBuilderWithComponents { + /// Sets the hook that is run once the node's components are initialized. + pub fn on_component_initialized(mut self, hook: F) -> Self + where + F: FnOnce(NodeAdapter) -> eyre::Result<()> + Send + 'static, + { + self.add_ons.hooks.set_on_component_initialized(hook); + self + } + + /// Sets the hook that is run once the node has started. + pub fn on_node_started(mut self, hook: F) -> Self + where + F: FnOnce(FullNode>) -> eyre::Result<()> + Send + 'static, + { + self.add_ons.hooks.set_on_node_started(hook); + self + } + + /// Sets the hook that is run once the rpc server is started. + pub fn on_rpc_started(mut self, hook: F) -> Self + where + F: FnOnce( + RpcContext<'_, NodeAdapter>, + RethRpcServerHandles, + ) -> eyre::Result<()> + + Send + + 'static, + { + self.add_ons.rpc.set_on_rpc_started(hook); + self + } + + /// Sets the hook that is run to configure the rpc modules. + pub fn extend_rpc_modules(mut self, hook: F) -> Self + where + F: FnOnce(RpcContext<'_, NodeAdapter>) -> eyre::Result<()> + + Send + + 'static, + { + self.add_ons.rpc.set_extend_rpc_modules(hook); + self + } + + /// Installs an ExEx (Execution Extension) in the node. 
+ /// + /// # Note + /// + /// The ExEx ID must be unique. + pub fn install_exex(mut self, exex_id: impl Into, exex: F) -> Self + where + F: FnOnce(ExExContext>) -> R + Send + 'static, + R: Future> + Send, + E: Future> + Send, + { + self.add_ons.exexs.push((exex_id.into(), Box::new(exex))); + self + } + + /// Launches the node with the given launcher. + pub async fn launch_with(self, launcher: L) -> eyre::Result + where + L: LaunchNode, + { + launcher.launch_node(self).await + } + + /// Check that the builder can be launched + /// + /// This is useful when writing tests to ensure that the builder is configured correctly. + pub fn check_launch(self) -> Self { + self + } +} + +/// Additional node extensions. +pub(crate) struct NodeAddOns { + /// Additional NodeHooks that are called at specific points in the node's launch lifecycle. + pub(crate) hooks: NodeHooks, + /// Additional RPC hooks. + pub(crate) rpc: RpcHooks, + /// The ExExs (execution extensions) of the node. + pub(crate) exexs: Vec<(String, Box>)>, +} diff --git a/crates/node/builder/src/components/builder.rs b/crates/node/builder/src/components/builder.rs new file mode 100644 index 000000000..abeb2ca05 --- /dev/null +++ b/crates/node/builder/src/components/builder.rs @@ -0,0 +1,308 @@ +//! A generic [NodeComponentsBuilder] + +use crate::{ + components::{ + Components, ExecutorBuilder, NetworkBuilder, NodeComponents, PayloadServiceBuilder, + PoolBuilder, + }, + BuilderContext, ConfigureEvm, FullNodeTypes, +}; +use reth_evm::execute::BlockExecutorProvider; +use reth_transaction_pool::TransactionPool; +use std::{future::Future, marker::PhantomData}; + +/// A generic, general purpose and customizable [`NodeComponentsBuilder`] implementation. +/// +/// This type is stateful and captures the configuration of the node's components. +/// +/// ## Component dependencies: +/// +/// The components of the node depend on each other: +/// - The payload builder service depends on the transaction pool. +/// - The network depends on the transaction pool. +/// +/// We distinguish between different kind of components: +/// - Components that are standalone, such as the transaction pool. +/// - Components that are spawned as a service, such as the payload builder service or the network. +/// +/// ## Builder lifecycle: +/// +/// First all standalone components are built. Then the service components are spawned. +/// All component builders are captured in the builder state and will be consumed once the node is +/// launched. +#[derive(Debug)] +pub struct ComponentsBuilder { + pool_builder: PoolB, + payload_builder: PayloadB, + network_builder: NetworkB, + executor_builder: ExecB, + _marker: PhantomData, +} + +impl + ComponentsBuilder +{ + /// Configures the node types. + pub fn node_types(self) -> ComponentsBuilder + where + Types: FullNodeTypes, + { + let Self { + pool_builder, + payload_builder, + network_builder, + executor_builder: evm_builder, + _marker, + } = self; + ComponentsBuilder { + executor_builder: evm_builder, + pool_builder, + payload_builder, + network_builder, + _marker: Default::default(), + } + } + + /// Apply a function to the pool builder. + pub fn map_pool(self, f: impl FnOnce(PoolB) -> PoolB) -> Self { + Self { + pool_builder: f(self.pool_builder), + payload_builder: self.payload_builder, + network_builder: self.network_builder, + executor_builder: self.executor_builder, + _marker: self._marker, + } + } + + /// Apply a function to the payload builder. 
+ pub fn map_payload(self, f: impl FnOnce(PayloadB) -> PayloadB) -> Self { + Self { + pool_builder: self.pool_builder, + payload_builder: f(self.payload_builder), + network_builder: self.network_builder, + executor_builder: self.executor_builder, + _marker: self._marker, + } + } + + /// Apply a function to the network builder. + pub fn map_network(self, f: impl FnOnce(NetworkB) -> NetworkB) -> Self { + Self { + pool_builder: self.pool_builder, + payload_builder: self.payload_builder, + network_builder: f(self.network_builder), + executor_builder: self.executor_builder, + _marker: self._marker, + } + } + + /// Apply a function to the executor builder. + pub fn map_executor(self, f: impl FnOnce(ExecB) -> ExecB) -> Self { + Self { + pool_builder: self.pool_builder, + payload_builder: self.payload_builder, + network_builder: self.network_builder, + executor_builder: f(self.executor_builder), + _marker: self._marker, + } + } +} + +impl + ComponentsBuilder +where + Node: FullNodeTypes, +{ + /// Configures the pool builder. + /// + /// This accepts a [PoolBuilder] instance that will be used to create the node's transaction + /// pool. + pub fn pool( + self, + pool_builder: PB, + ) -> ComponentsBuilder + where + PB: PoolBuilder, + { + let Self { + pool_builder: _, + payload_builder, + network_builder, + executor_builder: evm_builder, + _marker, + } = self; + ComponentsBuilder { + pool_builder, + payload_builder, + network_builder, + executor_builder: evm_builder, + _marker, + } + } +} + +impl + ComponentsBuilder +where + Node: FullNodeTypes, + PoolB: PoolBuilder, +{ + /// Configures the network builder. + /// + /// This accepts a [NetworkBuilder] instance that will be used to create the node's network + /// stack. + pub fn network( + self, + network_builder: NB, + ) -> ComponentsBuilder + where + NB: NetworkBuilder, + { + let Self { + pool_builder, + payload_builder, + network_builder: _, + executor_builder: evm_builder, + _marker, + } = self; + ComponentsBuilder { + pool_builder, + payload_builder, + network_builder, + executor_builder: evm_builder, + _marker, + } + } + + /// Configures the payload builder. + /// + /// This accepts a [PayloadServiceBuilder] instance that will be used to create the node's + /// payload builder service. + pub fn payload( + self, + payload_builder: PB, + ) -> ComponentsBuilder + where + PB: PayloadServiceBuilder, + { + let Self { + pool_builder, + payload_builder: _, + network_builder, + executor_builder: evm_builder, + _marker, + } = self; + ComponentsBuilder { + pool_builder, + payload_builder, + network_builder, + executor_builder: evm_builder, + _marker, + } + } + + /// Configures the executor builder. + /// + /// This accepts a [ExecutorBuilder] instance that will be used to create the node's components + /// for execution. 
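+ ///
+ /// For example (a sketch; the `My*` types are hypothetical implementations of
+ /// the respective component builder traits):
+ ///
+ /// ```ignore
+ /// let components_builder = ComponentsBuilder::default()
+ ///     .node_types::<MyNode>()
+ ///     .pool(MyPoolBuilder::default())
+ ///     .payload(MyPayloadBuilder::default())
+ ///     .network(MyNetworkBuilder::default())
+ ///     .executor(MyExecutorBuilder::default());
+ /// ```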
+ pub fn executor( + self, + executor_builder: EB, + ) -> ComponentsBuilder + where + EB: ExecutorBuilder, + { + let Self { pool_builder, payload_builder, network_builder, executor_builder: _, _marker } = + self; + ComponentsBuilder { + pool_builder, + payload_builder, + network_builder, + executor_builder, + _marker, + } + } +} + +impl NodeComponentsBuilder + for ComponentsBuilder +where + Node: FullNodeTypes, + PoolB: PoolBuilder, + NetworkB: NetworkBuilder, + PayloadB: PayloadServiceBuilder, + ExecB: ExecutorBuilder, +{ + type Components = Components; + + async fn build_components( + self, + context: &BuilderContext, + ) -> eyre::Result { + let Self { + pool_builder, + payload_builder, + network_builder, + executor_builder: evm_builder, + _marker, + } = self; + + let (evm_config, executor) = evm_builder.build_evm(context).await?; + let pool = pool_builder.build_pool(context).await?; + let network = network_builder.build_network(context, pool.clone()).await?; + let payload_builder = payload_builder.spawn_payload_service(context, pool.clone()).await?; + + Ok(Components { transaction_pool: pool, evm_config, network, payload_builder, executor }) + } +} + +impl Default for ComponentsBuilder<(), (), (), (), ()> { + fn default() -> Self { + Self { + pool_builder: (), + payload_builder: (), + network_builder: (), + executor_builder: (), + _marker: Default::default(), + } + } +} + +/// A type that configures all the customizable components of the node and knows how to build them. +/// +/// Implementers of this trait are responsible for building all the components of the node: See +/// [NodeComponents]. +/// +/// The [ComponentsBuilder] is a generic, general purpose implementation of this trait that can be +/// used to customize certain components of the node using the builder pattern and defaults, e.g. +/// Ethereum and Optimism. +/// A type that's responsible for building the components of the node. +pub trait NodeComponentsBuilder: Send { + /// The components for the node with the given types + type Components: NodeComponents; + + /// Consumes the type and returns the crated components. + fn build_components( + self, + ctx: &BuilderContext, + ) -> impl Future> + Send; +} + +impl NodeComponentsBuilder for F +where + Node: FullNodeTypes, + F: FnOnce(&BuilderContext) -> Fut + Send, + Fut: Future>> + Send, + Pool: TransactionPool + Unpin + 'static, + EVM: ConfigureEvm, + Executor: BlockExecutorProvider, +{ + type Components = Components; + + fn build_components( + self, + ctx: &BuilderContext, + ) -> impl Future> + Send { + self(ctx) + } +} diff --git a/crates/node/builder/src/components/execute.rs b/crates/node/builder/src/components/execute.rs new file mode 100644 index 000000000..891f8e01f --- /dev/null +++ b/crates/node/builder/src/components/execute.rs @@ -0,0 +1,41 @@ +//! EVM component for the node builder. +use crate::{BuilderContext, FullNodeTypes}; +use reth_evm::execute::BlockExecutorProvider; +use reth_node_api::ConfigureEvm; +use std::future::Future; + +/// A type that knows how to build the executor types. +pub trait ExecutorBuilder: Send { + /// The EVM config to use. + /// + /// This provides the node with the necessary configuration to configure an EVM. + type EVM: ConfigureEvm; + + /// The type that knows how to execute blocks. + type Executor: BlockExecutorProvider; + + /// Creates the EVM config. 
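+ ///
+ /// Since closures implement this trait, an executor builder can also be
+ /// provided inline (a sketch; `MyNode` is a hypothetical node type, and
+ /// `MyEvmConfig` and `MyExecutorProvider` are hypothetical [ConfigureEvm]
+ /// and [BlockExecutorProvider] implementations):
+ ///
+ /// ```ignore
+ /// let executor_builder = |_ctx: &BuilderContext<MyNode>| async move {
+ ///     Ok((MyEvmConfig::default(), MyExecutorProvider::default()))
+ /// };
+ /// ```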
+ fn build_evm( + self, + ctx: &BuilderContext, + ) -> impl Future> + Send; +} + +impl ExecutorBuilder for F +where + Node: FullNodeTypes, + EVM: ConfigureEvm, + Executor: BlockExecutorProvider, + F: FnOnce(&BuilderContext) -> Fut + Send, + Fut: Future> + Send, +{ + type EVM = EVM; + type Executor = Executor; + + fn build_evm( + self, + ctx: &BuilderContext, + ) -> impl Future> { + self(ctx) + } +} diff --git a/crates/node/builder/src/components/mod.rs b/crates/node/builder/src/components/mod.rs new file mode 100644 index 000000000..ef5ea4995 --- /dev/null +++ b/crates/node/builder/src/components/mod.rs @@ -0,0 +1,123 @@ +//! Support for configuring the components of a node. +//! +//! Customizable components of the node include: +//! - The transaction pool. +//! - The network implementation. +//! - The payload builder service. +//! +//! Components depend on a fully type configured node: [FullNodeTypes](crate::node::FullNodeTypes). + +use crate::{ConfigureEvm, FullNodeTypes}; +pub use builder::*; +pub use execute::*; +pub use network::*; +pub use payload::*; +pub use pool::*; +use reth_evm::execute::BlockExecutorProvider; +use reth_network::NetworkHandle; +use reth_payload_builder::PayloadBuilderHandle; +use reth_transaction_pool::TransactionPool; + +mod builder; +mod execute; +mod network; +mod payload; +mod pool; + +/// An abstraction over the components of a node, consisting of: +/// - evm and executor +/// - transaction pool +/// - network +/// - payload builder. +pub trait NodeComponents: Clone + Send + Sync + 'static { + /// The transaction pool of the node. + type Pool: TransactionPool + Unpin; + + /// The node's EVM configuration, defining settings for the Ethereum Virtual Machine. + type Evm: ConfigureEvm; + + /// The type that knows how to execute blocks. + type Executor: BlockExecutorProvider; + + /// Returns the transaction pool of the node. + fn pool(&self) -> &Self::Pool; + + /// Returns the node's evm config. + fn evm_config(&self) -> &Self::Evm; + + /// Returns the node's executor type. + fn block_executor(&self) -> &Self::Executor; + + /// Returns the handle to the network + fn network(&self) -> &NetworkHandle; + + /// Returns the handle to the payload builder service. + fn payload_builder(&self) -> &PayloadBuilderHandle; +} + +/// All the components of the node. +/// +/// This provides access to all the components of the node. +#[derive(Debug)] +pub struct Components { + /// The transaction pool of the node. + pub transaction_pool: Pool, + /// The node's EVM configuration, defining settings for the Ethereum Virtual Machine. + pub evm_config: EVM, + /// The node's executor type used to execute individual blocks and batches of blocks. + pub executor: Executor, + /// The network implementation of the node. + pub network: NetworkHandle, + /// The handle to the payload builder service. 
+ pub payload_builder: PayloadBuilderHandle, +} + +impl NodeComponents for Components +where + Node: FullNodeTypes, + Pool: TransactionPool + Unpin + 'static, + EVM: ConfigureEvm, + Executor: BlockExecutorProvider, +{ + type Pool = Pool; + type Evm = EVM; + type Executor = Executor; + + fn pool(&self) -> &Self::Pool { + &self.transaction_pool + } + + fn evm_config(&self) -> &Self::Evm { + &self.evm_config + } + + fn block_executor(&self) -> &Self::Executor { + &self.executor + } + + fn network(&self) -> &NetworkHandle { + &self.network + } + + fn payload_builder(&self) -> &PayloadBuilderHandle { + &self.payload_builder + } +} + +impl Clone for Components +where + Node: FullNodeTypes, + Pool: TransactionPool, + EVM: ConfigureEvm, + Executor: BlockExecutorProvider, +{ + fn clone(&self) -> Self { + Self { + transaction_pool: self.transaction_pool.clone(), + evm_config: self.evm_config.clone(), + executor: self.executor.clone(), + network: self.network.clone(), + payload_builder: self.payload_builder.clone(), + } + } +} diff --git a/crates/node-builder/src/components/network.rs b/crates/node/builder/src/components/network.rs similarity index 100% rename from crates/node-builder/src/components/network.rs rename to crates/node/builder/src/components/network.rs diff --git a/crates/node-builder/src/components/payload.rs b/crates/node/builder/src/components/payload.rs similarity index 100% rename from crates/node-builder/src/components/payload.rs rename to crates/node/builder/src/components/payload.rs diff --git a/crates/node-builder/src/components/pool.rs b/crates/node/builder/src/components/pool.rs similarity index 100% rename from crates/node-builder/src/components/pool.rs rename to crates/node/builder/src/components/pool.rs diff --git a/crates/node-builder/src/exex.rs b/crates/node/builder/src/exex.rs similarity index 100% rename from crates/node-builder/src/exex.rs rename to crates/node/builder/src/exex.rs diff --git a/crates/node-builder/src/handle.rs b/crates/node/builder/src/handle.rs similarity index 100% rename from crates/node-builder/src/handle.rs rename to crates/node/builder/src/handle.rs diff --git a/crates/node-builder/src/hooks.rs b/crates/node/builder/src/hooks.rs similarity index 84% rename from crates/node-builder/src/hooks.rs rename to crates/node/builder/src/hooks.rs index 9d2127f5a..468c84e85 100644 --- a/crates/node-builder/src/hooks.rs +++ b/crates/node/builder/src/hooks.rs @@ -77,15 +77,15 @@ pub trait OnComponentInitializedHook: Send { /// Consumes the event hook and runs it. /// /// If this returns an error, the node launch will be aborted. - fn on_event(&self, node: Node) -> eyre::Result<()>; + fn on_event(self: Box, node: Node) -> eyre::Result<()>; } impl OnComponentInitializedHook for F where - F: Fn(Node) -> eyre::Result<()> + Send, + F: FnOnce(Node) -> eyre::Result<()> + Send, { - fn on_event(&self, node: Node) -> eyre::Result<()> { - self(node) + fn on_event(self: Box, node: Node) -> eyre::Result<()> { + (*self)(node) } } @@ -94,27 +94,27 @@ pub trait OnNodeStartedHook: Send { /// Consumes the event hook and runs it. /// /// If this returns an error, the node launch will be aborted. 
- fn on_event(&self, node: FullNode) -> eyre::Result<()>; + fn on_event(self: Box, node: FullNode) -> eyre::Result<()>; } impl OnNodeStartedHook for F where Node: FullNodeComponents, - F: Fn(FullNode) -> eyre::Result<()> + Send, + F: FnOnce(FullNode) -> eyre::Result<()> + Send, { - fn on_event(&self, node: FullNode) -> eyre::Result<()> { - self(node) + fn on_event(self: Box, node: FullNode) -> eyre::Result<()> { + (*self)(node) } } impl OnComponentInitializedHook for () { - fn on_event(&self, _node: Node) -> eyre::Result<()> { + fn on_event(self: Box, _node: Node) -> eyre::Result<()> { Ok(()) } } impl OnNodeStartedHook for () { - fn on_event(&self, _node: FullNode) -> eyre::Result<()> { + fn on_event(self: Box, _node: FullNode) -> eyre::Result<()> { Ok(()) } } diff --git a/crates/node/builder/src/launch/common.rs b/crates/node/builder/src/launch/common.rs new file mode 100644 index 000000000..043b587b8 --- /dev/null +++ b/crates/node/builder/src/launch/common.rs @@ -0,0 +1,517 @@ +//! Helper types that can be used by launchers. + +use std::{cmp::max, sync::Arc, thread::available_parallelism}; + +use eyre::Context; +use rayon::ThreadPoolBuilder; +use tokio::sync::mpsc::Receiver; + +use reth_auto_seal_consensus::MiningMode; +use reth_config::{config::EtlConfig, PruneConfig}; +use reth_db::{database::Database, database_metrics::DatabaseMetrics}; +use reth_interfaces::p2p::headers::client::HeadersClient; +use reth_node_core::{ + cli::config::RethRpcConfig, + dirs::{ChainPath, DataDirPath}, + init::{init_genesis, InitDatabaseError}, + node_config::NodeConfig, +}; +use reth_primitives::{BlockNumber, Chain, ChainSpec, Head, PruneModes, B256}; +use reth_provider::{providers::StaticFileProvider, ProviderFactory, StaticFileProviderFactory}; +use reth_prune::PrunerBuilder; +use reth_rpc::JwtSecret; +use reth_static_file::StaticFileProducer; +use reth_tasks::TaskExecutor; +use reth_tracing::tracing::{error, info, warn}; + +/// Reusable setup for launching a node. +/// +/// This provides commonly used boilerplate for launching a node. +#[derive(Debug, Clone)] +pub struct LaunchContext { + /// The task executor for the node. + pub task_executor: TaskExecutor, + /// The data directory for the node. + pub data_dir: ChainPath, +} + +impl LaunchContext { + /// Create a new launch context. + pub const fn new(task_executor: TaskExecutor, data_dir: ChainPath) -> Self { + Self { task_executor, data_dir } + } + + /// Attaches a database to the launch context. + pub fn with(self, database: DB) -> LaunchContextWith { + LaunchContextWith { inner: self, attachment: database } + } + + /// Loads the reth config with the configured `data_dir` and overrides settings according to the + /// `config`. + /// + /// Attaches both the `NodeConfig` and the loaded `reth.toml` config to the launch context. + pub fn with_loaded_toml_config( + self, + config: NodeConfig, + ) -> eyre::Result> { + let toml_config = self.load_toml_config(&config)?; + Ok(self.with(WithConfigs { config, toml_config })) + } + + /// Loads the reth config with the configured `data_dir` and overrides settings according to the + /// `config`.
+ pub fn load_toml_config(&self, config: &NodeConfig) -> eyre::Result { + let config_path = config.config.clone().unwrap_or_else(|| self.data_dir.config()); + + let mut toml_config = confy::load_path::(&config_path) + .wrap_err_with(|| format!("Could not load config file {config_path:?}"))?; + + Self::save_pruning_config_if_full_node(&mut toml_config, config, &config_path)?; + + info!(target: "reth::cli", path = ?config_path, "Configuration loaded"); + + // Update the config with the command line arguments + toml_config.peers.trusted_nodes_only = config.network.trusted_only; + + if !config.network.trusted_peers.is_empty() { + info!(target: "reth::cli", "Adding trusted nodes"); + config.network.trusted_peers.iter().for_each(|peer| { + toml_config.peers.trusted_nodes.insert(*peer); + }); + } + + Ok(toml_config) + } + + /// Save prune config to the toml file if the node is a full node. + fn save_pruning_config_if_full_node( + reth_config: &mut reth_config::Config, + config: &NodeConfig, + config_path: impl AsRef, + ) -> eyre::Result<()> { + if reth_config.prune.is_none() { + if let Some(prune_config) = config.prune_config() { + reth_config.update_prune_confing(prune_config); + info!(target: "reth::cli", "Saving prune config to toml file"); + reth_config.save(config_path.as_ref())?; + } + } else if config.prune_config().is_none() { + warn!(target: "reth::cli", "Prune configs present in config file but --full not provided. Running as a Full node"); + } + Ok(()) + } + + /// Convenience function to [Self::configure_globals] + pub fn with_configured_globals(self) -> Self { + self.configure_globals(); + self + } + + /// Configure global settings. This includes: + /// + /// - Raising the file descriptor limit + /// - Configuring the global rayon thread pool + pub fn configure_globals(&self) { + // Raise the fd limit of the process. + // Does not do anything on windows. + let _ = fdlimit::raise_fd_limit(); + + // Limit the global rayon thread pool, reserving 2 cores for the rest of the system + let _ = ThreadPoolBuilder::new() + .num_threads( + available_parallelism().map_or(25, |cpus| max(cpus.get().saturating_sub(2), 2)), + ) + .build_global() + .map_err(|e| error!("Failed to build global thread pool: {:?}", e)); + } +} + +/// A [LaunchContext] along with an additional value. +/// +/// This can be used to sequentially attach additional values to the type during the launch process. +/// +/// The type provides common boilerplate for launching a node depending on the additional value. +#[derive(Debug, Clone)] +pub struct LaunchContextWith { + /// The wrapped launch context. + pub inner: LaunchContext, + /// The additional attached value. + pub attachment: T, +} + +impl LaunchContextWith { + /// Configure global settings. This includes: + /// + /// - Raising the file descriptor limit + /// - Configuring the global rayon thread pool + pub fn configure_globals(&self) { + self.inner.configure_globals(); + } + + /// Returns the data directory. + pub fn data_dir(&self) -> &ChainPath { + &self.inner.data_dir + } + + /// Returns the task executor. + pub fn task_executor(&self) -> &TaskExecutor { + &self.inner.task_executor + } + + /// Attaches another value to the launch context. + pub fn attach(self, attachment: A) -> LaunchContextWith> { + LaunchContextWith { + inner: self.inner, + attachment: Attached::new(self.attachment, attachment), + } + } + + /// Consumes the type and calls a function with a reference to the context.
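+ ///
+ /// Returns the context again so that calls can be chained.
+ ///
+ /// A usage sketch, mirroring how the default launcher below uses it (the
+ /// logging statement is illustrative):
+ ///
+ /// ```ignore
+ /// let ctx = ctx.inspect(|this| {
+ ///     info!(target: "reth::cli", "Using data dir {:?}", this.data_dir());
+ /// });
+ /// ```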
+ pub fn inspect(self, f: F) -> Self + where + F: FnOnce(&Self), + { + f(&self); + self + } +} + +impl LaunchContextWith> { + /// Get a reference to the left value. + pub const fn left(&self) -> &L { + &self.attachment.left + } + + /// Get a reference to the right value. + pub const fn right(&self) -> &R { + &self.attachment.right + } + + /// Get a mutable reference to the left value. + pub fn left_mut(&mut self) -> &mut L { + &mut self.attachment.left + } + + /// Get a mutable reference to the right value. + pub fn right_mut(&mut self) -> &mut R { + &mut self.attachment.right + } +} +impl LaunchContextWith> { + /// Adjust certain settings in the config to make sure they are set correctly. + /// + /// This includes: + /// - Making sure the ETL dir is set to the datadir + /// - RPC settings are adjusted to the correct port + pub fn with_adjusted_configs(self) -> Self { + self.ensure_etl_datadir().with_adjusted_rpc_instance_ports() + } + + /// Make sure ETL doesn't default to /tmp/, but to whatever datadir is set to + pub fn ensure_etl_datadir(mut self) -> Self { + if self.toml_config_mut().stages.etl.dir.is_none() { + self.toml_config_mut().stages.etl.dir = + Some(EtlConfig::from_datadir(self.data_dir().data_dir())) + } + + self + } + + /// Change rpc port numbers based on the instance number. + pub fn with_adjusted_rpc_instance_ports(mut self) -> Self { + self.node_config_mut().adjust_instance_ports(); + self + } + + /// Returns the attached [NodeConfig]. + pub const fn node_config(&self) -> &NodeConfig { + &self.left().config + } + + /// Returns the attached [NodeConfig]. + pub fn node_config_mut(&mut self) -> &mut NodeConfig { + &mut self.left_mut().config + } + + /// Returns the attached toml config [reth_config::Config]. + pub const fn toml_config(&self) -> &reth_config::Config { + &self.left().toml_config + } + + /// Returns the attached toml config [reth_config::Config]. + pub fn toml_config_mut(&mut self) -> &mut reth_config::Config { + &mut self.left_mut().toml_config + } + + /// Returns the configured chain spec. + pub fn chain_spec(&self) -> Arc { + self.node_config().chain.clone() + } + + /// Get the hash of the genesis block. + pub fn genesis_hash(&self) -> B256 { + self.node_config().chain.genesis_hash() + } + + /// Returns the chain identifier of the node. + pub fn chain_id(&self) -> Chain { + self.node_config().chain.chain + } + + /// Returns true if the node is configured with `--dev`. + pub fn is_dev(&self) -> bool { + self.node_config().dev.dev + } + + /// Returns the configured [PruneConfig] + pub fn prune_config(&self) -> Option { + self.node_config().prune_config().or_else(|| self.toml_config().prune.clone()) + } + + /// Returns the configured [PruneModes] + pub fn prune_modes(&self) -> Option { + self.prune_config().map(|config| config.segments) + } + + /// Returns an initialized [PrunerBuilder] based on the configured [PruneConfig] + pub fn pruner_builder(&self) -> PrunerBuilder { + PrunerBuilder::new(self.prune_config().unwrap_or_default()) + .prune_delete_limit(self.chain_spec().prune_delete_limit) + .timeout(PrunerBuilder::DEFAULT_TIMEOUT) + } + + /// Returns the initial pipeline target, based on whether or not the node is running in + /// `debug.tip` mode, `debug.continuous` mode, or neither. + /// + /// If running in `debug.tip` mode, the configured tip is returned. + /// Otherwise, if running in `debug.continuous` mode, the genesis hash is returned. + /// Otherwise, `None` is returned. This is what the node will do by default.
+ pub fn initial_pipeline_target(&self) -> Option { + self.node_config().initial_pipeline_target(self.genesis_hash()) + } + + /// Loads the JWT secret for the engine API + pub fn auth_jwt_secret(&self) -> eyre::Result { + let default_jwt_path = self.data_dir().jwt(); + let secret = self.node_config().rpc.auth_jwt_secret(default_jwt_path)?; + Ok(secret) + } + + /// Returns the [MiningMode] intended for --dev mode. + pub fn dev_mining_mode(&self, pending_transactions_listener: Receiver) -> MiningMode { + if let Some(interval) = self.node_config().dev.block_time { + MiningMode::interval(interval) + } else if let Some(max_transactions) = self.node_config().dev.block_max_transactions { + MiningMode::instant(max_transactions, pending_transactions_listener) + } else { + MiningMode::instant(1, pending_transactions_listener) + } + } +} + +impl LaunchContextWith> +where + DB: Clone, +{ + /// Returns the [ProviderFactory] for the attached database. + pub fn create_provider_factory(&self) -> eyre::Result> { + let factory = ProviderFactory::new( + self.right().clone(), + self.chain_spec(), + self.data_dir().static_files(), + )? + .with_static_files_metrics(); + + Ok(factory) + } + + /// Creates a new [ProviderFactory] and attaches it to the launch context. + pub fn with_provider_factory( + self, + ) -> eyre::Result>>> { + let factory = self.create_provider_factory()?; + let ctx = LaunchContextWith { + inner: self.inner, + attachment: self.attachment.map_right(|_| factory), + }; + + Ok(ctx) + } +} + +impl LaunchContextWith>> +where + DB: Database + DatabaseMetrics + Send + Sync + Clone + 'static, +{ + /// Returns access to the underlying database. + pub fn database(&self) -> &DB { + self.right().db_ref() + } + + /// Returns the configured ProviderFactory. + pub fn provider_factory(&self) -> &ProviderFactory { + self.right() + } + + /// Returns the static file provider to interact with the static files. + pub fn static_file_provider(&self) -> StaticFileProvider { + self.right().static_file_provider() + } + + /// Creates a new [StaticFileProducer] with the attached database. + pub fn static_file_producer(&self) -> StaticFileProducer { + StaticFileProducer::new( + self.provider_factory().clone(), + self.static_file_provider(), + self.prune_modes().unwrap_or_default(), + ) + } + + /// Convenience function to [Self::init_genesis] + pub fn with_genesis(self) -> Result { + init_genesis(self.provider_factory().clone())?; + Ok(self) + } + + /// Write the genesis block and state if it has not already been written + pub fn init_genesis(&self) -> Result { + init_genesis(self.provider_factory().clone()) + } + + /// Returns the max block that the node should run to, looking it up from the network if + /// necessary + pub async fn max_block(&self, client: C) -> eyre::Result> + where + C: HeadersClient, + { + self.node_config().max_block(client, self.provider_factory().clone()).await + } + + /// Convenience function to [Self::start_prometheus_endpoint] + pub async fn with_prometheus(self) -> eyre::Result { + self.start_prometheus_endpoint().await?; + Ok(self) + } + + /// Starts the prometheus endpoint. + pub async fn start_prometheus_endpoint(&self) -> eyre::Result<()> { + let prometheus_handle = self.node_config().install_prometheus_recorder()?; + self.node_config() + .start_metrics_endpoint( + prometheus_handle, + self.database().clone(), + self.static_file_provider(), + self.task_executor().clone(), + ) + .await + } + + /// Fetches the head block from the database. 
+ /// If the database is empty, returns the genesis block. + pub fn lookup_head(&self) -> eyre::Result { + self.node_config() + .lookup_head(self.provider_factory().clone()) + .wrap_err("the head block is missing") + } +} + +/// Joins two attachments together. +#[derive(Clone, Copy, Debug)] +pub struct Attached { + left: L, + right: R, +} + +impl Attached { + /// Creates a new `Attached` with the given values. + pub const fn new(left: L, right: R) -> Self { + Self { left, right } + } + + /// Maps the left value to a new value. + pub fn map_left(self, f: F) -> Attached + where + F: FnOnce(L) -> T, + { + Attached::new(f(self.left), self.right) + } + + /// Maps the right value to a new value. + pub fn map_right(self, f: F) -> Attached + where + F: FnOnce(R) -> T, + { + Attached::new(self.left, f(self.right)) + } + + /// Get a reference to the left value. + pub const fn left(&self) -> &L { + &self.left + } + + /// Get a reference to the right value. + pub const fn right(&self) -> &R { + &self.right + } + + /// Get a mutable reference to the left value. + pub fn left_mut(&mut self) -> &mut L { + &mut self.left + } + + /// Get a mutable reference to the right value. + pub fn right_mut(&mut self) -> &mut R { + &mut self.right + } +} + +/// Helper container type to bundle the initial [NodeConfig] and the loaded settings from the +/// `reth.toml` config. +#[derive(Debug, Clone)] +pub struct WithConfigs { + /// The configured node settings, usually derived from the CLI. + pub config: NodeConfig, + /// The loaded reth.toml config. + pub toml_config: reth_config::Config, +} + +#[cfg(test)] +mod tests { + use super::{LaunchContext, NodeConfig}; + use reth_config::Config; + use reth_node_core::args::PruningArgs; + + const EXTENSION: &str = "toml"; + + fn with_tempdir(filename: &str, proc: fn(&std::path::Path)) { + let temp_dir = tempfile::tempdir().unwrap(); + let config_path = temp_dir.path().join(filename).with_extension(EXTENSION); + proc(&config_path); + temp_dir.close().unwrap() + } + + #[test] + fn test_save_prune_config() { + with_tempdir("prune-store-test", |config_path| { + let mut reth_config = Config::default(); + let node_config = + NodeConfig { pruning: PruningArgs { full: true }, ..NodeConfig::test() }; + LaunchContext::save_pruning_config_if_full_node( + &mut reth_config, + &node_config, + config_path, + ) + .unwrap(); + + assert_eq!( + reth_config.prune.as_ref().map(|p| p.block_interval), + node_config.prune_config().map(|p| p.block_interval) + ); + + let loaded_config: Config = confy::load_path(config_path).unwrap(); + assert_eq!(reth_config, loaded_config); + }) + } +} diff --git a/crates/node/builder/src/launch/mod.rs b/crates/node/builder/src/launch/mod.rs new file mode 100644 index 000000000..221434758 --- /dev/null +++ b/crates/node/builder/src/launch/mod.rs @@ -0,0 +1,468 @@ +//! Abstraction for launching a node.
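+//!
+//! A launcher is anything that implements [LaunchNode]. A minimal sketch
+//! (`NoopLauncher` is a hypothetical illustration, not part of this crate):
+//!
+//! ```ignore
+//! struct NoopLauncher;
+//!
+//! impl<Target: Send> LaunchNode<Target> for NoopLauncher {
+//!     type Node = ();
+//!
+//!     async fn launch_node(self, _target: Target) -> eyre::Result<Self::Node> {
+//!         // A real launcher boots the database, blockchain tree, components
+//!         // and services here; see `DefaultNodeLauncher` below.
+//!         Ok(())
+//!     }
+//! }
+//! ```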
+ +use crate::{ + builder::{NodeAdapter, NodeAddOns, NodeTypesAdapter}, + components::{NodeComponents, NodeComponentsBuilder}, + hooks::NodeHooks, + node::FullNode, + BuilderContext, NodeBuilderWithComponents, NodeHandle, +}; +use futures::{future, future::Either, stream, stream_select, StreamExt}; +use reth_auto_seal_consensus::AutoSealConsensus; +use reth_beacon_consensus::{ + hooks::{EngineHooks, PruneHook, StaticFileHook}, + BeaconConsensusEngine, EthBeaconConsensus, +}; +use reth_blockchain_tree::{ + noop::NoopBlockchainTree, BlockchainTree, BlockchainTreeConfig, ShareableBlockchainTree, + TreeExternals, +}; +use reth_consensus::Consensus; +use reth_exex::{ExExContext, ExExHandle, ExExManager, ExExManagerHandle}; +use reth_interfaces::p2p::either::EitherDownloader; +use reth_network::NetworkEvents; +use reth_node_api::{FullNodeComponents, FullNodeTypes}; +use reth_node_core::{ + dirs::{ChainPath, DataDirPath}, + engine::EngineMessageStreamExt, + exit::NodeExitFuture, +}; +use reth_node_events::{cl::ConsensusLayerHealthEvents, node}; +use reth_primitives::format_ether; +use reth_provider::{providers::BlockchainProvider, CanonStateSubscriptions}; +use reth_rpc_engine_api::EngineApi; +use reth_tasks::TaskExecutor; +use reth_tracing::tracing::{debug, info}; +use reth_transaction_pool::TransactionPool; +use std::{future::Future, sync::Arc}; +use tokio::sync::{mpsc::unbounded_channel, oneshot}; +use tokio_stream::wrappers::UnboundedReceiverStream; + +pub mod common; +pub use common::LaunchContext; + +/// A general purpose trait that launches a new node of any kind. +/// +/// Acts as a node factory. +/// +/// This is essentially the launch logic for a node. +/// +/// See also [DefaultNodeLauncher] and [NodeBuilderWithComponents::launch_with] +pub trait LaunchNode { + /// The node type that is created. + type Node; + + /// Create and return a new node asynchronously. + fn launch_node(self, target: Target) -> impl Future> + Send; +} + +/// The default launcher for a node. +#[derive(Debug)] +pub struct DefaultNodeLauncher { + /// The launch context for the node. + pub ctx: LaunchContext, +} + +impl DefaultNodeLauncher { + /// Create a new instance of the default node launcher. + pub fn new(task_executor: TaskExecutor, data_dir: ChainPath) -> Self { + Self { ctx: LaunchContext::new(task_executor, data_dir) } + } +} + +impl LaunchNode> for DefaultNodeLauncher +where + T: FullNodeTypes::DB>>, + CB: NodeComponentsBuilder, +{ + type Node = NodeHandle>; + + async fn launch_node( + self, + target: NodeBuilderWithComponents, + ) -> eyre::Result { + let Self { ctx } = self; + let NodeBuilderWithComponents { + adapter: NodeTypesAdapter { database }, + components_builder, + add_ons: NodeAddOns { hooks, rpc, exexs: installed_exex }, + config, + } = target; + + // setup the launch context + let ctx = ctx + .with_configured_globals() + // load the toml config + .with_loaded_toml_config(config)? + // attach the database + .attach(database.clone()) + // ensure certain settings take effect + .with_adjusted_configs() + // Create the provider factory + .with_provider_factory()? + .inspect(|_| { + info!(target: "reth::cli", "Database opened"); + }) + .with_prometheus().await? + .inspect(|this| { + debug!(target: "reth::cli", chain=%this.chain_id(), genesis=?this.genesis_hash(), "Initializing genesis"); + }) + .with_genesis()?
+ .inspect(|this| {
+ info!(target: "reth::cli", "\n{}", this.chain_spec().display_hardforks());
+ });
+
+ // setup the consensus instance
+ let consensus: Arc<dyn Consensus> = if ctx.is_dev() {
+ Arc::new(AutoSealConsensus::new(ctx.chain_spec()))
+ } else {
+ Arc::new(EthBeaconConsensus::new(ctx.chain_spec()))
+ };
+
+ debug!(target: "reth::cli", "Spawning stages metrics listener task");
+ let (sync_metrics_tx, sync_metrics_rx) = unbounded_channel();
+ let sync_metrics_listener = reth_stages::MetricsListener::new(sync_metrics_rx);
+ ctx.task_executor().spawn_critical("stages metrics listener task", sync_metrics_listener);
+
+ // fetch the head block from the database
+ let head = ctx.lookup_head()?;
+
+ // Configure the blockchain tree for the node
+ let tree_config = BlockchainTreeConfig::default();
+
+ // NOTE: This is a temporary workaround to provide the canon state notification sender
+ // to the components builder, because there is a cyclic dependency between the blockchain
+ // provider and the tree component. It will be removed once the blockchain provider no
+ // longer depends on an instance of the tree.
+ let (canon_state_notification_sender, _receiver) =
+ tokio::sync::broadcast::channel(tree_config.max_reorg_depth() as usize * 2);
+
+ let blockchain_db = BlockchainProvider::new(
+ ctx.provider_factory().clone(),
+ Arc::new(NoopBlockchainTree::with_canon_state_notifications(
+ canon_state_notification_sender.clone(),
+ )),
+ )?;
+
+ let builder_ctx = BuilderContext::new(
+ head,
+ blockchain_db.clone(),
+ ctx.task_executor().clone(),
+ ctx.data_dir().clone(),
+ ctx.node_config().clone(),
+ ctx.toml_config().clone(),
+ );
+
+ debug!(target: "reth::cli", "creating components");
+ let components = components_builder.build_components(&builder_ctx).await?;
+
+ let tree_externals = TreeExternals::new(
+ ctx.provider_factory().clone(),
+ consensus.clone(),
+ components.block_executor().clone(),
+ );
+ let tree = BlockchainTree::new(tree_externals, tree_config, ctx.prune_modes())?
+ .with_sync_metrics_tx(sync_metrics_tx.clone())
+ // Note: This is required because we need to ensure that both the components and the
+ // tree are using the same channel for canon state notifications. This will be removed
+ // once the blockchain provider no longer depends on an instance of the tree
+ .with_canon_state_notification_sender(canon_state_notification_sender);
+
+ let canon_state_notification_sender = tree.canon_state_notification_sender();
+ let blockchain_tree = Arc::new(ShareableBlockchainTree::new(tree));
+
+ // Replace the tree component with the actual tree
+ let blockchain_db = blockchain_db.with_tree(blockchain_tree);
+
+ debug!(target: "reth::cli", "configured blockchain tree");
+
+ let NodeHooks { on_component_initialized, on_node_started, ..
} = hooks; + + let node_adapter = NodeAdapter { + components, + task_executor: ctx.task_executor().clone(), + provider: blockchain_db.clone(), + }; + + debug!(target: "reth::cli", "calling on_component_initialized hook"); + on_component_initialized.on_event(node_adapter.clone())?; + + // spawn exexs + let mut exex_handles = Vec::with_capacity(installed_exex.len()); + let mut exexs = Vec::with_capacity(installed_exex.len()); + for (id, exex) in installed_exex { + // create a new exex handle + let (handle, events, notifications) = ExExHandle::new(id.clone()); + exex_handles.push(handle); + + // create the launch context for the exex + let context = ExExContext { + head, + data_dir: ctx.data_dir().clone(), + config: ctx.node_config().clone(), + reth_config: ctx.toml_config().clone(), + components: node_adapter.clone(), + events, + notifications, + }; + + let executor = ctx.task_executor().clone(); + exexs.push(async move { + debug!(target: "reth::cli", id, "spawning exex"); + let span = reth_tracing::tracing::info_span!("exex", id); + let _enter = span.enter(); + + // init the exex + let exex = exex.launch(context).await.unwrap(); + + // spawn it as a crit task + executor.spawn_critical("exex", async move { + info!(target: "reth::cli", "ExEx started"); + match exex.await { + Ok(_) => panic!("ExEx {id} finished. ExEx's should run indefinitely"), + Err(err) => panic!("ExEx {id} crashed: {err}"), + } + }); + }); + } + + future::join_all(exexs).await; + + // spawn exex manager + let exex_manager_handle = if !exex_handles.is_empty() { + debug!(target: "reth::cli", "spawning exex manager"); + // todo(onbjerg): rm magic number + let exex_manager = ExExManager::new(exex_handles, 1024); + let exex_manager_handle = exex_manager.handle(); + ctx.task_executor().spawn_critical("exex manager", async move { + exex_manager.await.expect("exex manager crashed"); + }); + + // send notifications from the blockchain tree to exex manager + let mut canon_state_notifications = blockchain_db.subscribe_to_canonical_state(); + let mut handle = exex_manager_handle.clone(); + ctx.task_executor().spawn_critical( + "exex manager blockchain tree notifications", + async move { + while let Ok(notification) = canon_state_notifications.recv().await { + handle.send_async(notification.into()).await.expect( + "blockchain tree notification could not be sent to exex manager", + ); + } + }, + ); + + info!(target: "reth::cli", "ExEx Manager started"); + + Some(exex_manager_handle) + } else { + None + }; + + // create pipeline + let network_client = node_adapter.network().fetch_client().await?; + let (consensus_engine_tx, consensus_engine_rx) = unbounded_channel(); + + let node_config = ctx.node_config(); + let consensus_engine_stream = UnboundedReceiverStream::from(consensus_engine_rx) + .maybe_skip_fcu(node_config.debug.skip_fcu) + .maybe_skip_new_payload(node_config.debug.skip_new_payload) + // Store messages _after_ skipping so that `replay-engine` command + // would replay only the messages that were observed by the engine + // during this run. 
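+ // (The `maybe_skip_*` adapters above are debug aids that drop engine
+ // messages according to the --debug.skip-fcu / --debug.skip-new-payload settings.)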
+ .maybe_store_messages(node_config.debug.engine_api_store.clone()); + + let max_block = ctx.max_block(network_client.clone()).await?; + let mut hooks = EngineHooks::new(); + + let static_file_producer = ctx.static_file_producer(); + let static_file_producer_events = static_file_producer.lock().events(); + hooks.add(StaticFileHook::new( + static_file_producer.clone(), + Box::new(ctx.task_executor().clone()), + )); + info!(target: "reth::cli", "StaticFileProducer initialized"); + + // Configure the pipeline + let pipeline_exex_handle = + exex_manager_handle.clone().unwrap_or_else(ExExManagerHandle::empty); + let (mut pipeline, client) = if ctx.is_dev() { + info!(target: "reth::cli", "Starting Reth in dev mode"); + + for (idx, (address, alloc)) in ctx.chain_spec().genesis.alloc.iter().enumerate() { + info!(target: "reth::cli", "Allocated Genesis Account: {:02}. {} ({} ETH)", idx, address.to_string(), format_ether(alloc.balance)); + } + + // install auto-seal + let mining_mode = + ctx.dev_mining_mode(node_adapter.components.pool().pending_transactions_listener()); + info!(target: "reth::cli", mode=%mining_mode, "configuring dev mining mode"); + + let (_, client, mut task) = reth_auto_seal_consensus::AutoSealBuilder::new( + ctx.chain_spec(), + blockchain_db.clone(), + node_adapter.components.pool().clone(), + consensus_engine_tx.clone(), + canon_state_notification_sender, + mining_mode, + node_adapter.components.block_executor().clone(), + ) + .build(); + + let mut pipeline = crate::setup::build_networked_pipeline( + ctx.node_config(), + &ctx.toml_config().stages, + client.clone(), + Arc::clone(&consensus), + ctx.provider_factory().clone(), + ctx.task_executor(), + sync_metrics_tx, + ctx.prune_config(), + max_block, + static_file_producer, + node_adapter.components.block_executor().clone(), + pipeline_exex_handle, + ) + .await?; + + let pipeline_events = pipeline.events(); + task.set_pipeline_events(pipeline_events); + debug!(target: "reth::cli", "Spawning auto mine task"); + ctx.task_executor().spawn(Box::pin(task)); + + (pipeline, EitherDownloader::Left(client)) + } else { + let pipeline = crate::setup::build_networked_pipeline( + ctx.node_config(), + &ctx.toml_config().stages, + network_client.clone(), + Arc::clone(&consensus), + ctx.provider_factory().clone(), + ctx.task_executor(), + sync_metrics_tx, + ctx.prune_config(), + max_block, + static_file_producer, + node_adapter.components.block_executor().clone(), + pipeline_exex_handle, + ) + .await?; + + (pipeline, EitherDownloader::Right(network_client.clone())) + }; + + let pipeline_events = pipeline.events(); + + let initial_target = ctx.initial_pipeline_target(); + + let mut pruner_builder = + ctx.pruner_builder().max_reorg_depth(tree_config.max_reorg_depth() as usize); + if let Some(exex_manager_handle) = &exex_manager_handle { + pruner_builder = + pruner_builder.finished_exex_height(exex_manager_handle.finished_height()); + } + + let mut pruner = pruner_builder.build(ctx.provider_factory().clone()); + + let pruner_events = pruner.events(); + info!(target: "reth::cli", prune_config=?ctx.prune_config().unwrap_or_default(), "Pruner initialized"); + hooks.add(PruneHook::new(pruner, Box::new(ctx.task_executor().clone()))); + + // Configure the consensus engine + let (beacon_consensus_engine, beacon_engine_handle) = BeaconConsensusEngine::with_channel( + client, + pipeline, + blockchain_db.clone(), + Box::new(ctx.task_executor().clone()), + Box::new(node_adapter.components.network().clone()), + max_block, + 
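// whether to run the pipeline continuously, from the --debug.continuous flag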
ctx.node_config().debug.continuous, + node_adapter.components.payload_builder().clone(), + initial_target, + reth_beacon_consensus::MIN_BLOCKS_FOR_PIPELINE_RUN, + consensus_engine_tx, + Box::pin(consensus_engine_stream), + hooks, + )?; + info!(target: "reth::cli", "Consensus engine initialized"); + + let events = stream_select!( + node_adapter.components.network().event_listener().map(Into::into), + beacon_engine_handle.event_listener().map(Into::into), + pipeline_events.map(Into::into), + if ctx.node_config().debug.tip.is_none() && !ctx.is_dev() { + Either::Left( + ConsensusLayerHealthEvents::new(Box::new(blockchain_db.clone())) + .map(Into::into), + ) + } else { + Either::Right(stream::empty()) + }, + pruner_events.map(Into::into), + static_file_producer_events.map(Into::into) + ); + ctx.task_executor().spawn_critical( + "events task", + node::handle_events( + Some(node_adapter.components.network().clone()), + Some(head.number), + events, + database.clone(), + ), + ); + + let engine_api = EngineApi::new( + blockchain_db.clone(), + ctx.chain_spec(), + beacon_engine_handle, + node_adapter.components.payload_builder().clone().into(), + Box::new(ctx.task_executor().clone()), + ); + info!(target: "reth::cli", "Engine API handler initialized"); + + // extract the jwt secret from the args if possible + let jwt_secret = ctx.auth_jwt_secret()?; + + // Start RPC servers + let (rpc_server_handles, mut rpc_registry) = crate::rpc::launch_rpc_servers( + node_adapter.clone(), + engine_api, + ctx.node_config(), + jwt_secret, + rpc, + ) + .await?; + + // in dev mode we generate 20 random dev-signer accounts + if ctx.is_dev() { + rpc_registry.eth_api().with_dev_accounts(); + } + + // Run consensus engine to completion + let (tx, rx) = oneshot::channel(); + info!(target: "reth::cli", "Starting consensus engine"); + ctx.task_executor().spawn_critical_blocking("consensus engine", async move { + let res = beacon_consensus_engine.await; + let _ = tx.send(res); + }); + + let full_node = FullNode { + evm_config: node_adapter.components.evm_config().clone(), + pool: node_adapter.components.pool().clone(), + network: node_adapter.components.network().clone(), + provider: node_adapter.provider.clone(), + payload_builder: node_adapter.components.payload_builder().clone(), + task_executor: ctx.task_executor().clone(), + rpc_server_handles, + rpc_registry, + config: ctx.node_config().clone(), + data_dir: ctx.data_dir().clone(), + }; + // Notify on node started + on_node_started.on_event(full_node.clone())?; + + let handle = NodeHandle { + node_exit_future: NodeExitFuture::new(rx, full_node.config.debug.terminate), + node: full_node, + }; + + Ok(handle) + } +} diff --git a/crates/node-builder/src/lib.rs b/crates/node/builder/src/lib.rs similarity index 97% rename from crates/node-builder/src/lib.rs rename to crates/node/builder/src/lib.rs index f5d7012d1..11b56ba24 100644 --- a/crates/node-builder/src/lib.rs +++ b/crates/node/builder/src/lib.rs @@ -21,6 +21,9 @@ pub mod components; mod builder; pub use builder::*; +mod launch; +pub use launch::*; + mod handle; pub use handle::NodeHandle; diff --git a/crates/node-builder/src/node.rs b/crates/node/builder/src/node.rs similarity index 85% rename from crates/node-builder/src/node.rs rename to crates/node/builder/src/node.rs index 766bae14f..7831f29d0 100644 --- a/crates/node-builder/src/node.rs +++ b/crates/node/builder/src/node.rs @@ -1,7 +1,4 @@ -use crate::{ - components::ComponentsBuilder, - rpc::{RethRpcServerHandles, RpcRegistry}, -}; +use 
crate::rpc::{RethRpcServerHandles, RpcRegistry}; use reth_network::NetworkHandle; use reth_node_api::FullNodeComponents; use reth_node_core::{ @@ -19,23 +16,18 @@ use reth_tasks::TaskExecutor; use std::sync::Arc; // re-export the node api types +use crate::components::NodeComponentsBuilder; pub use reth_node_api::{FullNodeTypes, NodeTypes}; -/// A [Node] is a [NodeTypes] that comes with preconfigured components. +/// A [crate::Node] is a [NodeTypes] that comes with preconfigured components. /// /// This can be used to configure the builder with a preset of components. -pub trait Node: NodeTypes + Clone { - /// The type that builds the node's pool. - type PoolBuilder; - /// The type that builds the node's network. - type NetworkBuilder; - /// The type that builds the node's payload service. - type PayloadBuilder; +pub trait Node: NodeTypes + Clone { + /// The type that builds the node's components. + type ComponentsBuilder: NodeComponentsBuilder; - /// Returns the [ComponentsBuilder] for the node. - fn components( - self, - ) -> ComponentsBuilder; + /// Returns a [NodeComponentsBuilder] for the node. + fn components_builder(self) -> Self::ComponentsBuilder; } /// The launched node with all components including RPC handlers. diff --git a/crates/node-builder/src/rpc.rs b/crates/node/builder/src/rpc.rs similarity index 93% rename from crates/node-builder/src/rpc.rs rename to crates/node/builder/src/rpc.rs index d6e2eb0f2..3ac553fa3 100644 --- a/crates/node-builder/src/rpc.rs +++ b/crates/node/builder/src/rpc.rs @@ -98,7 +98,7 @@ impl fmt::Debug for RpcHooks { pub trait OnRpcStarted: Send { /// The hook that is called once the rpc server is started. fn on_rpc_started( - &self, + self: Box, ctx: RpcContext<'_, Node>, handles: RethRpcServerHandles, ) -> eyre::Result<()>; @@ -106,20 +106,24 @@ pub trait OnRpcStarted: Send { impl OnRpcStarted for F where - F: Fn(RpcContext<'_, Node>, RethRpcServerHandles) -> eyre::Result<()> + Send, + F: FnOnce(RpcContext<'_, Node>, RethRpcServerHandles) -> eyre::Result<()> + Send, Node: FullNodeComponents, { fn on_rpc_started( - &self, + self: Box, ctx: RpcContext<'_, Node>, handles: RethRpcServerHandles, ) -> eyre::Result<()> { - self(ctx, handles) + (*self)(ctx, handles) } } impl OnRpcStarted for () { - fn on_rpc_started(&self, _: RpcContext<'_, Node>, _: RethRpcServerHandles) -> eyre::Result<()> { + fn on_rpc_started( + self: Box, + _: RpcContext<'_, Node>, + _: RethRpcServerHandles, + ) -> eyre::Result<()> { Ok(()) } } @@ -127,21 +131,21 @@ impl OnRpcStarted for () { /// Event hook that is called when the rpc server is started. pub trait ExtendRpcModules: Send { /// The hook that is called once the rpc server is started. 
- fn extend_rpc_modules(&self, ctx: RpcContext<'_, Node>) -> eyre::Result<()>; + fn extend_rpc_modules(self: Box, ctx: RpcContext<'_, Node>) -> eyre::Result<()>; } impl ExtendRpcModules for F where - F: Fn(RpcContext<'_, Node>) -> eyre::Result<()> + Send, + F: FnOnce(RpcContext<'_, Node>) -> eyre::Result<()> + Send, Node: FullNodeComponents, { - fn extend_rpc_modules(&self, ctx: RpcContext<'_, Node>) -> eyre::Result<()> { - self(ctx) + fn extend_rpc_modules(self: Box, ctx: RpcContext<'_, Node>) -> eyre::Result<()> { + (*self)(ctx) } } impl ExtendRpcModules for () { - fn extend_rpc_modules(&self, _: RpcContext<'_, Node>) -> eyre::Result<()> { + fn extend_rpc_modules(self: Box, _: RpcContext<'_, Node>) -> eyre::Result<()> { Ok(()) } } @@ -270,7 +274,7 @@ where .with_network(node.network().clone()) .with_events(node.provider().clone()) .with_executor(node.task_executor().clone()) - .with_evm_config(node.evm_config()) + .with_evm_config(node.evm_config().clone()) .build_with_auth_server(module_config, engine_api); let mut registry = RpcRegistry { registry }; @@ -301,7 +305,6 @@ where let launch_auth = auth_module.clone().start_server(auth_config).map_ok(|handle| { let addr = handle.local_addr(); if let Some(ipc_endpoint) = handle.ipc_endpoint() { - let ipc_endpoint = ipc_endpoint.path(); info!(target: "reth::cli", url=%addr, ipc_endpoint=%ipc_endpoint,"RPC auth server started"); } else { info!(target: "reth::cli", url=%addr, "RPC auth server started"); diff --git a/crates/node-builder/src/setup.rs b/crates/node/builder/src/setup.rs similarity index 83% rename from crates/node-builder/src/setup.rs rename to crates/node/builder/src/setup.rs index bb67cad66..8033ab1c6 100644 --- a/crates/node-builder/src/setup.rs +++ b/crates/node/builder/src/setup.rs @@ -1,26 +1,23 @@ //! Helpers for setting up parts of the node. 
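
The hunks below swap the pipeline's `ConfigureEvm` generic for a `BlockExecutorProvider`. A minimal standalone sketch of that pattern follows; `ExecutorProviderLike` is an illustrative stand-in, not reth's real trait:

// Stand-in for reth's `BlockExecutorProvider`; name and surface are illustrative.
trait ExecutorProviderLike: Clone {
    fn execute_block(&self, number: u64) -> Result<(), String>;
}

#[derive(Clone)]
struct NoopExecutorProvider;

impl ExecutorProviderLike for NoopExecutorProvider {
    fn execute_block(&self, number: u64) -> Result<(), String> {
        println!("executed block {number}");
        Ok(())
    }
}

// Mirrors the new `build_pipeline` shape: the caller hands in a ready-made
// executor provider and the builder clones it into each stage that needs one,
// instead of constructing an EVM processor factory internally.
fn build_pipeline_sketch<E: ExecutorProviderLike>(executor: E) -> Result<(), String> {
    let for_default_stages = executor.clone(); // e.g. DefaultStages
    let for_execution_stage = executor; // e.g. ExecutionStage
    for_default_stages.execute_block(1)?;
    for_execution_stage.execute_block(2)
}

fn main() {
    build_pipeline_sketch(NoopExecutorProvider).expect("sketch failed");
}
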
-use crate::ConfigureEvm; use reth_config::{config::StageConfig, PruneConfig}; +use reth_consensus::Consensus; use reth_db::database::Database; use reth_downloaders::{ bodies::bodies::BodiesDownloaderBuilder, headers::reverse_headers::ReverseHeadersDownloaderBuilder, }; +use reth_evm::execute::BlockExecutorProvider; use reth_exex::ExExManagerHandle; -use reth_interfaces::{ - consensus::Consensus, - p2p::{ - bodies::{client::BodiesClient, downloader::BodyDownloader}, - headers::{client::HeadersClient, downloader::HeaderDownloader}, - }, +use reth_interfaces::p2p::{ + bodies::{client::BodiesClient, downloader::BodyDownloader}, + headers::{client::HeadersClient, downloader::HeaderDownloader}, }; use reth_node_core::{ node_config::NodeConfig, primitives::{BlockNumber, B256}, }; use reth_provider::{HeaderSyncMode, ProviderFactory}; -use reth_revm::stack::{Hook, InspectorStackConfig}; use reth_stages::{ prelude::DefaultStages, stages::{ @@ -38,7 +35,7 @@ use tokio::sync::watch; /// Constructs a [Pipeline] that's wired to the network #[allow(clippy::too_many_arguments)] -pub async fn build_networked_pipeline( +pub async fn build_networked_pipeline( node_config: &NodeConfig, config: &StageConfig, client: Client, @@ -49,13 +46,13 @@ pub async fn build_networked_pipeline( prune_config: Option, max_block: Option, static_file_producer: StaticFileProducer, - evm_config: EvmConfig, + executor: Executor, exex_manager_handle: ExExManagerHandle, ) -> eyre::Result> where DB: Database + Unpin + Clone + 'static, Client: HeadersClient + BodiesClient + Clone + 'static, - EvmConfig: ConfigureEvm + Clone + 'static, + Executor: BlockExecutorProvider, { // building network downloaders using the fetch client let header_downloader = ReverseHeadersDownloaderBuilder::new(config.headers) @@ -77,7 +74,7 @@ where metrics_tx, prune_config, static_file_producer, - evm_config, + executor, exex_manager_handle, ) .await?; @@ -87,7 +84,7 @@ where /// Builds the [Pipeline] with the given [ProviderFactory] and downloaders. 
#[allow(clippy::too_many_arguments)] -pub async fn build_pipeline( +pub async fn build_pipeline( node_config: &NodeConfig, provider_factory: ProviderFactory, stage_config: &StageConfig, @@ -98,14 +95,14 @@ pub async fn build_pipeline( metrics_tx: reth_stages::MetricEventsSender, prune_config: Option, static_file_producer: StaticFileProducer, - evm_config: EvmConfig, + executor: Executor, exex_manager_handle: ExExManagerHandle, ) -> eyre::Result> where DB: Database + Clone + 'static, H: HeaderDownloader + 'static, B: BodyDownloader + 'static, - EvmConfig: ConfigureEvm + Clone + 'static, + Executor: BlockExecutorProvider, { let mut builder = Pipeline::builder(); @@ -115,22 +112,6 @@ where } let (tip_tx, tip_rx) = watch::channel(B256::ZERO); - let factory = reth_revm::EvmProcessorFactory::new(node_config.chain.clone(), evm_config); - - let stack_config = InspectorStackConfig { - use_printer_tracer: node_config.debug.print_inspector, - hook: if let Some(hook_block) = node_config.debug.hook_block { - Hook::Block(hook_block) - } else if let Some(tx) = node_config.debug.hook_transaction { - Hook::Transaction(tx) - } else if node_config.debug.hook_all { - Hook::All - } else { - Hook::None - }, - }; - - let factory = factory.with_stack_config(stack_config); let prune_modes = prune_config.map(|prune| prune.segments).unwrap_or_default(); @@ -149,7 +130,7 @@ where Arc::clone(&consensus), header_downloader, body_downloader, - factory.clone(), + executor.clone(), stage_config.etl.clone(), ) .set(SenderRecoveryStage { @@ -157,7 +138,7 @@ where }) .set( ExecutionStage::new( - factory, + executor, ExecutionStageThresholds { max_blocks: stage_config.execution.max_blocks, max_changes: stage_config.execution.max_changes, diff --git a/crates/node/events/Cargo.toml b/crates/node/events/Cargo.toml index 9c66f1469..83f2bd13a 100644 --- a/crates/node/events/Cargo.toml +++ b/crates/node/events/Cargo.toml @@ -18,9 +18,9 @@ reth-network-api.workspace = true reth-stages.workspace = true reth-prune.workspace = true reth-static-file.workspace = true -reth-interfaces.workspace = true reth-db.workspace = true reth-primitives.workspace = true +reth-rpc-types.workspace = true # async tokio.workspace = true diff --git a/crates/node/events/src/node.rs b/crates/node/events/src/node.rs index b18cc5f0b..ba7ae8da4 100644 --- a/crates/node/events/src/node.rs +++ b/crates/node/events/src/node.rs @@ -6,7 +6,6 @@ use reth_beacon_consensus::{ BeaconConsensusEngineEvent, ConsensusEngineLiveSyncProgress, ForkchoiceStatus, }; use reth_db::{database::Database, database_metrics::DatabaseMetadata}; -use reth_interfaces::consensus::ForkchoiceState; use reth_network::{NetworkEvent, NetworkHandle}; use reth_network_api::PeersInfo; use reth_primitives::{ @@ -15,6 +14,7 @@ use reth_primitives::{ BlockNumber, B256, }; use reth_prune::PrunerEvent; +use reth_rpc_types::engine::ForkchoiceState; use reth_stages::{ExecOutput, PipelineEvent}; use reth_static_file::StaticFileProducerEvent; use std::{ @@ -217,11 +217,22 @@ impl NodeState { self.current_stage = None; } } + PipelineEvent::Unwind { stage_id, input } => { + let current_stage = CurrentStage { + stage_id, + eta: Eta::default(), + checkpoint: input.checkpoint, + target: Some(input.unwind_to), + entities_checkpoint: input.checkpoint.entities(), + }; + + self.current_stage = Some(current_stage); + } _ => (), } } - fn handle_network_event(&mut self, _: NetworkEvent) { + fn handle_network_event(&self, _: NetworkEvent) { // NOTE(onbjerg): This used to log established/disconnecting sessions, but this 
is already
// logged in the networking component. I kept this stub in case we want to catch other
// networking events later on.
diff --git a/crates/optimism/consensus/Cargo.toml b/crates/optimism/consensus/Cargo.toml
new file mode 100644
index 000000000..4ebbaa8d8
--- /dev/null
+++ b/crates/optimism/consensus/Cargo.toml
@@ -0,0 +1,23 @@
+[package]
+name = "reth-optimism-consensus"
+version.workspace = true
+edition.workspace = true
+rust-version.workspace = true
+license.workspace = true
+homepage.workspace = true
+repository.workspace = true
+exclude.workspace = true
+
+[lints]
+workspace = true
+
+[dependencies]
+# reth
+reth-consensus-common.workspace = true
+reth-primitives.workspace = true
+reth-consensus.workspace = true
+
+[features]
+optimism = [
+ "reth-primitives/optimism",
+]
\ No newline at end of file
diff --git a/crates/optimism/consensus/src/lib.rs b/crates/optimism/consensus/src/lib.rs
new file mode 100644
index 000000000..4deea2879
--- /dev/null
+++ b/crates/optimism/consensus/src/lib.rs
@@ -0,0 +1,102 @@
+//! Optimism Consensus implementation.
+
+#![doc(
+ html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png",
+ html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256",
+ issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/"
+)]
+#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
+// The `optimism` feature must be enabled to use this crate.
+#![cfg(feature = "optimism")]
+
+use reth_consensus::{Consensus, ConsensusError};
+use reth_consensus_common::{validation, validation::validate_header_extradata};
+use reth_primitives::{ChainSpec, Header, SealedBlock, SealedHeader, EMPTY_OMMER_ROOT_HASH, U256};
+use std::{sync::Arc, time::SystemTime};
+
+/// Optimism consensus implementation.
+///
+/// Provides basic checks as outlined in the execution specs.
+#[derive(Debug, Clone, PartialEq, Eq)]
+pub struct OptimismBeaconConsensus {
+ /// Configuration
+ chain_spec: Arc<ChainSpec>,
+}
+
+impl OptimismBeaconConsensus {
+ /// Create a new instance of [OptimismBeaconConsensus]
+ ///
+ /// # Panics
+ ///
+ /// Panics if the given chain spec is not an optimism chain spec, see [ChainSpec::is_optimism].
+ pub fn new(chain_spec: Arc<ChainSpec>) -> Self {
+ assert!(chain_spec.is_optimism(), "optimism consensus only valid for optimism chains");
+ Self { chain_spec }
+ }
+}
+
+impl Consensus for OptimismBeaconConsensus {
+ fn validate_header(&self, header: &SealedHeader) -> Result<(), ConsensusError> {
+ validation::validate_header_standalone(header, &self.chain_spec)?;
+ Ok(())
+ }
+
+ fn validate_header_against_parent(
+ &self,
+ header: &SealedHeader,
+ parent: &SealedHeader,
+ ) -> Result<(), ConsensusError> {
+ header.validate_against_parent(parent, &self.chain_spec).map_err(ConsensusError::from)?;
+ Ok(())
+ }
+
+ fn validate_header_with_total_difficulty(
+ &self,
+ header: &Header,
+ _total_difficulty: U256,
+ ) -> Result<(), ConsensusError> {
+ // On the OP Stack, the Bedrock activation block determines when TTD (the eth Merge) has been reached.
+ let is_post_merge = self.chain_spec.is_bedrock_active_at_block(header.number);
+
+ if is_post_merge {
+ if header.nonce != 0 {
+ return Err(ConsensusError::TheMergeNonceIsNotZero)
+ }
+
+ if header.ommers_hash != EMPTY_OMMER_ROOT_HASH {
+ return Err(ConsensusError::TheMergeOmmerRootIsNotEmpty)
+ }
+
+ // Post-merge, the consensus layer is expected to perform checks such that the block
+ // timestamp is a function of the slot. This is different from pre-merge, where blocks
+ // are only allowed to be in the future (compared to the system's clock) by a certain
+ // threshold.
+ //
+ // Block validation with respect to the parent should ensure that the block timestamp
+ // is greater than its parent timestamp.
+
+ // validate header extradata for all networks post merge
+ validate_header_extradata(header)?;
+
+ // mixHash is used instead of difficulty inside EVM
+ // https://eips.ethereum.org/EIPS/eip-4399#using-mixhash-field-instead-of-difficulty
+ } else {
+ // Check if the timestamp is in the future. Clocks can drift, but this can become a
+ // consensus issue.
+ let present_timestamp =
+ SystemTime::now().duration_since(SystemTime::UNIX_EPOCH).unwrap().as_secs();
+
+ if header.exceeds_allowed_future_timestamp(present_timestamp) {
+ return Err(ConsensusError::TimestampIsInFuture {
+ timestamp: header.timestamp,
+ present_timestamp,
+ })
+ }
+ }
+
+ Ok(())
+ }
+
+ fn validate_block(&self, block: &SealedBlock) -> Result<(), ConsensusError> {
+ validation::validate_block_standalone(block, &self.chain_spec)
+ }
+}
diff --git a/crates/optimism/evm/Cargo.toml b/crates/optimism/evm/Cargo.toml
new file mode 100644
index 000000000..a1c3a168b
--- /dev/null
+++ b/crates/optimism/evm/Cargo.toml
@@ -0,0 +1,38 @@
+[package]
+name = "reth-evm-optimism"
+version.workspace = true
+edition.workspace = true
+rust-version.workspace = true
+license.workspace = true
+homepage.workspace = true
+repository.workspace = true
+
+[lints]
+workspace = true
+
+[dependencies]
+# Reth
+reth-evm.workspace = true
+reth-primitives.workspace = true
+reth-revm.workspace = true
+reth-interfaces.workspace = true
+reth-provider.workspace = true
+
+# Optimism
+revm.workspace = true
+revm-primitives.workspace = true
+
+# misc
+thiserror.workspace = true
+tracing.workspace = true
+
+[dev-dependencies]
+reth-revm = { workspace = true, features = ["test-utils"] }
+
+[features]
+optimism = [
+ "reth-primitives/optimism",
+ "reth-provider/optimism",
+ "reth-interfaces/optimism",
+ "revm-primitives/optimism",
+]
diff --git a/crates/optimism/evm/src/error.rs b/crates/optimism/evm/src/error.rs
new file mode 100644
index 000000000..de923d44c
--- /dev/null
+++ b/crates/optimism/evm/src/error.rs
@@ -0,0 +1,29 @@
+//! Error types for the Optimism EVM module.
+
+use reth_interfaces::executor::BlockExecutionError;
+
+/// Optimism Block Executor Errors
+#[derive(thiserror::Error, Debug, Clone, PartialEq, Eq)]
+pub enum OptimismBlockExecutionError {
+ /// Error when trying to parse L1 block info
+ #[error("could not get L1 block info from L2 block: {message:?}")]
+ L1BlockInfoError {
+ /// The inner error message
+ message: String,
+ },
+ /// Thrown when the force deploy of the create2deployer code fails.
+ #[error("failed to force create2deployer account code")]
+ ForceCreate2DeployerFail,
+ /// Thrown when a blob transaction is included in a sequencer's block.
+ #[error("blob transaction included in sequencer block")]
+ BlobTransactionRejected,
+ /// Thrown when a database account could not be loaded.
+ #[error("failed to load account {0}")] + AccountLoadFailed(reth_primitives::Address), +} + +impl From for BlockExecutionError { + fn from(err: OptimismBlockExecutionError) -> Self { + BlockExecutionError::other(err) + } +} diff --git a/crates/optimism/node/src/evm/execute.rs b/crates/optimism/evm/src/execute.rs similarity index 77% rename from crates/optimism/node/src/evm/execute.rs rename to crates/optimism/evm/src/execute.rs index cca13fb7d..f729ceda1 100644 --- a/crates/optimism/node/src/evm/execute.rs +++ b/crates/optimism/evm/src/execute.rs @@ -1,29 +1,27 @@ //! Optimism block executor. -use crate::OptimismEvmConfig; +use crate::{ + l1::ensure_create2_deployer, verify::verify_receipts, OptimismBlockExecutionError, + OptimismEvmConfig, +}; use reth_evm::{ execute::{ - BatchBlockOutput, BatchExecutor, EthBlockExecutionInput, EthBlockOutput, Executor, - ExecutorProvider, + BatchBlockExecutionOutput, BatchExecutor, BlockExecutionInput, BlockExecutionOutput, + BlockExecutorProvider, Executor, }, - ConfigureEvm, ConfigureEvmEnv, + ConfigureEvm, }; use reth_interfaces::{ - executor::{BlockExecutionError, BlockValidationError, OptimismBlockExecutionError}, + executor::{BlockExecutionError, BlockValidationError}, provider::ProviderError, }; use reth_primitives::{ - proofs::calculate_receipt_root_optimism, BlockWithSenders, Bloom, Bytes, ChainSpec, - GotExpected, Hardfork, Header, PruneModes, Receipt, ReceiptWithBloom, Receipts, TxType, - Withdrawals, B256, U256, + BlockNumber, BlockWithSenders, ChainSpec, GotExpected, Hardfork, Header, PruneModes, Receipt, + Receipts, TxType, Withdrawals, U256, }; -use reth_provider::BundleStateWithReceipts; use reth_revm::{ batch::{BlockBatchRecord, BlockExecutorStats}, db::states::bundle_state::BundleRetention, - optimism::ensure_create2_deployer, - processor::compare_receipts_root_and_logs_bloom, - stack::InspectorStack, state_change::{apply_beacon_root_contract_call, post_block_balance_increments}, Evm, State, }; @@ -36,14 +34,12 @@ use tracing::{debug, trace}; /// Provides executors to execute regular ethereum blocks #[derive(Debug, Clone)] -pub struct OpExecutorProvider { +pub struct OpExecutorProvider { chain_spec: Arc, evm_config: EvmConfig, - inspector: Option, - prune_modes: PruneModes, } -impl OpExecutorProvider { +impl OpExecutorProvider { /// Creates a new default optimism executor provider. pub fn optimism(chain_spec: Arc) -> Self { Self::new(chain_spec, Default::default()) @@ -53,26 +49,13 @@ impl OpExecutorProvider { impl OpExecutorProvider { /// Creates a new executor provider. pub fn new(chain_spec: Arc, evm_config: EvmConfig) -> Self { - Self { chain_spec, evm_config, inspector: None, prune_modes: PruneModes::none() } - } - - /// Configures an optional inspector stack for debugging. - pub fn with_inspector(mut self, inspector: Option) -> Self { - self.inspector = inspector; - self - } - - /// Configures the prune modes for the executor. 
- pub fn with_prune_modes(mut self, prune_modes: PruneModes) -> Self { - self.prune_modes = prune_modes; - self + Self { chain_spec, evm_config } } } impl OpExecutorProvider where EvmConfig: ConfigureEvm, - EvmConfig: ConfigureEvmEnv, { fn op_executor(&self, db: DB) -> OpBlockExecutor where @@ -83,14 +66,12 @@ where self.evm_config.clone(), State::builder().with_database(db).with_bundle_update().without_state_clear().build(), ) - .with_inspector(self.inspector.clone()) } } -impl ExecutorProvider for OpExecutorProvider +impl BlockExecutorProvider for OpExecutorProvider where EvmConfig: ConfigureEvm, - EvmConfig: ConfigureEvmEnv, { type Executor> = OpBlockExecutor; @@ -102,14 +83,14 @@ where self.op_executor(db) } - fn batch_executor(&self, db: DB) -> Self::BatchExecutor + fn batch_executor(&self, db: DB, prune_modes: PruneModes) -> Self::BatchExecutor where DB: Database, { let executor = self.op_executor(db); OpBatchExecutor { executor, - batch_record: BlockBatchRecord::new(self.prune_modes.clone()), + batch_record: BlockBatchRecord::new(prune_modes), stats: BlockExecutorStats::default(), } } @@ -127,7 +108,6 @@ struct OpEvmExecutor { impl OpEvmExecutor where EvmConfig: ConfigureEvm, - EvmConfig: ConfigureEvmEnv, { /// Executes the transactions in the block and returns the receipts. /// @@ -137,7 +117,7 @@ where /// /// It does __not__ apply post-execution changes. fn execute_pre_and_transactions( - &mut self, + &self, block: &BlockWithSenders, mut evm: Evm<'_, Ext, &mut State>, ) -> Result<(Vec, u64), BlockExecutionError> @@ -161,13 +141,8 @@ where // blocks will always have at least a single transaction in them (the L1 info transaction), // so we can safely assume that this will always be triggered upon the transition and that // the above check for empty blocks will never be hit on OP chains. - ensure_create2_deployer(self.chain_spec.clone(), block.timestamp, evm.db_mut()).map_err( - |_| { - BlockExecutionError::OptimismBlockExecution( - OptimismBlockExecutionError::ForceCreate2DeployerFail, - ) - }, - )?; + ensure_create2_deployer(self.chain_spec.clone(), block.timestamp, evm.db_mut()) + .map_err(|_| OptimismBlockExecutionError::ForceCreate2DeployerFail)?; let mut cumulative_gas_used = 0; let mut receipts = Vec::with_capacity(block.body.len()); @@ -182,14 +157,12 @@ where transaction_gas_limit: transaction.gas_limit(), block_available_gas, } - .into()) + .into()); } // An optimism block should never contain blob transactions. if matches!(transaction.tx_type(), TxType::Eip4844) { - return Err(BlockExecutionError::OptimismBlockExecution( - OptimismBlockExecutionError::BlobTransactionRejected, - )) + return Err(OptimismBlockExecutionError::BlobTransactionRejected.into()); } // Cache the depositor account prior to the state transition for the deposit nonce. @@ -204,15 +177,9 @@ where .map(|acc| acc.account_info().unwrap_or_default()) }) .transpose() - .map_err(|_| { - BlockExecutionError::OptimismBlockExecution( - OptimismBlockExecutionError::AccountLoadFailed(*sender), - ) - })?; + .map_err(|_| OptimismBlockExecutionError::AccountLoadFailed(*sender))?; - let mut buf = Vec::with_capacity(transaction.length_without_header()); - transaction.encode_enveloped(&mut buf); - EvmConfig::fill_tx_env(evm.tx_mut(), transaction, *sender, buf.into()); + EvmConfig::fill_tx_env(evm.tx_mut(), transaction, *sender); // Execute transaction. 
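// (`transact` runs the transaction against the in-memory state and returns the
// outcome plus a state diff without committing it; the diff is committed to
// the cache database separately afterwards.)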
let ResultAndState { result, state } = evm.transact().map_err(move |err| { @@ -261,7 +228,7 @@ where gas: GotExpected { got: cumulative_gas_used, expected: block.gas_used }, gas_spent_by_tx: receipts.gas_spent_by_tx()?, } - .into()) + .into()); } Ok((receipts, cumulative_gas_used)) @@ -279,20 +246,12 @@ pub struct OpBlockExecutor { executor: OpEvmExecutor, /// The state to use for execution state: State, - /// Optional inspector stack for debugging - inspector: Option, } impl OpBlockExecutor { /// Creates a new Ethereum block executor. pub fn new(chain_spec: Arc, evm_config: EvmConfig, state: State) -> Self { - Self { executor: OpEvmExecutor { chain_spec, evm_config }, state, inspector: None } - } - - /// Sets the inspector stack for debugging. - pub fn with_inspector(mut self, inspector: Option) -> Self { - self.inspector = inspector; - self + Self { executor: OpEvmExecutor { chain_spec, evm_config }, state } } #[inline] @@ -310,8 +269,6 @@ impl OpBlockExecutor { impl OpBlockExecutor where EvmConfig: ConfigureEvm, - // TODO(mattsse): get rid of this - EvmConfig: ConfigureEvmEnv, DB: Database, { /// Configures a new evm configuration and block environment for the given block. @@ -348,19 +305,9 @@ where let env = self.evm_env_for_block(&block.header, total_difficulty); let (receipts, gas_used) = { - if let Some(inspector) = self.inspector.as_mut() { - let evm = self.executor.evm_config.evm_with_env_and_inspector( - &mut self.state, - env, - inspector, - ); - self.executor.execute_pre_and_transactions(block, evm)? - } else { - let evm = self.executor.evm_config.evm_with_env(&mut self.state, env); - - self.executor.execute_pre_and_transactions(block, evm)? - } - }; + let evm = self.executor.evm_config.evm_with_env(&mut self.state, env); + self.executor.execute_pre_and_transactions(block, evm) + }?; // 3. apply post execution changes self.post_execution(block, total_difficulty)?; @@ -370,7 +317,7 @@ where // transaction This was replaced with is_success flag. // See more about EIP here: https://eips.ethereum.org/EIPS/eip-658 if self.chain_spec().is_byzantium_active_at_block(block.header.number) { - if let Err(error) = verify_receipt_optimism( + if let Err(error) = verify_receipts( block.header.receipts_root, block.header.logs_bloom, receipts.iter(), @@ -378,7 +325,7 @@ where block.timestamp, ) { debug!(target: "evm", %error, ?receipts, "receipts verification failed"); - return Err(error) + return Err(error); }; } @@ -421,11 +368,10 @@ where impl Executor for OpBlockExecutor where EvmConfig: ConfigureEvm, - EvmConfig: ConfigureEvmEnv, DB: Database, { - type Input<'a> = EthBlockExecutionInput<'a, BlockWithSenders>; - type Output = EthBlockOutput; + type Input<'a> = BlockExecutionInput<'a, BlockWithSenders>; + type Output = BlockExecutionOutput; type Error = BlockExecutionError; /// Executes the block and commits the state changes. @@ -436,13 +382,13 @@ where /// /// State changes are committed to the database. 
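// (This is the one-shot execution path; the `BatchExecutor` impl further down
// reuses the same inner executor but accumulates receipts across blocks via
// `BlockBatchRecord`.)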
fn execute(mut self, input: Self::Input<'_>) -> Result { - let EthBlockExecutionInput { block, total_difficulty } = input; + let BlockExecutionInput { block, total_difficulty } = input; let (receipts, gas_used) = self.execute_and_verify(block, total_difficulty)?; - // prepare the state for extraction - self.state.merge_transitions(BundleRetention::PlainState); + // NOTE: we need to merge keep the reverts for the bundle retention + self.state.merge_transitions(BundleRetention::Reverts); - Ok(EthBlockOutput { state: self.state.take_bundle(), receipts, gas_used }) + Ok(BlockExecutionOutput { state: self.state.take_bundle(), receipts, gas_used }) } } @@ -474,16 +420,14 @@ impl OpBatchExecutor { impl BatchExecutor for OpBatchExecutor where EvmConfig: ConfigureEvm, - // TODO: get rid of this - EvmConfig: ConfigureEvmEnv, DB: Database, { - type Input<'a> = EthBlockExecutionInput<'a, BlockWithSenders>; - type Output = BundleStateWithReceipts; + type Input<'a> = BlockExecutionInput<'a, BlockWithSenders>; + type Output = BatchBlockExecutionOutput; type Error = BlockExecutionError; - fn execute_one(&mut self, input: Self::Input<'_>) -> Result { - let EthBlockExecutionInput { block, total_difficulty } = input; + fn execute_one(&mut self, input: Self::Input<'_>) -> Result<(), Self::Error> { + let BlockExecutionInput { block, total_difficulty } = input; let (receipts, _gas_used) = self.executor.execute_and_verify(block, total_difficulty)?; // prepare the state according to the prune mode @@ -493,45 +437,30 @@ where // store receipts in the set self.batch_record.save_receipts(receipts)?; - Ok(BatchBlockOutput { size_hint: Some(self.executor.state.bundle_size_hint()) }) + if self.batch_record.first_block().is_none() { + self.batch_record.set_first_block(block.number); + } + + Ok(()) } fn finalize(mut self) -> Self::Output { - // TODO: track stats self.stats.log_debug(); - BundleStateWithReceipts::new( + BatchBlockExecutionOutput::new( self.executor.state.take_bundle(), self.batch_record.take_receipts(), self.batch_record.first_block().unwrap_or_default(), ) } -} -/// Verify the calculated receipts root against the expected receipts root. -pub fn verify_receipt_optimism<'a>( - expected_receipts_root: B256, - expected_logs_bloom: Bloom, - receipts: impl Iterator + Clone, - chain_spec: &ChainSpec, - timestamp: u64, -) -> Result<(), BlockExecutionError> { - // Calculate receipts root. - let receipts_with_bloom = receipts.map(|r| r.clone().into()).collect::>(); - let receipts_root = - calculate_receipt_root_optimism(&receipts_with_bloom, chain_spec, timestamp); - - // Create header log bloom. 
- let logs_bloom = receipts_with_bloom.iter().fold(Bloom::ZERO, |bloom, r| bloom | r.bloom); - - compare_receipts_root_and_logs_bloom( - receipts_root, - logs_bloom, - expected_receipts_root, - expected_logs_bloom, - )?; - - Ok(()) + fn set_tip(&mut self, tip: BlockNumber) { + self.batch_record.set_tip(tip); + } + + fn size_hint(&self) -> Option { + Some(self.executor.state.bundle_state.size_hint()) + } } #[cfg(test)] @@ -539,15 +468,13 @@ mod tests { use super::*; use reth_primitives::{ b256, Account, Address, Block, ChainSpecBuilder, Signature, StorageKey, StorageValue, - Transaction, TransactionKind, TransactionSigned, TxEip1559, BASE_MAINNET, + Transaction, TransactionSigned, TxEip1559, BASE_MAINNET, + }; + use reth_revm::{ + database::StateProviderDatabase, test_utils::StateProviderTest, L1_BLOCK_CONTRACT, }; - use reth_revm::database::StateProviderDatabase; - use revm::L1_BLOCK_CONTRACT; use std::{collections::HashMap, str::FromStr}; - use crate::OptimismEvmConfig; - use reth_revm::test_utils::StateProviderTest; - fn create_op_state_provider() -> StateProviderTest { let mut db = StateProviderTest::default(); @@ -576,12 +503,7 @@ mod tests { } fn executor_provider(chain_spec: Arc) -> OpExecutorProvider { - OpExecutorProvider { - chain_spec, - evm_config: Default::default(), - inspector: None, - prune_modes: Default::default(), - } + OpExecutorProvider { chain_spec, evm_config: Default::default() } } #[test] @@ -611,7 +533,7 @@ mod tests { chain_id: chain_spec.chain.id(), nonce: 0, gas_limit: 21_000, - to: TransactionKind::Call(addr), + to: addr.into(), ..Default::default() }), Signature::default(), @@ -620,7 +542,7 @@ mod tests { let tx_deposit = TransactionSigned::from_transaction_and_signature( Transaction::Deposit(reth_primitives::TxDeposit { from: addr, - to: TransactionKind::Call(addr), + to: addr.into(), gas_limit: 21_000, ..Default::default() }), @@ -628,7 +550,8 @@ mod tests { ); let provider = executor_provider(chain_spec); - let mut executor = provider.batch_executor(StateProviderDatabase::new(&db)); + let mut executor = + provider.batch_executor(StateProviderDatabase::new(&db), PruneModes::none()); executor.state_mut().load_cache_account(L1_BLOCK_CONTRACT).unwrap(); @@ -691,7 +614,7 @@ mod tests { chain_id: chain_spec.chain.id(), nonce: 0, gas_limit: 21_000, - to: TransactionKind::Call(addr), + to: addr.into(), ..Default::default() }), Signature::default(), @@ -700,7 +623,7 @@ mod tests { let tx_deposit = TransactionSigned::from_transaction_and_signature( Transaction::Deposit(reth_primitives::TxDeposit { from: addr, - to: TransactionKind::Call(addr), + to: addr.into(), gas_limit: 21_000, ..Default::default() }), @@ -708,7 +631,8 @@ mod tests { ); let provider = executor_provider(chain_spec); - let mut executor = provider.batch_executor(StateProviderDatabase::new(&db)); + let mut executor = + provider.batch_executor(StateProviderDatabase::new(&db), PruneModes::none()); executor.state_mut().load_cache_account(L1_BLOCK_CONTRACT).unwrap(); diff --git a/crates/revm/src/optimism/mod.rs b/crates/optimism/evm/src/l1.rs similarity index 76% rename from crates/revm/src/optimism/mod.rs rename to crates/optimism/evm/src/l1.rs index 470e7a914..7b605448f 100644 --- a/crates/revm/src/optimism/mod.rs +++ b/crates/optimism/evm/src/l1.rs @@ -1,7 +1,7 @@ -use reth_interfaces::{ - executor::{self as reth_executor, BlockExecutionError}, - RethError, -}; +//! 
Optimism-specific implementation and utilities for the executor + +use crate::OptimismBlockExecutionError; +use reth_interfaces::{executor::BlockExecutionError, RethError}; use reth_primitives::{address, b256, hex, Address, Block, Bytes, ChainSpec, Hardfork, B256, U256}; use revm::{ primitives::{Bytecode, HashMap, SpecId}, @@ -10,14 +10,13 @@ use revm::{ use std::sync::Arc; use tracing::trace; -/// Optimism-specific processor implementation for the `EVMProcessor` -pub mod processor; - /// The address of the create2 deployer const CREATE_2_DEPLOYER_ADDR: Address = address!("13b0D85CcB8bf860b6b79AF3029fCA081AE9beF2"); + /// The codehash of the create2 deployer contract. const CREATE_2_DEPLOYER_CODEHASH: B256 = b256!("b0550b5b431e30d38000efb7107aaa0ade03d48a7198a140edda9d27134468b2"); + /// The raw bytecode of the create2 deployer contract. const CREATE_2_DEPLOYER_BYTECODE: [u8; 1584] = hex!("6080604052600436106100435760003560e01c8063076c37b21461004f578063481286e61461007157806356299481146100ba57806366cfa057146100da57600080fd5b3661004a57005b600080fd5b34801561005b57600080fd5b5061006f61006a366004610327565b6100fa565b005b34801561007d57600080fd5b5061009161008c366004610327565b61014a565b60405173ffffffffffffffffffffffffffffffffffffffff909116815260200160405180910390f35b3480156100c657600080fd5b506100916100d5366004610349565b61015d565b3480156100e657600080fd5b5061006f6100f53660046103ca565b610172565b61014582826040518060200161010f9061031a565b7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe082820381018352601f90910116604052610183565b505050565b600061015683836102e7565b9392505050565b600061016a8484846102f0565b949350505050565b61017d838383610183565b50505050565b6000834710156101f4576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601d60248201527f437265617465323a20696e73756666696369656e742062616c616e636500000060448201526064015b60405180910390fd5b815160000361025f576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820181905260248201527f437265617465323a2062797465636f6465206c656e677468206973207a65726f60448201526064016101eb565b8282516020840186f5905073ffffffffffffffffffffffffffffffffffffffff8116610156576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601960248201527f437265617465323a204661696c6564206f6e206465706c6f790000000000000060448201526064016101eb565b60006101568383305b6000604051836040820152846020820152828152600b8101905060ff815360559020949350505050565b61014e806104ad83390190565b6000806040838503121561033a57600080fd5b50508035926020909101359150565b60008060006060848603121561035e57600080fd5b8335925060208401359150604084013573ffffffffffffffffffffffffffffffffffffffff8116811461039057600080fd5b809150509250925092565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b6000806000606084860312156103df57600080fd5b8335925060208401359150604084013567ffffffffffffffff8082111561040557600080fd5b818601915086601f83011261041957600080fd5b81358181111561042b5761042b61039b565b604051601f82017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0908116603f011681019083821181831017156104715761047161039b565b8160405282815289602084870101111561048a57600080fd5b826020860160208301376000602084830101528095505050505050925092509256fe608060405234801561001057600080fd5b5061012e806100206000396000f3fe6080604052348015600f57600080fd5b506004361060285760003560e01c8063249cb3fa14602d575b600080fd5b603c603836600460b1565b604e565b60405190815260200160405180910390f35b60008281526020818152604080832073ffff
ffffffffffffffffffffffffffffffffffff8516845290915281205460ff16608857600060aa565b7fa2ef4600d742022d532d4747cb3547474667d6f13804902513b2ec01c848f4b45b9392505050565b6000806040838503121560c357600080fd5b82359150602083013573ffffffffffffffffffffffffffffffffffffffff8116811460ed57600080fd5b80915050925092905056fea26469706673582212205ffd4e6cede7d06a5daf93d48d0541fc68189eeb16608c1999a82063b666eb1164736f6c63430008130033a2646970667358221220fdc4a0fe96e3b21c108ca155438d37c9143fb01278a3c1d274948bad89c564ba64736f6c63430008130033"); @@ -28,17 +27,21 @@ const L1_BLOCK_ECOTONE_SELECTOR: [u8; 4] = hex!("440a5e20"); /// transaction in the L2 block. /// /// Returns an error if the L1 info transaction is not found, if the block is empty. -pub fn extract_l1_info(block: &Block) -> Result { +pub fn extract_l1_info(block: &Block) -> Result { let l1_info_tx_data = block .body .first() - .ok_or(reth_executor::BlockExecutionError::OptimismBlockExecution( - reth_executor::OptimismBlockExecutionError::L1BlockInfoError { - message: "could not find l1 block info tx in the L2 block".to_string(), - }, - )) + .ok_or(OptimismBlockExecutionError::L1BlockInfoError { + message: "could not find l1 block info tx in the L2 block".to_string(), + }) .map(|tx| tx.input())?; + if l1_info_tx_data.len() < 4 { + return Err(OptimismBlockExecutionError::L1BlockInfoError { + message: "invalid l1 block info transaction calldata in the L2 block".to_string(), + }) + } + // If the first 4 bytes of the calldata are the L1BlockInfoEcotone selector, then we parse the // calldata as an Ecotone hardfork L1BlockInfo transaction. Otherwise, we parse it as a // Bedrock hardfork L1BlockInfo transaction. @@ -50,7 +53,7 @@ pub fn extract_l1_info(block: &Block) -> Result Result { +pub fn parse_l1_info_tx_bedrock(data: &[u8]) -> Result { // The setL1BlockValues tx calldata must be exactly 260 bytes long, considering that // we already removed the first 4 bytes (the function selector). Detailed breakdown: // 32 bytes for the block number @@ -62,33 +65,25 @@ pub fn parse_l1_info_tx_bedrock(data: &[u8]) -> Result Result Result { - // The setL1BlockValuesEcotone tx calldata must be exactly 160 bytes long, considering that - // we already removed the first 4 bytes (the function selector). Detailed breakdown: - // 8 bytes for the block sequence number - // + 4 bytes for the blob base fee scalar - // + 4 bytes for the base fee scalar - // + 8 bytes for the block number - // + 8 bytes for the block timestamp - // + 32 bytes for the base fee - // + 32 bytes for the blob base fee - // + 32 bytes for the block hash - // + 32 bytes for the batcher hash +/// +/// This will fail if the call data is not exactly 160 bytes long: +/// +/// The `setL1BlockValuesEcotone` tx calldata must be exactly 160 bytes long, considering that +/// we already removed the first 4 bytes (the function selector). 
Detailed breakdown: +/// 8 bytes for the block sequence number +/// + 4 bytes for the blob base fee scalar +/// + 4 bytes for the base fee scalar +/// + 8 bytes for the block number +/// + 8 bytes for the block timestamp +/// + 32 bytes for the base fee +/// + 32 bytes for the blob base fee +/// + 32 bytes for the block hash +/// + 32 bytes for the batcher hash +pub fn parse_l1_info_tx_ecotone(data: &[u8]) -> Result { if data.len() != 160 { - return Err(reth_executor::BlockExecutionError::OptimismBlockExecution( - reth_executor::OptimismBlockExecutionError::L1BlockInfoError { - message: "unexpected l1 block info tx calldata length found".to_string(), - }, - )) + return Err(OptimismBlockExecutionError::L1BlockInfoError { + message: "unexpected l1 block info tx calldata length found".to_string(), + }) } let l1_blob_base_fee_scalar = U256::try_from_be_slice(&data[8..12]).ok_or( - reth_executor::BlockExecutionError::OptimismBlockExecution( - reth_executor::OptimismBlockExecutionError::L1BlockInfoError { - message: "could not convert l1 blob base fee scalar".to_string(), - }, - ), + OptimismBlockExecutionError::L1BlockInfoError { + message: "could not convert l1 blob base fee scalar".to_string(), + }, )?; let l1_base_fee_scalar = U256::try_from_be_slice(&data[12..16]).ok_or( - reth_executor::BlockExecutionError::OptimismBlockExecution( - reth_executor::OptimismBlockExecutionError::L1BlockInfoError { - message: "could not convert l1 base fee scalar".to_string(), - }, - ), + OptimismBlockExecutionError::L1BlockInfoError { + message: "could not convert l1 base fee scalar".to_string(), + }, )?; let l1_base_fee = U256::try_from_be_slice(&data[32..64]).ok_or( - reth_executor::BlockExecutionError::OptimismBlockExecution( - reth_executor::OptimismBlockExecutionError::L1BlockInfoError { - message: "could not convert l1 blob base fee".to_string(), - }, - ), + OptimismBlockExecutionError::L1BlockInfoError { + message: "could not convert l1 blob base fee".to_string(), + }, )?; let l1_blob_base_fee = U256::try_from_be_slice(&data[64..96]).ok_or( - reth_executor::BlockExecutionError::OptimismBlockExecution( - reth_executor::OptimismBlockExecutionError::L1BlockInfoError { - message: "could not convert l1 blob base fee".to_string(), - }, - ), + OptimismBlockExecutionError::L1BlockInfoError { + message: "could not convert l1 blob base fee".to_string(), + }, )?; let mut l1block = L1BlockInfo::default(); @@ -207,11 +195,10 @@ impl RethL1BlockInfo for L1BlockInfo { } else if chain_spec.is_fork_active_at_timestamp(Hardfork::Bedrock, timestamp) { SpecId::BEDROCK } else { - return Err(reth_executor::BlockExecutionError::OptimismBlockExecution( - reth_executor::OptimismBlockExecutionError::L1BlockInfoError { - message: "Optimism hardforks are not active".to_string(), - }, - )) + return Err(OptimismBlockExecutionError::L1BlockInfoError { + message: "Optimism hardforks are not active".to_string(), + } + .into()) }; Ok(self.calculate_tx_l1_cost(input, spec_id)) } @@ -227,11 +214,10 @@ impl RethL1BlockInfo for L1BlockInfo { } else if chain_spec.is_fork_active_at_timestamp(Hardfork::Bedrock, timestamp) { SpecId::BEDROCK } else { - return Err(reth_executor::BlockExecutionError::OptimismBlockExecution( - reth_executor::OptimismBlockExecutionError::L1BlockInfoError { - message: "Optimism hardforks are not active".to_string(), - }, - )) + return Err(OptimismBlockExecutionError::L1BlockInfoError { + message: "Optimism hardforks are not active".to_string(), + } + .into()) }; Ok(self.data_gas(input, spec_id)) } @@ -280,10 +266,11 
@@ where } #[cfg(test)] -mod test_l1_fee { +mod tests { + use super::*; + #[test] fn sanity_l1_block() { - use super::*; use reth_primitives::{hex_literal::hex, Bytes, Header, TransactionSigned}; let bytes = Bytes::from_static(&hex!("7ef9015aa044bae9d41b8380d781187b426c6fe43df5fb2fb57bd4466ef6a701e1f01e015694deaddeaddeaddeaddeaddeaddeaddeaddead000194420000000000000000000000000000000000001580808408f0d18001b90104015d8eb900000000000000000000000000000000000000000000000000000000008057650000000000000000000000000000000000000000000000000000000063d96d10000000000000000000000000000000000000000000000000000000000009f35273d89754a1e0387b89520d989d3be9c37c1f32495a88faf1ea05c61121ab0d1900000000000000000000000000000000000000000000000000000000000000010000000000000000000000002d679b567db6187c0c8323fa982cfb88b74dbcc7000000000000000000000000000000000000000000000000000000000000083400000000000000000000000000000000000000000000000000000000000f4240")); @@ -295,7 +282,7 @@ mod test_l1_fee { withdrawals: None, }; - let l1_info: L1BlockInfo = super::extract_l1_info(&mock_block).unwrap(); + let l1_info: L1BlockInfo = extract_l1_info(&mock_block).unwrap(); assert_eq!(l1_info.l1_base_fee, U256::from(652_114)); assert_eq!(l1_info.l1_fee_overhead, Some(U256::from(2100))); assert_eq!(l1_info.l1_base_fee_scalar, U256::from(1_000_000)); @@ -305,7 +292,6 @@ mod test_l1_fee { #[test] fn sanity_l1_block_ecotone() { - use super::*; use reth_primitives::{hex_literal::hex, Bytes, Header, TransactionSigned}; let bytes = Bytes::from_static(&hex!("7ef8f8a0b84fa363879a2159e341c50a32da3ea0d21765b7bd43db37f2e5e04e8848b1ee94deaddeaddeaddeaddeaddeaddeaddeaddead00019442000000000000000000000000000000000000158080830f424080b8a4440a5e20000f42400000000000000000000000040000000065c41f680000000000a03f6b00000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000535f4d983dea59eac60478a64ecfdcde8571e611404295350de7ed4ccb404296c1a84ab7a00000000000000000000000073b4168cc87f35cc239200a20eb841cded23493b")); @@ -317,7 +303,7 @@ mod test_l1_fee { withdrawals: None, }; - let l1_info: L1BlockInfo = super::extract_l1_info(&mock_block).unwrap(); + let l1_info: L1BlockInfo = extract_l1_info(&mock_block).unwrap(); assert_eq!(l1_info.l1_base_fee, U256::from(8)); assert_eq!(l1_info.l1_base_fee_scalar, U256::from(4)); assert_eq!(l1_info.l1_blob_base_fee, Some(U256::from(22_380_075_395u64))); diff --git a/crates/optimism/node/src/evm/mod.rs b/crates/optimism/evm/src/lib.rs similarity index 65% rename from crates/optimism/node/src/evm/mod.rs rename to crates/optimism/evm/src/lib.rs index 086253a0d..31d39fcb6 100644 --- a/crates/optimism/node/src/evm/mod.rs +++ b/crates/optimism/evm/src/lib.rs @@ -1,13 +1,30 @@ -use reth_node_api::{ConfigureEvm, ConfigureEvmEnv}; +//! EVM config for vanilla optimism. + +#![doc( + html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png", + html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256", + issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" +)] +#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] +// The `optimism` feature must be enabled to use this crate. 
+#![cfg(feature = "optimism")] + +use reth_evm::{ConfigureEvm, ConfigureEvmEnv}; use reth_primitives::{ revm::{config::revm_spec, env::fill_op_tx_env}, revm_primitives::{AnalysisKind, CfgEnvWithHandlerCfg, TxEnv}, - Address, Bytes, ChainSpec, Head, Header, Transaction, U256, + Address, ChainSpec, Head, Header, TransactionSigned, U256, }; -use revm::{inspector_handle_register, Database, Evm, EvmBuilder, GetInspector}; +use reth_revm::{inspector_handle_register, Database, Evm, EvmBuilder, GetInspector}; mod execute; pub use execute::*; +pub mod l1; +pub use l1::*; + +mod error; +pub mod verify; +pub use error::OptimismBlockExecutionError; /// Optimism-related EVM configuration. #[derive(Debug, Default, Clone, Copy)] @@ -15,13 +32,10 @@ pub use execute::*; pub struct OptimismEvmConfig; impl ConfigureEvmEnv for OptimismEvmConfig { - type TxMeta = Bytes; - - fn fill_tx_env<T>(tx_env: &mut TxEnv, transaction: T, sender: Address, meta: Bytes) - where - T: AsRef<Transaction>, - { - fill_op_tx_env(tx_env, transaction, sender, meta); + fn fill_tx_env(tx_env: &mut TxEnv, transaction: &TransactionSigned, sender: Address) { + let mut buf = Vec::with_capacity(transaction.length_without_header()); + transaction.encode_enveloped(&mut buf); + fill_op_tx_env(tx_env, transaction, sender, buf.into()); } fn fill_cfg_env( @@ -50,7 +64,9 @@ impl ConfigureEvmEnv for OptimismEvmConfig { } impl ConfigureEvm for OptimismEvmConfig { - fn evm<'a, DB: Database + 'a>(&self, db: DB) -> Evm<'a, (), DB> { + type DefaultExternalContext<'a> = (); + + fn evm<'a, DB: Database + 'a>(&self, db: DB) -> Evm<'a, Self::DefaultExternalContext<'a>, DB> { EvmBuilder::default().with_db(db).optimism().build() } @@ -72,7 +88,7 @@ impl ConfigureEvm for OptimismEvmConfig { mod tests { use super::*; use reth_primitives::revm_primitives::{BlockEnv, CfgEnv}; - use revm::primitives::SpecId; + use revm_primitives::SpecId; #[test] #[ignore] diff --git a/crates/optimism/evm/src/verify.rs b/crates/optimism/evm/src/verify.rs new file mode 100644 index 000000000..d96965d03 --- /dev/null +++ b/crates/optimism/evm/src/verify.rs @@ -0,0 +1,58 @@ +//! Helpers for verifying the receipts. + +use reth_interfaces::executor::{BlockExecutionError, BlockValidationError}; +use reth_primitives::{ proofs::calculate_receipt_root_optimism, Bloom, ChainSpec, GotExpected, Receipt, ReceiptWithBloom, B256, }; + +/// Verify the calculated receipts root against the expected receipts root. +pub fn verify_receipts<'a>( + expected_receipts_root: B256, + expected_logs_bloom: Bloom, + receipts: impl Iterator<Item = &'a Receipt> + Clone, + chain_spec: &ChainSpec, + timestamp: u64, +) -> Result<(), BlockExecutionError> { + // Calculate receipts root. + let receipts_with_bloom = receipts.map(|r| r.clone().into()).collect::<Vec<ReceiptWithBloom>>(); + let receipts_root = + calculate_receipt_root_optimism(&receipts_with_bloom, chain_spec, timestamp); + + // Create header log bloom. + let logs_bloom = receipts_with_bloom.iter().fold(Bloom::ZERO, |bloom, r| bloom | r.bloom); + + compare_receipts_root_and_logs_bloom( + receipts_root, + logs_bloom, + expected_receipts_root, + expected_logs_bloom, + )?; + + Ok(()) +} + +/// Compare the calculated receipts root with the expected receipts root, also compare +/// the calculated logs bloom with the expected logs bloom.
+pub fn compare_receipts_root_and_logs_bloom( + calculated_receipts_root: B256, + calculated_logs_bloom: Bloom, + expected_receipts_root: B256, + expected_logs_bloom: Bloom, +) -> Result<(), BlockExecutionError> { + if calculated_receipts_root != expected_receipts_root { + return Err(BlockValidationError::ReceiptRootDiff( + GotExpected { got: calculated_receipts_root, expected: expected_receipts_root }.into(), + ) + .into()) + } + + if calculated_logs_bloom != expected_logs_bloom { + return Err(BlockValidationError::BloomLogDiff( + GotExpected { got: calculated_logs_bloom, expected: expected_logs_bloom }.into(), + ) + .into()) + } + + Ok(()) +} diff --git a/crates/optimism/node/Cargo.toml b/crates/optimism/node/Cargo.toml index 8f10c00d7..9432ce9ed 100644 --- a/crates/optimism/node/Cargo.toml +++ b/crates/optimism/node/Cargo.toml @@ -28,18 +28,17 @@ reth-network.workspace = true reth-interfaces.workspace = true reth-evm.workspace = true reth-revm.workspace = true - +reth-evm-optimism.workspace = true +reth-beacon-consensus.workspace = true revm.workspace = true revm-primitives.workspace = true # async async-trait.workspace = true hyper.workspace = true -http.workspace = true -http-body.workspace = true -reqwest = { version = "0.11", default-features = false, features = [ - "rustls-tls", -]} +reqwest = { workspace = true, default-features = false, features = [ + "rustls-tls-native-roots", +] } tracing.workspace = true # misc @@ -54,7 +53,7 @@ jsonrpsee.workspace = true [dev-dependencies] reth.workspace = true reth-db.workspace = true -reth-revm = { workspace = true, features = ["test-utils"]} +reth-revm = { workspace = true, features = ["test-utils"] } reth-e2e-test-utils.workspace = true tokio.workspace = true alloy-primitives.workspace = true @@ -65,6 +64,7 @@ optimism = [ "reth-provider/optimism", "reth-rpc-types-compat/optimism", "reth-rpc/optimism", - "reth-revm/optimism", + "reth-evm-optimism/optimism", "reth-optimism-payload-builder/optimism", + "reth-beacon-consensus/optimism", ] diff --git a/crates/optimism/node/src/engine.rs b/crates/optimism/node/src/engine.rs index f5c53d98e..7382d2184 100644 --- a/crates/optimism/node/src/engine.rs +++ b/crates/optimism/node/src/engine.rs @@ -74,7 +74,7 @@ pub fn validate_withdrawals_presence( .to_error(VersionSpecificValidationError::NoWithdrawalsPostShanghai)) } } - EngineApiMessageVersion::V2 | EngineApiMessageVersion::V3 => { + EngineApiMessageVersion::V2 | EngineApiMessageVersion::V3 | EngineApiMessageVersion::V4 => { if is_shanghai && !has_withdrawals { return Err(message_validation_kind .to_error(VersionSpecificValidationError::NoWithdrawalsPostShanghai)) diff --git a/crates/optimism/node/src/lib.rs b/crates/optimism/node/src/lib.rs index e75b03890..7fc1c34b6 100644 --- a/crates/optimism/node/src/lib.rs +++ b/crates/optimism/node/src/lib.rs @@ -17,11 +17,6 @@ pub mod args; pub mod engine; pub use engine::OptimismEngineTypes; -/// Exports optimism-specific implementations of the -/// [ConfigureEvmEnv](reth_node_api::ConfigureEvmEnv) trait. 
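`verify_receipts` recomputes two header fields from the executed receipts: the receipts root (via `calculate_receipt_root_optimism`) and the logs bloom, which is simply the bitwise OR of every receipt's bloom. A self-contained sketch of the bloom-aggregation half, using a narrow stand-in type instead of the real 2048-bit `Bloom`:

    /// Stand-in for the 2048-bit bloom filter; only the OR semantics matter here.
    #[derive(Clone, Copy, PartialEq, Eq, Debug, Default)]
    struct MiniBloom(u128);

    impl std::ops::BitOr for MiniBloom {
        type Output = Self;
        fn bitor(self, rhs: Self) -> Self {
            MiniBloom(self.0 | rhs.0)
        }
    }

    fn main() {
        let receipt_blooms = [MiniBloom(0b0011), MiniBloom(0b0100)];
        // Mirrors the fold in `verify_receipts`: header bloom = OR of all receipt blooms.
        let aggregated = receipt_blooms.iter().fold(MiniBloom::default(), |acc, b| acc | *b);
        // A mismatch here is what `compare_receipts_root_and_logs_bloom` reports as BloomLogDiff.
        assert_eq!(aggregated, MiniBloom(0b0111));
    }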
-pub mod evm; -pub use evm::OptimismEvmConfig; - pub mod node; pub use node::OptimismNode; @@ -32,3 +27,5 @@ pub mod rpc; pub use reth_optimism_payload_builder::{ OptimismBuiltPayload, OptimismPayloadBuilder, OptimismPayloadBuilderAttributes, }; + +pub use reth_evm_optimism::*; diff --git a/crates/optimism/node/src/node.rs b/crates/optimism/node/src/node.rs index 0c365ccc7..7d715fece 100644 --- a/crates/optimism/node/src/node.rs +++ b/crates/optimism/node/src/node.rs @@ -3,12 +3,16 @@ use crate::{ args::RollupArgs, txpool::{OpTransactionPool, OpTransactionValidator}, - OptimismEngineTypes, OptimismEvmConfig, + OptimismEngineTypes, }; use reth_basic_payload_builder::{BasicPayloadJobGenerator, BasicPayloadJobGeneratorConfig}; +use reth_evm::ConfigureEvm; +use reth_evm_optimism::{OpExecutorProvider, OptimismEvmConfig}; use reth_network::{NetworkHandle, NetworkManager}; use reth_node_builder::{ - components::{ComponentsBuilder, NetworkBuilder, PayloadServiceBuilder, PoolBuilder}, + components::{ + ComponentsBuilder, ExecutorBuilder, NetworkBuilder, PayloadServiceBuilder, PoolBuilder, + }, node::{FullNodeTypes, NodeTypes}, BuilderContext, Node, PayloadBuilderConfig, }; @@ -37,7 +41,13 @@ impl OptimismNode { /// Returns the components for the given [RollupArgs]. pub fn components( args: RollupArgs, - ) -> ComponentsBuilder<Node, OptimismPoolBuilder, OptimismPayloadBuilder, OptimismNetworkBuilder> + ) -> ComponentsBuilder< + Node, + OptimismPoolBuilder, + OptimismPayloadBuilder, + OptimismNetworkBuilder, + OptimismExecutorBuilder, + > where Node: FullNodeTypes, { @@ -45,8 +55,12 @@ impl OptimismNode { ComponentsBuilder::default() .node_types::<Node>() .pool(OptimismPoolBuilder::default()) - .payload(OptimismPayloadBuilder::new(compute_pending_block)) + .payload(OptimismPayloadBuilder::new( + compute_pending_block, + OptimismEvmConfig::default(), + )) .network(OptimismNetworkBuilder { disable_txpool_gossip }) + .executor(OptimismExecutorBuilder::default()) } } @@ -54,13 +68,15 @@ impl<N> Node<N> for OptimismNode where N: FullNodeTypes, { - type PoolBuilder = OptimismPoolBuilder; - type NetworkBuilder = OptimismNetworkBuilder; - type PayloadBuilder = OptimismPayloadBuilder; + type ComponentsBuilder = ComponentsBuilder< + N, + OptimismPoolBuilder, + OptimismPayloadBuilder, + OptimismNetworkBuilder, + OptimismExecutorBuilder, + >; - fn components( - self, - ) -> ComponentsBuilder<N, Self::PoolBuilder, Self::PayloadBuilder, Self::NetworkBuilder> { + fn components_builder(self) -> Self::ComponentsBuilder { let Self { args } = self; Self::components(args) } } @@ -69,10 +85,29 @@ where impl NodeTypes for OptimismNode { type Primitives = (); type Engine = OptimismEngineTypes; - type Evm = OptimismEvmConfig; +} + +/// A regular optimism evm and executor builder.
+#[derive(Debug, Default, Clone, Copy)] +#[non_exhaustive] +pub struct OptimismExecutorBuilder; + +impl<Node> ExecutorBuilder<Node> for OptimismExecutorBuilder +where + Node: FullNodeTypes, +{ + type EVM = OptimismEvmConfig; + type Executor = OpExecutorProvider<Self::EVM>; + + async fn build_evm( + self, + ctx: &BuilderContext<Node>, + ) -> eyre::Result<(Self::EVM, Self::Executor)> { + let chain_spec = ctx.chain_spec(); + let evm_config = OptimismEvmConfig::default(); + let executor = OpExecutorProvider::new(chain_spec, evm_config); - fn evm_config(&self) -> Self::Evm { - OptimismEvmConfig::default() + Ok((evm_config, executor)) } } @@ -92,7 +127,7 @@ where async fn build_pool(self, ctx: &BuilderContext<Node>) -> eyre::Result<Self::Pool> { let data_dir = ctx.data_dir(); - let blob_store = DiskFileBlobStore::open(data_dir.blobstore_path(), Default::default())?; + let blob_store = DiskFileBlobStore::open(data_dir.blobstore(), Default::default())?; let validator = TransactionValidationTaskExecutor::eth_builder(ctx.chain_spec()) .with_head_timestamp(ctx.head().timestamp) @@ -112,7 +147,7 @@ where ctx.pool_config(), ); info!(target: "reth::cli", "Transaction pool initialized"); - let transactions_path = data_dir.txpool_transactions_path(); + let transactions_path = data_dir.txpool_transactions(); // spawn txpool maintenance task { @@ -153,7 +188,7 @@ where /// A basic optimism payload service builder #[derive(Debug, Default, Clone)] -pub struct OptimismPayloadBuilder { +pub struct OptimismPayloadBuilder<EVM = OptimismEvmConfig> { /// By default the pending block equals the latest block /// to save resources and not leak txs from the tx-pool, /// this flag enables computing of the pending block @@ -163,19 +198,22 @@ pub struct OptimismPayloadBuilder { /// will use the payload attributes from the latest block. Note /// that this flag is not yet functional. pub compute_pending_block: bool, + /// The EVM configuration to use for the payload builder. + pub evm_config: EVM, } -impl OptimismPayloadBuilder { - /// Create a new instance with the given `compute_pending_block` flag. - pub const fn new(compute_pending_block: bool) -> Self { - Self { compute_pending_block } +impl<EVM> OptimismPayloadBuilder<EVM> { + /// Create a new instance with the given `compute_pending_block` flag and evm config.
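The new `ExecutorBuilder` hands back the EVM config and the block executor as one pair, so both are guaranteed to be built from the same configuration; the constructor that follows this aside then threads the same config into the payload builder. A reth-free sketch of that shape (all names here are illustrative stand-ins):

    #[derive(Clone, Copy, Default, Debug)]
    struct EvmConfig;

    #[derive(Debug)]
    struct Executor {
        /// The executor keeps the exact config instance that is also handed back to the node.
        config: EvmConfig,
    }

    fn build_evm_pair() -> (EvmConfig, Executor) {
        let evm_config = EvmConfig::default();
        // Mirrors `OpExecutorProvider::new(chain_spec, evm_config)` above: one config, two consumers.
        let executor = Executor { config: evm_config };
        (evm_config, executor)
    }

Returning the tuple from a single builder method is what keeps the payload builder and the executor from drifting onto different EVM configurations.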
+ pub const fn new(compute_pending_block: bool, evm_config: EVM) -> Self { + Self { compute_pending_block, evm_config } } } -impl<Node, Pool> PayloadServiceBuilder<Node, Pool> for OptimismPayloadBuilder +impl<Node, Pool, EVM> PayloadServiceBuilder<Node, Pool> for OptimismPayloadBuilder<EVM> where Node: FullNodeTypes, Pool: TransactionPool + Unpin + 'static, + EVM: ConfigureEvm, { async fn spawn_payload_service( self, @@ -184,7 +222,7 ) -> eyre::Result<PayloadBuilderHandle<Node::Engine>> { let payload_builder = reth_optimism_payload_builder::OptimismPayloadBuilder::new( ctx.chain_spec(), - ctx.evm_config().clone(), + self.evm_config, ) .set_compute_pending_block(self.compute_pending_block); let conf = ctx.payload_builder_config(); @@ -194,8 +232,7 @@ where .deadline(conf.deadline()) .max_payload_tasks(conf.max_payload_tasks()) // no extradata for OP - .extradata(Default::default()) - .max_gas_limit(conf.max_gas_limit()); + .extradata(Default::default()); let payload_generator = BasicPayloadJobGenerator::with_builder( ctx.provider().clone(), diff --git a/crates/optimism/node/src/rpc.rs b/crates/optimism/node/src/rpc.rs index 66eb82450..515e1d8eb 100644 --- a/crates/optimism/node/src/rpc.rs +++ b/crates/optimism/node/src/rpc.rs @@ -3,9 +3,10 @@ use jsonrpsee::types::ErrorObject; use reqwest::Client; use reth_rpc::eth::{ - error::{EthApiError, EthResult, ToRpcError}, + error::{EthApiError, EthResult}, traits::RawTransactionForwarder, }; +use reth_rpc_types::ToRpcError; use std::sync::{atomic::AtomicUsize, Arc}; /// Error type when interacting with the Sequencer @@ -94,7 +95,7 @@ impl SequencerClient { self.http_client() .post(self.endpoint()) - .header(http::header::CONTENT_TYPE, "application/json") + .header(reqwest::header::CONTENT_TYPE, "application/json") .body(body) .send() .await diff --git a/crates/optimism/node/src/txpool.rs b/crates/optimism/node/src/txpool.rs index 73097ce27..db6a6266e 100644 --- a/crates/optimism/node/src/txpool.rs +++ b/crates/optimism/node/src/txpool.rs @@ -1,8 +1,9 @@ //! OP transaction pool types use parking_lot::RwLock; +use reth_evm_optimism::RethL1BlockInfo; use reth_primitives::{Block, ChainSpec, GotExpected, InvalidTransactionError, SealedBlock}; use reth_provider::{BlockReaderIdExt, StateProviderFactory}; -use reth_revm::{optimism::RethL1BlockInfo, L1BlockInfo}; +use reth_revm::L1BlockInfo; use reth_transaction_pool::{ CoinbaseTipOrdering, EthPoolTransaction, EthPooledTransaction, EthTransactionValidator, Pool, TransactionOrigin, TransactionValidationOutcome, TransactionValidationTaskExecutor, @@ -75,7 +76,7 @@ where /// Update the L1 block info.
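The OP pool validator keeps per-block L1 fee data in a shared cache: an atomic timestamp plus a lock-protected `L1BlockInfo`, refreshed on every canonical block by the `update_l1_block_info` method that follows. A sketch of that read-mostly pattern with std primitives (the PR itself uses `parking_lot::RwLock`, and `L1Info`/`BlockInfoCache` here are simplified stand-ins):

    use std::sync::{
        atomic::{AtomicU64, Ordering},
        RwLock,
    };

    #[derive(Default, Clone, Copy, Debug, PartialEq)]
    struct L1Info {
        base_fee: u64, // stand-in for the full L1BlockInfo payload
    }

    #[derive(Default)]
    struct BlockInfoCache {
        timestamp: AtomicU64,
        l1_info: RwLock<L1Info>,
    }

    impl BlockInfoCache {
        /// Mirrors `update_l1_block_info`: refresh the cache when a new block arrives.
        fn update(&self, block_timestamp: u64, info: L1Info) {
            self.timestamp.store(block_timestamp, Ordering::Relaxed);
            *self.l1_info.write().unwrap() = info;
        }
    }

    fn main() {
        let cache = BlockInfoCache::default();
        cache.update(1_710_000_000, L1Info { base_fee: 42 });
        assert_eq!(cache.l1_info.read().unwrap().base_fee, 42);
    }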
fn update_l1_block_info(&self, block: &Block) { self.block_info.timestamp.store(block.timestamp, Ordering::Relaxed); - if let Ok(cost_addition) = reth_revm::optimism::extract_l1_info(block) { + if let Ok(cost_addition) = reth_evm_optimism::extract_l1_info(block) { *self.block_info.l1_block_info.write() = cost_addition; } } @@ -202,8 +203,8 @@ pub struct OpL1BlockInfo { mod tests { use crate::txpool::OpTransactionValidator; use reth_primitives::{ - Signature, Transaction, TransactionKind, TransactionSigned, TransactionSignedEcRecovered, - TxDeposit, MAINNET, U256, + Signature, Transaction, TransactionSigned, TransactionSignedEcRecovered, TxDeposit, TxKind, + MAINNET, U256, }; use reth_provider::test_utils::MockEthProvider; use reth_transaction_pool::{ @@ -225,7 +226,7 @@ mod tests { let deposit_tx = Transaction::Deposit(TxDeposit { source_hash: Default::default(), from: signer, - to: TransactionKind::Create, + to: TxKind::Create, mint: None, value: U256::ZERO, gas_limit: 0u64, diff --git a/crates/optimism/node/tests/e2e/p2p.rs b/crates/optimism/node/tests/e2e/p2p.rs index 5fe4daa7b..9e3741055 100644 --- a/crates/optimism/node/tests/e2e/p2p.rs +++ b/crates/optimism/node/tests/e2e/p2p.rs @@ -1,80 +1,89 @@ +use crate::utils::{advance_chain, setup}; +use reth_interfaces::blockchain_tree::error::BlockchainTreeError; +use reth_rpc_types::engine::PayloadStatusEnum; use std::sync::Arc; - -use crate::utils::optimism_payload_attributes; -use reth::{ - args::{DiscoveryArgs, NetworkArgs, RpcServerArgs}, - builder::{NodeBuilder, NodeConfig, NodeHandle}, - tasks::TaskManager, -}; -use reth_e2e_test_utils::{node::NodeHelper, wallet::Wallet}; -use reth_node_optimism::node::OptimismNode; -use reth_primitives::{hex, Bytes, ChainSpecBuilder, Genesis, BASE_MAINNET}; +use tokio::sync::Mutex; #[tokio::test] async fn can_sync() -> eyre::Result<()> { reth_tracing::init_test_tracing(); - let tasks = TaskManager::current(); - let exec = tasks.executor(); + let (mut nodes, _tasks, wallet) = setup(3).await?; + let wallet = Arc::new(Mutex::new(wallet)); - let genesis: Genesis = serde_json::from_str(include_str!("../assets/genesis.json")).unwrap(); - let chain_spec = Arc::new( - ChainSpecBuilder::default() - .chain(BASE_MAINNET.chain) - .genesis(genesis) - .ecotone_activated() - .build(), - ); - let mut wallet = Wallet::default().with_chain_id(chain_spec.chain.into()); + let third_node = nodes.pop().unwrap(); + let mut second_node = nodes.pop().unwrap(); + let mut first_node = nodes.pop().unwrap(); - let network_config = NetworkArgs { - discovery: DiscoveryArgs { disable_discovery: true, ..DiscoveryArgs::default() }, - ..NetworkArgs::default() - }; + let tip: usize = 90; + let tip_index: usize = tip - 1; + let reorg_depth = 2; - let node_config = NodeConfig::test() - .with_chain(chain_spec) - .with_network(network_config) - .with_unused_ports() - .with_rpc(RpcServerArgs::default().with_unused_ports().with_http()); + // On first node, create a chain up to block number 90a + let canonical_payload_chain = advance_chain(tip, &mut first_node, wallet.clone()).await?; + let canonical_chain = + canonical_payload_chain.iter().map(|p| p.0.block().hash()).collect::<Vec<_>>(); - let NodeHandle { node, node_exit_future: _ } = NodeBuilder::new(node_config.clone()) - .testing_node(exec.clone()) - .node(OptimismNode::default()) - .launch() + // On second node, sync optimistically up to block number 88a + second_node + .engine_api + .update_optimistic_forkchoice(canonical_chain[tip_index - reorg_depth]) .await?; - - let mut first_node =
NodeHelper::new(node.clone()).await?; - - let NodeHandle { node, node_exit_future: _ } = NodeBuilder::new(node_config) - .testing_node(exec) - .node(OptimismNode::default()) - .launch() + second_node + .wait_block((tip - reorg_depth) as u64, canonical_chain[tip_index - reorg_depth], true) .await?; - let mut second_node = NodeHelper::new(node).await?; - - // Make them peer - first_node.network.add_peer(second_node.network.record()).await; - second_node.network.add_peer(first_node.network.record()).await; - - // Make sure they establish a new session - first_node.network.expect_session().await; - second_node.network.expect_session().await; - - // Taken from optimism tests - let l1_block_info = Bytes::from_static(&hex!("7ef9015aa044bae9d41b8380d781187b426c6fe43df5fb2fb57bd4466ef6a701e1f01e015694deaddeaddeaddeaddeaddeaddeaddeaddead000194420000000000000000000000000000000000001580808408f0d18001b90104015d8eb900000000000000000000000000000000000000000000000000000000008057650000000000000000000000000000000000000000000000000000000063d96d10000000000000000000000000000000000000000000000000000000000009f35273d89754a1e0387b89520d989d3be9c37c1f32495a88faf1ea05c61121ab0d1900000000000000000000000000000000000000000000000000000000000000010000000000000000000000002d679b567db6187c0c8323fa982cfb88b74dbcc7000000000000000000000000000000000000000000000000000000000000083400000000000000000000000000000000000000000000000000000000000f4240")); - - // Make the first node advance - let raw_tx = wallet.tx(Some(l1_block_info)).await; - let (block_hash, tx_hash) = - first_node.advance(raw_tx.clone(), optimism_payload_attributes).await?; - - // only send forkchoice update to second node - second_node.engine_api.update_forkchoice(block_hash).await?; + // On third node, sync optimistically up to block number 90a + third_node.engine_api.update_optimistic_forkchoice(canonical_chain[tip_index]).await?; + third_node.wait_block(tip as u64, canonical_chain[tip_index], true).await?; + + // On second node, create a side chain: 88a -> 89b -> 90b + wallet.lock().await.inner_nonce -= reorg_depth as u64; + second_node.payload.timestamp = first_node.payload.timestamp - reorg_depth as u64; // TODO: probably want to make it node agnostic + let side_payload_chain = advance_chain(reorg_depth, &mut second_node, wallet.clone()).await?; + let side_chain = side_payload_chain.iter().map(|p| p.0.block().hash()).collect::<Vec<_>>(); + + // Creates fork chain by submitting 89b payload. + // By returning Valid here, op-node will finally return a finalized hash + let _ = third_node + .engine_api + .submit_payload( + side_payload_chain[0].0.clone(), + side_payload_chain[0].1.clone(), + PayloadStatusEnum::Valid, + Default::default(), + ) + .await; + + // It will issue a pipeline reorg to 88a, and then make 89b canonical AND finalized. + third_node.engine_api.update_forkchoice(side_chain[0], side_chain[0]).await?; + + // Make sure we have the updated block + third_node.wait_unwind((tip - reorg_depth) as u64).await?; + third_node + .wait_block( + side_payload_chain[0].0.block().number, + side_payload_chain[0].0.block().hash(), + true, + ) + .await?; - // expect second node advanced via p2p gossip - second_node.assert_new_block(tx_hash, block_hash, 1).await?; + // Make sure that trying to submit 89a again will result in an invalid payload status, since 89b + // has been set as finalized.
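The numbers driving this test are easy to mix up, so here is the arithmetic spelled out as a self-checking sketch: with `tip = 90` and `reorg_depth = 2`, block "88a" is the last block the canonical and side chains share:

    fn main() {
        let tip: usize = 90;
        let tip_index = tip - 1; // canonical_chain is 0-indexed, so block 90 sits at index 89
        let reorg_depth = 2;
        // canonical_chain[tip_index - reorg_depth] is the fork point's hash.
        assert_eq!(tip_index - reorg_depth, 87); // index 87 holds block 88 ("88a")
        assert_eq!(tip - reorg_depth, 88); // height the third node unwinds back to
        assert_eq!(tip - reorg_depth + 1, 89); // first height replaced by the side chain ("89b")
    }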
+ let _ = third_node + .engine_api + .submit_payload( + canonical_payload_chain[tip_index - reorg_depth + 1].0.clone(), + canonical_payload_chain[tip_index - reorg_depth + 1].1.clone(), + PayloadStatusEnum::Invalid { + validation_error: BlockchainTreeError::PendingBlockIsFinalized { + last_finalized: (tip - reorg_depth) as u64 + 1, + } + .to_string(), + }, + Default::default(), + ) + .await; Ok(()) } diff --git a/crates/optimism/node/tests/e2e/utils.rs b/crates/optimism/node/tests/e2e/utils.rs index 1f655502e..ad19086ae 100644 --- a/crates/optimism/node/tests/e2e/utils.rs +++ b/crates/optimism/node/tests/e2e/utils.rs @@ -1,7 +1,55 @@ -use reth::rpc::types::engine::PayloadAttributes; -use reth_node_optimism::OptimismPayloadBuilderAttributes; +use reth::{rpc::types::engine::PayloadAttributes, tasks::TaskManager}; +use reth_e2e_test_utils::{transaction::TransactionTestContext, wallet::Wallet, NodeHelperType}; +use reth_node_optimism::{OptimismBuiltPayload, OptimismNode, OptimismPayloadBuilderAttributes}; use reth_payload_builder::EthPayloadBuilderAttributes; -use reth_primitives::{Address, B256}; +use reth_primitives::{Address, ChainSpecBuilder, Genesis, B256, BASE_MAINNET}; +use std::sync::Arc; +use tokio::sync::Mutex; + +/// Optimism Node Helper type +pub(crate) type OpNode = NodeHelperType<OptimismNode>; + +pub(crate) async fn setup(num_nodes: usize) -> eyre::Result<(Vec<OpNode>, TaskManager, Wallet)> { let genesis: Genesis = serde_json::from_str(include_str!("../assets/genesis.json")).unwrap(); reth_e2e_test_utils::setup( num_nodes, Arc::new( ChainSpecBuilder::default() .chain(BASE_MAINNET.chain) .genesis(genesis) .ecotone_activated() .build(), ), false, ) .await } + +/// Advance the chain with sequential payloads returning them in the end. +pub(crate) async fn advance_chain( + length: usize, + node: &mut OpNode, + wallet: Arc<Mutex<Wallet>>, +) -> eyre::Result<Vec<(OptimismBuiltPayload, OptimismPayloadBuilderAttributes)>> { + node.advance( length as u64, |_| { let wallet = wallet.clone(); Box::pin(async move { let mut wallet = wallet.lock().await; let tx_fut = TransactionTestContext::optimism_l1_block_info_tx( wallet.chain_id, wallet.inner.clone(), wallet.inner_nonce, ); wallet.inner_nonce += 1; tx_fut.await }) }, optimism_payload_attributes, ) .await +} /// Helper function to create a new eth payload attributes pub(crate) fn optimism_payload_attributes(timestamp: u64) -> OptimismPayloadBuilderAttributes { diff --git a/crates/optimism/node/tests/it/builder.rs b/crates/optimism/node/tests/it/builder.rs index 64f96bd2d..5d26e8bda 100644 --- a/crates/optimism/node/tests/it/builder.rs +++ b/crates/optimism/node/tests/it/builder.rs @@ -12,7 +12,7 @@ fn test_basic_setup() { let db = create_test_rw_db(); let _builder = NodeBuilder::new(config) .with_database(db) - .with_types(OptimismNode::default()) + .with_types::<OptimismNode>() .with_components(OptimismNode::components(Default::default())) .on_component_initialized(move |ctx| { let _provider = ctx.provider(); diff --git a/crates/payload/basic/src/lib.rs b/crates/payload/basic/src/lib.rs index 359a0fb16..6529710ca 100644 --- a/crates/payload/basic/src/lib.rs +++ b/crates/payload/basic/src/lib.rs @@ -18,7 +18,7 @@ use reth_payload_builder::{ PayloadJobGenerator, }; use reth_primitives::{ - constants::{EMPTY_WITHDRAWALS, ETHEREUM_BLOCK_GAS_LIMIT, RETH_CLIENT_VERSION, SLOT_DURATION}, + constants::{EMPTY_WITHDRAWALS, RETH_CLIENT_VERSION, SLOT_DURATION}, proofs, BlockNumberOrTag, Bytes, ChainSpec, SealedBlock, Withdrawals, B256, U256, }; use reth_provider::{ @@ -35,6 +35,7 @@ use revm::{ }; use
std::{ future::Future, + ops::Deref, pin::Pin, sync::{atomic::AtomicBool, Arc}, task::{Context, Poll}, @@ -53,9 +54,9 @@ pub struct BasicPayloadJobGenerator<Client, Pool, Tasks, Builder> { /// The client that can interact with the chain. client: Client, - /// txpool + /// The transaction pool to pull transactions from. pool: Pool, - /// How to spawn building tasks + /// The task executor to spawn payload building tasks on. executor: Tasks, /// The configuration for the job generator. config: BasicPayloadJobGeneratorConfig, @@ -226,12 +227,21 @@ pub struct PrecachedState { /// Restricts how many generator tasks can be executed at once. #[derive(Debug, Clone)] -struct PayloadTaskGuard(Arc<Semaphore>); +pub struct PayloadTaskGuard(Arc<Semaphore>); + +impl Deref for PayloadTaskGuard { + type Target = Semaphore; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} // === impl PayloadTaskGuard === impl PayloadTaskGuard { - fn new(max_payload_tasks: usize) -> Self { + /// Constructs `Self` with a maximum task count of `max_payload_tasks`. + pub fn new(max_payload_tasks: usize) -> Self { Self(Arc::new(Semaphore::new(max_payload_tasks))) } } @@ -241,8 +251,6 @@ impl PayloadTaskGuard { pub struct BasicPayloadJobGeneratorConfig { /// Data to include in the block's extra data field. extradata: Bytes, - /// Target gas ceiling for built blocks, defaults to [ETHEREUM_BLOCK_GAS_LIMIT] gas. - max_gas_limit: u64, /// The interval at which the job should build a new payload after the last. interval: Duration, /// The deadline for when the payload builder job should resolve. @@ -286,21 +294,12 @@ impl BasicPayloadJobGeneratorConfig { self.extradata = extradata; self } - - /// Sets the target gas ceiling for mined blocks. - /// - /// Defaults to [ETHEREUM_BLOCK_GAS_LIMIT] gas. - pub fn max_gas_limit(mut self, max_gas_limit: u64) -> Self { - self.max_gas_limit = max_gas_limit; - self - } } impl Default for BasicPayloadJobGeneratorConfig { fn default() -> Self { Self { extradata: alloy_rlp::encode(RETH_CLIENT_VERSION.as_bytes()).into(), - max_gas_limit: ETHEREUM_BLOCK_GAS_LIMIT, interval: Duration::from_secs(1), // 12s slot time deadline: SLOT_DURATION, @@ -384,7 +383,7 @@ where let builder = this.builder.clone(); this.executor.spawn_blocking(Box::pin(async move { // acquire the permit for executing the task - let _permit = guard.0.acquire().await; + let _permit = guard.acquire().await; let args = BuildArguments { client, pool, @@ -410,7 +409,6 @@ where BuildOutcome::Better { payload, cached_reads } => { this.cached_reads = Some(cached_reads); debug!(target: "payload_builder", value = %payload.fees(), "built better payload"); - let payload = payload; this.best_payload = Some(payload); } BuildOutcome::Aborted { fees, cached_reads } => { @@ -528,11 +526,11 @@ where #[derive(Debug)] pub struct ResolveBestPayload<Payload> { /// Best payload so far. - best_payload: Option<Payload>, + pub best_payload: Option<Payload>, /// Regular payload job that's currently running that might produce a better payload. - maybe_better: Option<PendingPayload<Payload>>, + pub maybe_better: Option<PendingPayload<Payload>>, /// The empty payload building job in progress. - empty_payload: Option<oneshot::Receiver<Result<Payload, PayloadBuilderError>>>, + pub empty_payload: Option<oneshot::Receiver<Result<Payload, PayloadBuilderError>>>, } impl<Payload> Future for ResolveBestPayload<Payload> where @@ -581,13 +579,23 @@ where /// A future that resolves to the result of the block building job. #[derive(Debug)] -struct PendingPayload<P>
{ +pub struct PendingPayload<P> { /// The marker to cancel the job on drop _cancel: Cancelled, /// The channel to send the result to. payload: oneshot::Receiver<Result<BuildOutcome<P>, PayloadBuilderError>>, } +impl<P> PendingPayload<P> { + /// Constructs a `PendingPayload` future. + pub fn new( + cancel: Cancelled, + payload: oneshot::Receiver<Result<BuildOutcome<P>, PayloadBuilderError>>, + ) -> Self { + Self { _cancel: cancel, payload } + } +} + impl<P> Future for PendingPayload<P>
{ type Output = Result<BuildOutcome<P>, PayloadBuilderError>; diff --git a/crates/payload/builder/src/database.rs b/crates/payload/builder/src/database.rs index 5b5239fdd..ac36de98c 100644 --- a/crates/payload/builder/src/database.rs +++ b/crates/payload/builder/src/database.rs @@ -61,10 +61,13 @@ impl CachedReads { } } +/// A [Database] that caches reads inside [CachedReads]. #[derive(Debug)] -struct CachedReadsDbMut<'a, DB> { - cached: &'a mut CachedReads, - db: DB, +pub struct CachedReadsDbMut<'a, DB> { + /// The cache of reads. + pub cached: &'a mut CachedReads, + /// The underlying database. + pub db: DB, } impl<'a, DB: DatabaseRef> Database for CachedReadsDbMut<'a, DB> { @@ -126,7 +129,8 @@ impl<'a, DB: DatabaseRef> Database for CachedReadsDbMut<'a, DB> { /// `revm::db::State` for repeated payload build jobs. #[derive(Debug)] pub struct CachedReadsDBRef<'a, DB> { - inner: RefCell<CachedReadsDbMut<'a, DB>>, + /// The inner cache reads db mut. + pub inner: RefCell<CachedReadsDbMut<'a, DB>>, } impl<'a, DB: DatabaseRef> DatabaseRef for CachedReadsDBRef<'a, DB> { diff --git a/crates/payload/ethereum/src/lib.rs b/crates/payload/ethereum/src/lib.rs index f1c0a215b..e34287f76 100644 --- a/crates/payload/ethereum/src/lib.rs +++ b/crates/payload/ethereum/src/lib.rs @@ -73,36 +73,54 @@ where debug!(target: "payload_builder", parent_hash = ?parent_block.hash(), parent_number = parent_block.number, "building empty payload"); let state = client.state_by_block_hash(parent_block.hash()).map_err(|err| { - warn!(target: "payload_builder", parent_hash=%parent_block.hash(), %err, "failed to get state for empty payload"); - err - })?; + warn!(target: "payload_builder", + parent_hash=%parent_block.hash(), + %err, + "failed to get state for empty payload" + ); + err + })?; let mut db = State::builder() - .with_database_boxed(Box::new(StateProviderDatabase::new(&state))) + .with_database(StateProviderDatabase::new(state)) .with_bundle_update() .build(); let base_fee = initialized_block_env.basefee.to::<u64>(); let block_number = initialized_block_env.number.to::<u64>(); - let block_gas_limit: u64 = initialized_block_env.gas_limit.try_into().unwrap_or(u64::MAX); + let block_gas_limit = initialized_block_env.gas_limit.try_into().unwrap_or(u64::MAX); // apply eip-4788 pre block contract call pre_block_beacon_root_contract_call( - &mut db, - &chain_spec, - block_number, - &initialized_cfg, - &initialized_block_env, - &attributes, - ).map_err(|err| { - warn!(target: "payload_builder", parent_hash=%parent_block.hash(), %err, "failed to apply beacon root contract call for empty payload"); - err - })?; - - let WithdrawalsOutcome { withdrawals_root, withdrawals } = - commit_withdrawals(&mut db, &chain_spec, attributes.timestamp, attributes.withdrawals.clone()).map_err(|err| { - warn!(target: "payload_builder", parent_hash=%parent_block.hash(), %err, "failed to commit withdrawals for empty payload"); - err - })?; + &mut db, + &chain_spec, + block_number, + &initialized_cfg, + &initialized_block_env, + &attributes, + ) + .map_err(|err| { + warn!(target: "payload_builder", + parent_hash=%parent_block.hash(), + %err, + "failed to apply beacon root contract call for empty payload" + ); + err + })?; + + let WithdrawalsOutcome { withdrawals_root, withdrawals } = commit_withdrawals( + &mut db, + &chain_spec, + attributes.timestamp, + attributes.withdrawals.clone(), + ) + .map_err(|err| { + warn!(target: "payload_builder", + parent_hash=%parent_block.hash(), + %err, + "failed to commit withdrawals for empty payload" + ); + err + })?; // merge all transitions into bundle state, this would apply the
withdrawal balance // changes and 4788 contract call @@ -110,10 +128,14 @@ where // calculate the state root let bundle_state = db.take_bundle(); - let state_root = state.state_root(&bundle_state).map_err(|err| { - warn!(target: "payload_builder", parent_hash=%parent_block.hash(), %err, "failed to calculate state root for empty payload"); - err - })?; + let state_root = db.database.state_root(&bundle_state).map_err(|err| { + warn!(target: "payload_builder", + parent_hash=%parent_block.hash(), + %err, + "failed to calculate state root for empty payload" + ); + err + })?; let mut excess_blob_gas = None; let mut blob_gas_used = None; @@ -178,9 +200,9 @@ where let BuildArguments { client, pool, mut cached_reads, config, cancel, best_payload } = args; let state_provider = client.state_by_block_hash(config.parent_block.hash())?; - let state = StateProviderDatabase::new(&state_provider); + let state = StateProviderDatabase::new(state_provider); let mut db = - State::builder().with_database_ref(cached_reads.as_db(&state)).with_bundle_update().build(); + State::builder().with_database_ref(cached_reads.as_db(state)).with_bundle_update().build(); let extra_data = config.extra_data(); let PayloadConfig { initialized_block_env, @@ -349,7 +371,10 @@ where let logs_bloom = bundle.block_logs_bloom(block_number).expect("Number is in range"); // calculate the state root - let state_root = state_provider.state_root(bundle.state())?; + let state_root = { + let state_provider = db.database.0.inner.borrow_mut(); + state_provider.db.state_root(bundle.state())? + }; // create the block header let transactions_root = proofs::calculate_transaction_root(&executed_txs); diff --git a/crates/payload/optimism/Cargo.toml b/crates/payload/optimism/Cargo.toml index ebc776e74..567c02833 100644 --- a/crates/payload/optimism/Cargo.toml +++ b/crates/payload/optimism/Cargo.toml @@ -21,6 +21,7 @@ reth-rpc-types.workspace = true reth-rpc-types-compat.workspace = true reth-engine-primitives.workspace = true reth-evm.workspace = true +reth-evm-optimism.workspace = true reth-payload-builder.workspace = true reth-basic-payload-builder.workspace = true @@ -36,7 +37,7 @@ sha2.workspace = true [features] optimism = [ "reth-primitives/optimism", - "reth-revm/optimism", "reth-provider/optimism", "reth-rpc-types-compat/optimism", + "reth-evm-optimism/optimism", ] \ No newline at end of file diff --git a/crates/payload/optimism/src/builder.rs b/crates/payload/optimism/src/builder.rs index 1d1a2dade..2794ad968 100644 --- a/crates/payload/optimism/src/builder.rs +++ b/crates/payload/optimism/src/builder.rs @@ -123,7 +123,7 @@ where err })?; let mut db = State::builder() - .with_database_boxed(Box::new(StateProviderDatabase::new(&state))) + .with_database(StateProviderDatabase::new(state)) .with_bundle_update() .build(); @@ -133,22 +133,36 @@ where // apply eip-4788 pre block contract call pre_block_beacon_root_contract_call( - &mut db, - &chain_spec, - block_number, - &initialized_cfg, - &initialized_block_env, - &attributes, - ).map_err(|err| { - warn!(target: "payload_builder", parent_hash=%parent_block.hash(), %err, "failed to apply beacon root contract call for empty payload"); - err - })?; + &mut db, + &chain_spec, + block_number, + &initialized_cfg, + &initialized_block_env, + &attributes, + ) + .map_err(|err| { + warn!(target: "payload_builder", + parent_hash=%parent_block.hash(), + %err, + "failed to apply beacon root contract call for empty payload" + ); + err + })?; - let WithdrawalsOutcome { withdrawals_root, withdrawals } = - 
commit_withdrawals(&mut db, &chain_spec, attributes.payload_attributes.timestamp, attributes.payload_attributes.withdrawals.clone()).map_err(|err| { - warn!(target: "payload_builder", parent_hash=%parent_block.hash(), %err, "failed to commit withdrawals for empty payload"); - err - })?; + let WithdrawalsOutcome { withdrawals_root, withdrawals } = commit_withdrawals( + &mut db, + &chain_spec, + attributes.payload_attributes.timestamp, + attributes.payload_attributes.withdrawals.clone(), + ) + .map_err(|err| { + warn!(target: "payload_builder", + parent_hash=%parent_block.hash(), + %err, + "failed to commit withdrawals for empty payload" + ); + err + })?; // merge all transitions into bundle state, this would apply the withdrawal balance // changes and 4788 contract call @@ -156,10 +170,14 @@ where // calculate the state root let bundle_state = db.take_bundle(); - let state_root = state.state_root(&bundle_state).map_err(|err| { - warn!(target: "payload_builder", parent_hash=%parent_block.hash(), %err, "failed to calculate state root for empty payload"); - err - })?; + let state_root = db.database.state_root(&bundle_state).map_err(|err| { + warn!(target: "payload_builder", + parent_hash=%parent_block.hash(), + %err, + "failed to calculate state root for empty payload" + ); + err + })?; let mut excess_blob_gas = None; let mut blob_gas_used = None; @@ -236,9 +254,9 @@ where let BuildArguments { client, pool, mut cached_reads, config, cancel, best_payload } = args; let state_provider = client.state_by_block_hash(config.parent_block.hash())?; - let state = StateProviderDatabase::new(&state_provider); + let state = StateProviderDatabase::new(state_provider); let mut db = - State::builder().with_database_ref(cached_reads.as_db(&state)).with_bundle_update().build(); + State::builder().with_database_ref(cached_reads.as_db(state)).with_bundle_update().build(); let extra_data = config.extra_data(); let PayloadConfig { initialized_block_env, @@ -250,13 +268,15 @@ where } = config; debug!(target: "payload_builder", id=%attributes.payload_attributes.payload_id(), parent_hash = ?parent_block.hash(), parent_number = parent_block.number, "building new payload"); + let mut cumulative_gas_used = 0; let block_gas_limit: u64 = attributes .gas_limit .unwrap_or_else(|| initialized_block_env.gas_limit.try_into().unwrap_or(u64::MAX)); let base_fee = initialized_block_env.basefee.to::<u64>(); - let mut executed_txs = Vec::new(); + let mut executed_txs = Vec::with_capacity(attributes.transactions.len()); + let mut best_txs = pool.best_transactions_with_attributes(BestTransactionsAttributes::new( base_fee, initialized_block_env.get_blob_gasprice().map(|gasprice| gasprice as u64), @@ -283,16 +303,17 @@ where // blocks will always have at least a single transaction in them (the L1 info transaction), // so we can safely assume that this will always be triggered upon the transition and that // the above check for empty blocks will never be hit on OP chains.
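One more behavioral change in the hunk that continues below: blob (EIP-4844) and deposit transactions drawn from the public pool no longer abort the whole payload build; they are marked invalid and skipped. A standalone sketch of that filtering rule (the enum here is a stand-in, not reth's `TxType`):

    #[derive(Debug, PartialEq)]
    enum PoolTxKind {
        Legacy,
        Eip4844,
        Deposit,
    }

    /// Sequencer blocks must never include these when they come from the public pool.
    fn should_skip(kind: &PoolTxKind) -> bool {
        matches!(kind, PoolTxKind::Eip4844 | PoolTxKind::Deposit)
    }

    fn main() {
        let pool = [PoolTxKind::Legacy, PoolTxKind::Eip4844, PoolTxKind::Deposit];
        let kept: Vec<_> = pool.iter().filter(|k| !should_skip(k)).collect();
        assert_eq!(kept, vec![&PoolTxKind::Legacy]); // only the legacy tx survives
    }

Sequencer-provided transactions, by contrast, still hard-fail on blobs, since a sequencer should never have produced one.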
- reth_revm::optimism::ensure_create2_deployer( + reth_evm_optimism::ensure_create2_deployer( chain_spec.clone(), attributes.payload_attributes.timestamp, &mut db, ) - .map_err(|_| { + .map_err(|err| { + warn!(target: "payload_builder", %err, "missing create2 deployer, skipping block."); PayloadBuilderError::other(OptimismPayloadBuilderError::ForceCreate2DeployerFail) })?; - let mut receipts = Vec::new(); + let mut receipts = Vec::with_capacity(attributes.transactions.len()); for sequencer_tx in &attributes.transactions { // Check if the job was cancelled, if so we can exit early. if cancel.is_cancelled() { @@ -300,7 +321,7 @@ where } // A sequencer's block should never contain blob transactions. - if matches!(sequencer_tx.tx_type(), TxType::Eip4844) { + if sequencer_tx.is_eip4844() { return Err(PayloadBuilderError::other( OptimismPayloadBuilderError::BlobTransactionRejected, )) @@ -398,11 +419,9 @@ where continue } - // A sequencer's block should never contain blob transactions. - if pool_tx.tx_type() == TxType::Eip4844 as u8 { - return Err(PayloadBuilderError::other( - OptimismPayloadBuilderError::BlobTransactionRejected, - )) + // A sequencer's block should never contain blob or deposit transactions from the pool. + if pool_tx.is_eip4844() || pool_tx.tx_type() == TxType::Deposit as u8 { + best_txs.mark_invalid(&pool_tx) } // check if the job was cancelled, if so we can exit early @@ -509,7 +528,10 @@ where let logs_bloom = bundle.block_logs_bloom(block_number).expect("Number is in range"); // calculate the state root - let state_root = state_provider.state_root(bundle.state())?; + let state_root = { + let state_provider = db.database.0.inner.borrow_mut(); + state_provider.db.state_root(bundle.state())? + }; // create the block header let transactions_root = proofs::calculate_transaction_root(&executed_txs); diff --git a/crates/payload/optimism/src/payload.rs b/crates/payload/optimism/src/payload.rs index d753370fd..9cd47ef42 100644 --- a/crates/payload/optimism/src/payload.rs +++ b/crates/payload/optimism/src/payload.rs @@ -16,8 +16,7 @@ use reth_rpc_types::engine::{ OptimismPayloadAttributes, PayloadId, }; use reth_rpc_types_compat::engine::payload::{ - block_to_payload_v3, convert_block_to_payload_field_v2, - convert_standalone_withdraw_to_withdrawal, try_block_to_payload_v1, + block_to_payload_v1, block_to_payload_v3, convert_block_to_payload_field_v2, }; use revm::primitives::HandlerCfg; use std::sync::Arc; @@ -54,19 +53,13 @@ impl PayloadBuilderAttributes for OptimismPayloadBuilderAttributes { (payload_id_optimism(&parent, &attributes, &transactions), transactions) }; - let withdraw = attributes.payload_attributes.withdrawals.map(|withdrawals| { - Withdrawals::new( - withdrawals.into_iter().map(convert_standalone_withdraw_to_withdrawal).collect(), - ) - }); - let payload_attributes = EthPayloadBuilderAttributes { id, parent, timestamp: attributes.payload_attributes.timestamp, suggested_fee_recipient: attributes.payload_attributes.suggested_fee_recipient, prev_randao: attributes.payload_attributes.prev_randao, - withdrawals: withdraw.unwrap_or_default(), + withdrawals: attributes.payload_attributes.withdrawals.unwrap_or_default().into(), parent_beacon_block_root: attributes.payload_attributes.parent_beacon_block_root, }; @@ -237,7 +230,7 @@ impl<'a> BuiltPayload for &'a OptimismBuiltPayload { // V1 engine_getPayloadV1 response impl From<OptimismBuiltPayload> for ExecutionPayloadV1 { fn from(value: OptimismBuiltPayload) -> Self { - try_block_to_payload_v1(value.block) + block_to_payload_v1(value.block)
} } diff --git a/crates/payload/validator/src/lib.rs b/crates/payload/validator/src/lib.rs index 4f9f51507..6b95b0425 100644 --- a/crates/payload/validator/src/lib.rs +++ b/crates/payload/validator/src/lib.rs @@ -10,7 +10,7 @@ use reth_primitives::{ChainSpec, SealedBlock}; use reth_rpc_types::{engine::MaybeCancunPayloadFields, ExecutionPayload, PayloadError}; -use reth_rpc_types_compat::engine::payload::{try_into_block, validate_block_hash}; +use reth_rpc_types_compat::engine::payload::try_into_block; use std::sync::Arc; /// Execution payload validator. @@ -38,6 +38,12 @@ impl ExecutionPayloadValidator { self.chain_spec().is_cancun_active_at_timestamp(timestamp) } + /// Returns true if the Shanghai hardfork is active at the given timestamp. + #[inline] + fn is_shanghai_active_at_timestamp(&self, timestamp: u64) -> bool { + self.chain_spec().is_shanghai_active_at_timestamp(timestamp) + } + /// Cancun specific checks for EIP-4844 blob transactions. /// /// Ensures that the number of blob versioned hashes matches the number hashes included in the @@ -100,20 +106,57 @@ impl ExecutionPayloadValidator { payload: ExecutionPayload, cancun_fields: MaybeCancunPayloadFields, ) -> Result<SealedBlock, PayloadError> { - let block_hash = payload.block_hash(); + let expected_hash = payload.block_hash(); // First parse the block - let block = try_into_block(payload, cancun_fields.parent_beacon_block_root())?; + let sealed_block = + try_into_block(payload, cancun_fields.parent_beacon_block_root())?.seal_slow(); - let cancun_active = self.is_cancun_active_at_timestamp(block.timestamp); + // Ensure the hash included in the payload matches the block hash + if expected_hash != sealed_block.hash() { + return Err(PayloadError::BlockHash { + execution: sealed_block.hash(), + consensus: expected_hash, + }) + } - if !cancun_active && block.has_blob_transactions() { - // cancun not active but blob transactions present - return Err(PayloadError::PreCancunBlockWithBlobTransactions) + if self.is_cancun_active_at_timestamp(sealed_block.timestamp) { + if sealed_block.header.blob_gas_used.is_none() { + // cancun active but blob gas used not present + return Err(PayloadError::PostCancunBlockWithoutBlobGasUsed) + } + if sealed_block.header.excess_blob_gas.is_none() { + // cancun active but excess blob gas not present + return Err(PayloadError::PostCancunBlockWithoutExcessBlobGas) + } + if cancun_fields.as_ref().is_none() { + // cancun active but cancun fields not present + return Err(PayloadError::PostCancunWithoutCancunFields) + } + } else { + if sealed_block.has_blob_transactions() { + // cancun not active but blob transactions present + return Err(PayloadError::PreCancunBlockWithBlobTransactions) + } + if sealed_block.header.blob_gas_used.is_some() { + // cancun not active but blob gas used present + return Err(PayloadError::PreCancunBlockWithBlobGasUsed) + } + if sealed_block.header.excess_blob_gas.is_some() { + // cancun not active but excess blob gas present + return Err(PayloadError::PreCancunBlockWithExcessBlobGas) + } + if cancun_fields.as_ref().is_some() { + // cancun not active but cancun fields present + return Err(PayloadError::PreCancunWithCancunFields) + } } - // Ensure the hash included in the payload matches the block hash - let sealed_block = validate_block_hash(block_hash, block)?; + let shanghai_active = self.is_shanghai_active_at_timestamp(sealed_block.timestamp); + if !shanghai_active && sealed_block.withdrawals.is_some() { + // shanghai not active but withdrawals present + return Err(PayloadError::PreShanghaiBlockWithWitdrawals);
+ } // EIP-4844 checks self.ensure_matching_blob_versioned_hashes(&sealed_block, &cancun_fields)?; diff --git a/crates/primitives/Cargo.toml b/crates/primitives/Cargo.toml index 3e08655db..d9d6c592e 100644 --- a/crates/primitives/Cargo.toml +++ b/crates/primitives/Cargo.toml @@ -15,7 +15,7 @@ workspace = true # reth reth-codecs.workspace = true reth-ethereum-forks.workspace = true -reth-rpc-types.workspace = true +reth-network-types.workspace = true revm.workspace = true revm-primitives = { workspace = true, features = ["serde"] } @@ -24,10 +24,10 @@ alloy-chains = { workspace = true, features = ["serde", "rlp"] } alloy-primitives = { workspace = true, features = ["rand", "rlp"] } alloy-rlp = { workspace = true, features = ["arrayvec"] } alloy-trie = { workspace = true, features = ["serde"] } -nybbles = { workspace = true, features = ["serde", "rlp"] } +alloy-rpc-types = { workspace = true, optional = true } alloy-genesis.workspace = true -alloy-eips.workspace = true -enr = { workspace = true, features = ["rust-secp256k1"] } +alloy-eips = { workspace = true, features = ["serde"] } +nybbles = { workspace = true, features = ["serde", "rlp"] } # crypto secp256k1 = { workspace = true, features = ["global-context", "recovery", "rand"] } @@ -43,15 +43,12 @@ itertools.workspace = true modular-bitfield.workspace = true once_cell.workspace = true rayon.workspace = true -serde_with.workspace = true serde.workspace = true serde_json.workspace = true -sha2 = { version = "0.10.7", optional = true } tempfile = { workspace = true, optional = true } thiserror.workspace = true zstd = { version = "0.13", features = ["experimental"], optional = true } roaring = "0.10.2" -cfg-if = "1.0.0" # `test-utils` feature hash-db = { version = "~0.15", optional = true } @@ -70,8 +67,8 @@ nybbles = { workspace = true, features = ["arbitrary"] } alloy-trie = { workspace = true, features = ["arbitrary"] } alloy-eips = { workspace = true, features = ["arbitrary"] } -arbitrary = { workspace = true, features = ["derive"] } assert_matches.workspace = true +arbitrary = { workspace = true, features = ["derive"] } proptest.workspace = true proptest-derive.workspace = true rand.workspace = true @@ -84,20 +81,15 @@ hash-db = "~0.15" plain_hasher = "0.2" sucds = "0.8.1" -anyhow = "1.0.75" -# necessary so we don't hit a "undeclared 'std'": -# https://github.com/paradigmxyz/reth/pull/177#discussion_r1021172198 criterion.workspace = true pprof = { workspace = true, features = ["flamegraph", "frame-pointer", "criterion"] } -secp256k1.workspace = true [features] default = ["c-kzg", "zstd-codec"] asm-keccak = ["alloy-primitives/asm-keccak"] arbitrary = [ "revm-primitives/arbitrary", - "reth-rpc-types/arbitrary", "reth-ethereum-forks/arbitrary", "nybbles/arbitrary", "alloy-trie/arbitrary", @@ -106,19 +98,17 @@ arbitrary = [ "dep:arbitrary", "dep:proptest", "dep:proptest-derive", - "zstd-codec" -] -c-kzg = ["dep:c-kzg", "revm/c-kzg", "revm-primitives/c-kzg", "dep:sha2", "dep:tempfile"] -zstd-codec = [ - "dep:zstd" + "zstd-codec", ] +c-kzg = ["dep:c-kzg", "revm/c-kzg", "revm-primitives/c-kzg", "dep:tempfile", "alloy-eips/kzg"] +zstd-codec = ["dep:zstd"] clap = ["dep:clap"] optimism = [ "reth-codecs/optimism", - "revm-primitives/optimism", "reth-ethereum-forks/optimism", "revm/optimism", ] +alloy-compat = ["alloy-rpc-types"] test-utils = ["dep:plain_hasher", "dep:hash-db"] [[bench]] diff --git a/crates/primitives/benches/integer_list.rs b/crates/primitives/benches/integer_list.rs index 3945d48c9..56b0e9e38 100644 --- 
a/crates/primitives/benches/integer_list.rs +++ b/crates/primitives/benches/integer_list.rs @@ -121,7 +121,8 @@ mod elias_fano { let mut builder = EliasFanoBuilder::new( list.as_ref().iter().max().map_or(0, |max| max + 1), list.as_ref().len(), - )?; + ) + .map_err(|err| EliasFanoError::InvalidInput(err.to_string()))?; builder.extend(list.as_ref().iter().copied()); Ok(Self(builder.build())) } @@ -241,8 +242,8 @@ mod elias_fano { #[derive(Debug, thiserror::Error)] pub enum EliasFanoError { /// The provided input is invalid. - #[error(transparent)] - InvalidInput(#[from] anyhow::Error), + #[error("{0}")] + InvalidInput(String), /// Failed to deserialize data into type. #[error("failed to deserialize data into type")] FailedDeserialize, diff --git a/crates/primitives/benches/validate_blob_tx.rs b/crates/primitives/benches/validate_blob_tx.rs index 0bc2f04c6..ec62353fb 100644 --- a/crates/primitives/benches/validate_blob_tx.rs +++ b/crates/primitives/benches/validate_blob_tx.rs @@ -1,6 +1,7 @@ #![allow(missing_docs)] + use alloy_primitives::hex; -use c_kzg::{KzgCommitment, KzgSettings}; +use c_kzg::KzgSettings; use criterion::{ criterion_group, criterion_main, measurement::WallTime, BenchmarkGroup, Criterion, }; @@ -10,8 +11,7 @@ use proptest::{ test_runner::{RngAlgorithm, TestRng, TestRunner}, }; use reth_primitives::{ - constants::eip4844::MAINNET_KZG_TRUSTED_SETUP, eip4844::kzg_to_versioned_hash, - BlobTransactionSidecar, TxEip4844, + constants::eip4844::MAINNET_KZG_TRUSTED_SETUP, BlobTransactionSidecar, TxEip4844, }; use revm_primitives::MAX_BLOB_NUMBER_PER_BLOCK; use std::sync::Arc; @@ -62,13 +62,7 @@ fn validate_blob_tx( } } - tx.blob_versioned_hashes = blob_sidecar - .commitments - .iter() - .map(|commitment| { - kzg_to_versioned_hash(KzgCommitment::from_bytes(&commitment.into_inner()).unwrap()) - }) - .collect(); + tx.blob_versioned_hashes = blob_sidecar.versioned_hashes().collect(); (tx, blob_sidecar) }; diff --git a/crates/primitives/res/genesis/optimism.json b/crates/primitives/res/genesis/optimism.json index 2fb05781e..50c45b68e 100644 --- a/crates/primitives/res/genesis/optimism.json +++ b/crates/primitives/res/genesis/optimism.json @@ -12,10 +12,10 @@ "istanbulBlock": 0, "muirGlacierBlock": 0, "berlinBlock": 3950000, - "londonBlock": 3950000, - "arrowGlacierBlock": 3950000, - "grayGlacierBlock": 3950000, - "mergeNetsplitBlock": 3950000, + "londonBlock": 105235063, + "arrowGlacierBlock": 105235063, + "grayGlacierBlock": 105235063, + "mergeNetsplitBlock": 105235063, "bedrockBlock": 105235063, "terminalTotalDifficulty": 0, "terminalTotalDifficultyPassed": true, @@ -28,5 +28,6 @@ "difficulty": "1", "gasLimit": "15000000", "extradata": "0x000000000000000000000000000000000000000000000000000000000000000000000398232e2064f896018496b4b44b3d62751f0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "stateRoot": "0xeddb4c1786789419153a27c4c80ff44a2226b6eda04f7e22ce5bae892ea568eb", "alloc": {} } \ No newline at end of file diff --git a/crates/primitives/src/block.rs b/crates/primitives/src/block.rs index 06c08db1f..8a029dc05 100644 --- a/crates/primitives/src/block.rs +++ b/crates/primitives/src/block.rs @@ -1,16 +1,15 @@ use crate::{ - Address, Bytes, GotExpected, Header, SealedHeader, Signature, TransactionSigned, + Address, Bytes, GotExpected, Header, SealedHeader, TransactionSigned, TransactionSignedEcRecovered, Withdrawals, B256, }; use alloy_rlp::{RlpDecodable, RlpEncodable}; #[cfg(any(test, feature = 
"arbitrary"))] use proptest::prelude::{any, prop_compose}; use reth_codecs::derive_arbitrary; -use reth_rpc_types::ConversionError; use serde::{Deserialize, Serialize}; use std::ops::Deref; -pub use reth_rpc_types::{ +pub use alloy_eips::eip1898::{ BlockHashOrNumber, BlockId, BlockNumHash, BlockNumberOrTag, ForkBlock, RpcBlockHash, }; @@ -49,7 +48,7 @@ pub struct Block { } impl Block { - /// Create SealedBLock that will create all header hashes. + /// Calculate the header hash and seal the block so that it can't be changed. pub fn seal_slow(self) -> SealedBlock { SealedBlock { header: self.header.seal_slow(), @@ -148,34 +147,37 @@ impl Deref for Block { } } -impl TryFrom for Block { - type Error = ConversionError; +#[cfg(feature = "alloy-compat")] +impl TryFrom for Block { + type Error = alloy_rpc_types::ConversionError; + + fn try_from(block: alloy_rpc_types::Block) -> Result { + use alloy_rpc_types::ConversionError; - fn try_from(block: reth_rpc_types::Block) -> Result { let body = { let transactions: Result, ConversionError> = match block .transactions { - reth_rpc_types::BlockTransactions::Full(transactions) => transactions + alloy_rpc_types::BlockTransactions::Full(transactions) => transactions .into_iter() .map(|tx| { let signature = tx.signature.ok_or(ConversionError::MissingSignature)?; Ok(TransactionSigned::from_transaction_and_signature( tx.try_into()?, - Signature { + crate::Signature { r: signature.r, s: signature.s, odd_y_parity: signature .y_parity - .unwrap_or(reth_rpc_types::Parity(false)) + .unwrap_or(alloy_rpc_types::Parity(false)) .0, }, )) }) .collect(), - reth_rpc_types::BlockTransactions::Hashes(_) | - reth_rpc_types::BlockTransactions::Uncle => { - return Err(ConversionError::MissingFullTransactions); + alloy_rpc_types::BlockTransactions::Hashes(_) | + alloy_rpc_types::BlockTransactions::Uncle => { + return Err(ConversionError::MissingFullTransactions) } }; transactions? @@ -214,6 +216,12 @@ impl BlockWithSenders { SealedBlockWithSenders { block: block.seal(hash), senders } } + /// Calculate the header hash and seal the block with senders so that it can't be changed. + #[inline] + pub fn seal_slow(self) -> SealedBlockWithSenders { + SealedBlockWithSenders { block: self.block.seal_slow(), senders: self.senders } + } + /// Split Structure to its components #[inline] pub fn into_components(self) -> (Block, Vec

) { @@ -296,66 +304,6 @@ pub struct SealedBlock { pub withdrawals: Option<Withdrawals>, } -/// Generates a header which is valid __with respect to past and future forks__. This means, for -/// example, that if the withdrawals root is present, the base fee per gas is also present. -/// -/// If blob gas used were present, then the excess blob gas and parent beacon block root are also -/// present. In this example, the withdrawals root would also be present. -/// -/// This __does not, and should not guarantee__ that the header is valid with respect to __anything -/// else__. -#[cfg(any(test, feature = "arbitrary"))] -pub fn generate_valid_header( - mut header: Header, - eip_4844_active: bool, - blob_gas_used: u64, - excess_blob_gas: u64, - parent_beacon_block_root: B256, -) -> Header { - // EIP-1559 logic - if header.base_fee_per_gas.is_none() { - // If EIP-1559 is not active, clear related fields - header.withdrawals_root = None; - header.blob_gas_used = None; - header.excess_blob_gas = None; - header.parent_beacon_block_root = None; - } else if header.withdrawals_root.is_none() { - // If EIP-4895 is not active, clear related fields - header.blob_gas_used = None; - header.excess_blob_gas = None; - header.parent_beacon_block_root = None; - } else if eip_4844_active { - // Set fields based on EIP-4844 being active - header.blob_gas_used = Some(blob_gas_used); - header.excess_blob_gas = Some(excess_blob_gas); - header.parent_beacon_block_root = Some(parent_beacon_block_root); - } else { - // If EIP-4844 is not active, clear related fields - header.blob_gas_used = None; - header.excess_blob_gas = None; - header.parent_beacon_block_root = None; - } - - header -} - -#[cfg(any(test, feature = "arbitrary"))] -prop_compose! { - /// Generates a proptest strategy for constructing an instance of a header which is valid __with - /// respect to past and future forks__. - /// - /// See docs for [generate_valid_header] for more information. - pub fn valid_header_strategy()( - header in any::<Header>
(), - eip_4844_active in any::<bool>(), - blob_gas_used in any::<u64>(), - excess_blob_gas in any::<u64>(), - parent_beacon_block_root in any::<B256>() - ) -> Header { - generate_valid_header(header, eip_4844_active, blob_gas_used, excess_blob_gas, parent_beacon_block_root) - } -} - impl SealedBlock { /// Create a new sealed block instance using the sealed header and block body. #[inline] @@ -458,6 +406,12 @@ impl SealedBlock { self.blob_transactions().iter().filter_map(|tx| tx.blob_gas_used()).sum() } + /// Returns whether or not the block contains any blob transactions. + #[inline] + pub fn has_blob_transactions(&self) -> bool { + self.body.iter().any(|tx| tx.is_eip4844()) + } + /// Ensures that the transaction root in the block header is valid. /// /// The transaction root is the Keccak 256-bit hash of the root node of the trie structure @@ -510,7 +464,7 @@ impl std::ops::DerefMut for SealedBlock { } /// Sealed block with senders recovered from transactions. -#[derive(Debug, Clone, PartialEq, Eq, Default)] +#[derive(Debug, Clone, PartialEq, Eq, Default, Serialize, Deserialize)] pub struct SealedBlockWithSenders { /// Sealed block pub block: SealedBlock, @@ -653,6 +607,66 @@ impl From<Block> for BlockBody { } } +/// Generates a header which is valid __with respect to past and future forks__. This means, for +/// example, that if the withdrawals root is present, the base fee per gas is also present. +/// +/// If blob gas used were present, then the excess blob gas and parent beacon block root are also +/// present. In this example, the withdrawals root would also be present. +/// +/// This __does not, and should not guarantee__ that the header is valid with respect to __anything +/// else__. +#[cfg(any(test, feature = "arbitrary"))] +pub fn generate_valid_header( + mut header: Header, + eip_4844_active: bool, + blob_gas_used: u64, + excess_blob_gas: u64, + parent_beacon_block_root: B256, +) -> Header { + // EIP-1559 logic + if header.base_fee_per_gas.is_none() { + // If EIP-1559 is not active, clear related fields + header.withdrawals_root = None; + header.blob_gas_used = None; + header.excess_blob_gas = None; + header.parent_beacon_block_root = None; + } else if header.withdrawals_root.is_none() { + // If EIP-4895 is not active, clear related fields + header.blob_gas_used = None; + header.excess_blob_gas = None; + header.parent_beacon_block_root = None; + } else if eip_4844_active { + // Set fields based on EIP-4844 being active + header.blob_gas_used = Some(blob_gas_used); + header.excess_blob_gas = Some(excess_blob_gas); + header.parent_beacon_block_root = Some(parent_beacon_block_root); + } else { + // If EIP-4844 is not active, clear related fields + header.blob_gas_used = None; + header.excess_blob_gas = None; + header.parent_beacon_block_root = None; + } + + header +} + +#[cfg(any(test, feature = "arbitrary"))] +prop_compose! { + /// Generates a proptest strategy for constructing an instance of a header which is valid __with + /// respect to past and future forks__. + /// + /// See docs for [generate_valid_header] for more information. + pub fn valid_header_strategy()( + header in any::<Header>
(), + eip_4844_active in any::(), + blob_gas_used in any::(), + excess_blob_gas in any::(), + parent_beacon_block_root in any::() + ) -> Header { + generate_valid_header(header, eip_4844_active, blob_gas_used, excess_blob_gas, parent_beacon_block_root) + } +} + #[cfg(test)] mod tests { use super::{BlockNumberOrTag::*, *}; diff --git a/crates/primitives/src/chain/mod.rs b/crates/primitives/src/chain/mod.rs index f8425f95e..b04e88ee0 100644 --- a/crates/primitives/src/chain/mod.rs +++ b/crates/primitives/src/chain/mod.rs @@ -1,9 +1,8 @@ -pub use alloy_chains::{Chain, NamedChain}; +pub use alloy_chains::{Chain, ChainKind, NamedChain}; pub use info::ChainInfo; pub use spec::{ AllGenesisFormats, BaseFeeParams, BaseFeeParamsKind, ChainSpec, ChainSpecBuilder, - DisplayHardforks, ForkBaseFeeParams, ForkCondition, ForkTimestamps, DEV, GOERLI, HOLESKY, - MAINNET, SEPOLIA, + DisplayHardforks, ForkBaseFeeParams, ForkCondition, DEV, GOERLI, HOLESKY, MAINNET, SEPOLIA, }; #[cfg(feature = "optimism")] pub use spec::{BASE_MAINNET, BASE_SEPOLIA, OP_MAINNET, OP_SEPOLIA}; diff --git a/crates/primitives/src/chain/spec.rs b/crates/primitives/src/chain/spec.rs index ee732a9bc..de56ff1fe 100644 --- a/crates/primitives/src/chain/spec.rs +++ b/crates/primitives/src/chain/spec.rs @@ -4,8 +4,9 @@ use crate::{ net::{goerli_nodes, mainnet_nodes, sepolia_nodes}, proofs::state_root_ref_unhashed, revm_primitives::{address, b256}, - Address, BlockNumber, Chain, ForkFilter, ForkFilterKey, ForkHash, ForkId, Genesis, Hardfork, - Head, Header, NamedChain, NodeRecord, SealedHeader, B256, EMPTY_OMMER_ROOT_HASH, U256, + Address, BlockNumber, Chain, ChainKind, ForkFilter, ForkFilterKey, ForkHash, ForkId, Genesis, + Hardfork, Head, Header, NamedChain, NodeRecord, SealedHeader, B256, EMPTY_OMMER_ROOT_HASH, + U256, }; use once_cell::sync::Lazy; use serde::{Deserialize, Serialize}; @@ -40,7 +41,6 @@ pub static MAINNET: Lazy> = Lazy::new(|| { 15537394, U256::from(58_750_003_716_598_352_816_469u128), )), - fork_timestamps: ForkTimestamps::default().shanghai(1681338455).cancun(1710338135), hardforks: BTreeMap::from([ (Hardfork::Frontier, ForkCondition::Block(0)), (Hardfork::Homestead, ForkCondition::Block(1150000)), @@ -89,7 +89,6 @@ pub static GOERLI: Lazy> = Lazy::new(|| { )), // paris_block_and_final_difficulty: Some((7382818, U256::from(10_790_000))), - fork_timestamps: ForkTimestamps::default().shanghai(1678832736).cancun(1705473120), hardforks: BTreeMap::from([ (Hardfork::Frontier, ForkCondition::Block(0)), (Hardfork::Homestead, ForkCondition::Block(0)), @@ -132,7 +131,6 @@ pub static SEPOLIA: Lazy> = Lazy::new(|| { )), // paris_block_and_final_difficulty: Some((1450409, U256::from(17_000_018_015_853_232u128))), - fork_timestamps: ForkTimestamps::default().shanghai(1677557088).cancun(1706655072), hardforks: BTreeMap::from([ (Hardfork::Frontier, ForkCondition::Block(0)), (Hardfork::Homestead, ForkCondition::Block(0)), @@ -178,7 +176,6 @@ pub static HOLESKY: Lazy> = Lazy::new(|| { "b5f7f912443c940f21fd611f12828d75b534364ed9e95ca4e307729a4661bde4" )), paris_block_and_final_difficulty: Some((0, U256::from(1))), - fork_timestamps: ForkTimestamps::default().shanghai(1696000704).cancun(1707305664), hardforks: BTreeMap::from([ (Hardfork::Frontier, ForkCondition::Block(0)), (Hardfork::Homestead, ForkCondition::Block(0)), @@ -223,7 +220,6 @@ pub static DEV: Lazy> = Lazy::new(|| { "2f980576711e3617a5e4d83dd539548ec0f7792007d505a3d2e9674833af2d7c" )), paris_block_and_final_difficulty: Some((0, U256::from(0))), - fork_timestamps: 
ForkTimestamps::default().shanghai(0).cancun(0), hardforks: BTreeMap::from([ (Hardfork::Frontier, ForkCondition::Block(0)), (Hardfork::Homestead, ForkCondition::Block(0)), @@ -269,11 +265,6 @@ pub static OP_MAINNET: Lazy> = Lazy::new(|| { genesis_hash: Some(b256!( "7ca38a1916c42007829c55e69d3e9a73265554b586a499015373241b8a3fa48b" )), - fork_timestamps: ForkTimestamps::default() - .shanghai(1699981200) - .canyon(1699981200) - .cancun(1707238800) - .ecotone(1707238800), paris_block_and_final_difficulty: Some((0, U256::from(0))), hardforks: BTreeMap::from([ (Hardfork::Frontier, ForkCondition::Block(0)), @@ -286,15 +277,19 @@ pub static OP_MAINNET: Lazy> = Lazy::new(|| { (Hardfork::Istanbul, ForkCondition::Block(0)), (Hardfork::MuirGlacier, ForkCondition::Block(0)), (Hardfork::Berlin, ForkCondition::Block(3950000)), - (Hardfork::London, ForkCondition::Block(3950000)), - (Hardfork::ArrowGlacier, ForkCondition::Block(3950000)), - (Hardfork::GrayGlacier, ForkCondition::Block(3950000)), + (Hardfork::London, ForkCondition::Block(105235063)), + (Hardfork::ArrowGlacier, ForkCondition::Block(105235063)), + (Hardfork::GrayGlacier, ForkCondition::Block(105235063)), ( Hardfork::Paris, - ForkCondition::TTD { fork_block: Some(3950000), total_difficulty: U256::from(0) }, + ForkCondition::TTD { fork_block: Some(105235063), total_difficulty: U256::from(0) }, ), (Hardfork::Bedrock, ForkCondition::Block(105235063)), (Hardfork::Regolith, ForkCondition::Timestamp(0)), + (Hardfork::Shanghai, ForkCondition::Timestamp(1704992401)), + (Hardfork::Canyon, ForkCondition::Timestamp(1704992401)), + (Hardfork::Cancun, ForkCondition::Timestamp(1710374401)), + (Hardfork::Ecotone, ForkCondition::Timestamp(1710374401)), ]), base_fee_params: BaseFeeParamsKind::Variable( vec![ @@ -319,11 +314,6 @@ pub static OP_SEPOLIA: Lazy> = Lazy::new(|| { genesis_hash: Some(b256!( "102de6ffb001480cc9b8b548fd05c34cd4f46ae4aa91759393db90ea0409887d" )), - fork_timestamps: ForkTimestamps::default() - .shanghai(1699981200) - .canyon(1699981200) - .cancun(1708534800) - .ecotone(1708534800), paris_block_and_final_difficulty: Some((0, U256::from(0))), hardforks: BTreeMap::from([ (Hardfork::Frontier, ForkCondition::Block(0)), @@ -373,11 +363,6 @@ pub static BASE_SEPOLIA: Lazy> = Lazy::new(|| { genesis_hash: Some(b256!( "0dcc9e089e30b90ddfc55be9a37dd15bc551aeee999d2e2b51414c54eaf934e4" )), - fork_timestamps: ForkTimestamps::default() - .shanghai(1699981200) - .canyon(1699981200) - .cancun(1708534800) - .ecotone(1708534800), paris_block_and_final_difficulty: Some((0, U256::from(0))), hardforks: BTreeMap::from([ (Hardfork::Frontier, ForkCondition::Block(0)), @@ -427,11 +412,6 @@ pub static BASE_MAINNET: Lazy> = Lazy::new(|| { genesis_hash: Some(b256!( "f712aa9241cc24369b143cf6dce85f0902a9731e70d66818a3a5845b296c73dd" )), - fork_timestamps: ForkTimestamps::default() - .shanghai(1704992401) - .canyon(1704992401) - .cancun(1710374401) - .ecotone(1710374401), paris_block_and_final_difficulty: Some((0, U256::from(0))), hardforks: BTreeMap::from([ (Hardfork::Frontier, ForkCondition::Block(0)), @@ -532,12 +512,6 @@ pub struct ChainSpec { #[serde(skip, default)] pub paris_block_and_final_difficulty: Option<(u64, U256)>, - /// Timestamps of various hardforks - /// - /// This caches entries in `hardforks` map - #[serde(skip, default)] - pub fork_timestamps: ForkTimestamps, - /// The active hard forks and their activation conditions pub hardforks: BTreeMap, @@ -562,7 +536,6 @@ impl Default for ChainSpec { genesis_hash: Default::default(), genesis: 
Default::default(), paris_block_and_final_difficulty: Default::default(), - fork_timestamps: Default::default(), hardforks: Default::default(), deposit_contract: Default::default(), base_fee_params: BaseFeeParamsKind::Constant(BaseFeeParams::ethereum()), @@ -577,6 +550,24 @@ impl ChainSpec { self.chain } + /// Returns `true` if this chain contains Ethereum configuration. + #[inline] + pub fn is_eth(&self) -> bool { + matches!( + self.chain.kind(), + ChainKind::Named( + NamedChain::Mainnet | + NamedChain::Morden | + NamedChain::Ropsten | + NamedChain::Rinkeby | + NamedChain::Goerli | + NamedChain::Kovan | + NamedChain::Holesky | + NamedChain::Sepolia + ) + ) + } + /// Returns `true` if this chain contains Optimism configuration. #[inline] pub fn is_optimism(&self) -> bool { @@ -798,19 +789,19 @@ impl ChainSpec { /// Convenience method to check if [Hardfork::Shanghai] is active at a given timestamp. #[inline] pub fn is_shanghai_active_at_timestamp(&self, timestamp: u64) -> bool { - self.fork_timestamps - .shanghai - .map(|shanghai| timestamp >= shanghai) - .unwrap_or_else(|| self.is_fork_active_at_timestamp(Hardfork::Shanghai, timestamp)) + self.is_fork_active_at_timestamp(Hardfork::Shanghai, timestamp) } /// Convenience method to check if [Hardfork::Cancun] is active at a given timestamp. #[inline] pub fn is_cancun_active_at_timestamp(&self, timestamp: u64) -> bool { - self.fork_timestamps - .cancun - .map(|cancun| timestamp >= cancun) - .unwrap_or_else(|| self.is_fork_active_at_timestamp(Hardfork::Cancun, timestamp)) + self.is_fork_active_at_timestamp(Hardfork::Cancun, timestamp) + } + + /// Convenience method to check if [Hardfork::Prague] is active at a given timestamp. + #[inline] + pub fn is_prague_active_at_timestamp(&self, timestamp: u64) -> bool { + self.is_fork_active_at_timestamp(Hardfork::Prague, timestamp) } /// Convenience method to check if [Hardfork::Byzantium] is active at a given block number. 
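With the `ForkTimestamps` cache gone, the Shanghai and Cancun convenience checks (and the new Prague one) resolve straight from the `hardforks` map. A minimal usage sketch, mirroring the assertions of the `test_fork_timestamps` test that this diff removes further down:

```rust
use reth_primitives::{Chain, ChainSpec, ForkCondition, Genesis, Hardfork};

fn main() {
    // Build a minimal spec with Shanghai activating at timestamp 1337.
    let spec = ChainSpec::builder()
        .chain(Chain::mainnet())
        .genesis(Genesis::default())
        .with_fork(Hardfork::Shanghai, ForkCondition::Timestamp(1337))
        .build();

    // The convenience method is now a thin wrapper over
    // `is_fork_active_at_timestamp`, with no cached timestamps involved.
    assert!(spec.is_shanghai_active_at_timestamp(1337));
    assert!(!spec.is_shanghai_active_at_timestamp(1336));
}
```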
@@ -987,6 +978,9 @@ impl ChainSpec { impl From for ChainSpec { fn from(genesis: Genesis) -> Self { + #[cfg(feature = "optimism")] + let optimism_genesis_info = OptimismGenesisInfo::extract_from(&genesis); + // Block-based hardforks let hardfork_opts = [ (Hardfork::Homestead, genesis.config.homestead_block), @@ -1002,6 +996,8 @@ impl From for ChainSpec { (Hardfork::London, genesis.config.london_block), (Hardfork::ArrowGlacier, genesis.config.arrow_glacier_block), (Hardfork::GrayGlacier, genesis.config.gray_glacier_block), + #[cfg(feature = "optimism")] + (Hardfork::Bedrock, optimism_genesis_info.bedrock_block), ]; let mut hardforks = hardfork_opts .iter() @@ -1028,6 +1024,12 @@ impl From for ChainSpec { let time_hardfork_opts = [ (Hardfork::Shanghai, genesis.config.shanghai_time), (Hardfork::Cancun, genesis.config.cancun_time), + #[cfg(feature = "optimism")] + (Hardfork::Regolith, optimism_genesis_info.regolith_time), + #[cfg(feature = "optimism")] + (Hardfork::Ecotone, optimism_genesis_info.ecotone_time), + #[cfg(feature = "optimism")] + (Hardfork::Canyon, optimism_genesis_info.canyon_time), ]; let time_hardforks = time_hardfork_opts @@ -1043,7 +1045,6 @@ impl From for ChainSpec { chain: genesis.config.chain_id.into(), genesis, genesis_hash: None, - fork_timestamps: ForkTimestamps::from_hardforks(&hardforks), hardforks, paris_block_and_final_difficulty, deposit_contract: None, @@ -1052,83 +1053,6 @@ impl From for ChainSpec { } } -/// Various timestamps of forks -#[derive(Debug, Clone, Default, Eq, PartialEq)] -pub struct ForkTimestamps { - /// The timestamp of the shanghai fork - pub shanghai: Option, - /// The timestamp of the cancun fork - pub cancun: Option, - /// The timestamp of the Regolith fork - #[cfg(feature = "optimism")] - pub regolith: Option, - /// The timestamp of the Canyon fork - #[cfg(feature = "optimism")] - pub canyon: Option, - /// The timestamp of the Ecotone fork - #[cfg(feature = "optimism")] - pub ecotone: Option, -} - -impl ForkTimestamps { - /// Creates a new [`ForkTimestamps`] from the given hardforks by extracting the timestamps - fn from_hardforks(forks: &BTreeMap) -> Self { - let mut timestamps = ForkTimestamps::default(); - if let Some(shanghai) = forks.get(&Hardfork::Shanghai).and_then(|f| f.as_timestamp()) { - timestamps = timestamps.shanghai(shanghai); - } - if let Some(cancun) = forks.get(&Hardfork::Cancun).and_then(|f| f.as_timestamp()) { - timestamps = timestamps.cancun(cancun); - } - #[cfg(feature = "optimism")] - { - if let Some(regolith) = forks.get(&Hardfork::Regolith).and_then(|f| f.as_timestamp()) { - timestamps = timestamps.regolith(regolith); - } - if let Some(canyon) = forks.get(&Hardfork::Canyon).and_then(|f| f.as_timestamp()) { - timestamps = timestamps.canyon(canyon); - } - if let Some(ecotone) = forks.get(&Hardfork::Ecotone).and_then(|f| f.as_timestamp()) { - timestamps = timestamps.ecotone(ecotone); - } - } - timestamps - } - - /// Sets the given shanghai timestamp - pub fn shanghai(mut self, shanghai: u64) -> Self { - self.shanghai = Some(shanghai); - self - } - - /// Sets the given cancun timestamp - pub fn cancun(mut self, cancun: u64) -> Self { - self.cancun = Some(cancun); - self - } - - /// Sets the given regolith timestamp - #[cfg(feature = "optimism")] - pub fn regolith(mut self, regolith: u64) -> Self { - self.regolith = Some(regolith); - self - } - - /// Sets the given canyon timestamp - #[cfg(feature = "optimism")] - pub fn canyon(mut self, canyon: u64) -> Self { - self.canyon = Some(canyon); - self - } - - /// Sets the given 
ecotone timestamp - #[cfg(feature = "optimism")] - pub fn ecotone(mut self, ecotone: u64) -> Self { - self.ecotone = Some(ecotone); - self - } -} - /// A helper type for compatibility with geth's config #[derive(Debug, Clone, Deserialize, Serialize)] #[serde(untagged)] @@ -1366,7 +1290,6 @@ impl ChainSpecBuilder { chain: self.chain.expect("The chain is required"), genesis: self.genesis.expect("The genesis is required"), genesis_hash: None, - fork_timestamps: ForkTimestamps::from_hardforks(&self.hardforks), hardforks: self.hardforks, paris_block_and_final_difficulty, deposit_contract: None, @@ -1671,6 +1594,42 @@ impl DepositContract { } } +#[cfg(feature = "optimism")] +struct OptimismGenesisInfo { + bedrock_block: Option, + regolith_time: Option, + ecotone_time: Option, + canyon_time: Option, +} + +#[cfg(feature = "optimism")] +impl OptimismGenesisInfo { + fn extract_from(genesis: &Genesis) -> Self { + Self { + bedrock_block: genesis + .config + .extra_fields + .get("bedrockBlock") + .and_then(|value| value.as_u64()), + regolith_time: genesis + .config + .extra_fields + .get("regolithTime") + .and_then(|value| value.as_u64()), + ecotone_time: genesis + .config + .extra_fields + .get("ecotoneTime") + .and_then(|value| value.as_u64()), + canyon_time: genesis + .config + .extra_fields + .get("canyonTime") + .and_then(|value| value.as_u64()), + } + } +} + #[cfg(test)] mod tests { use super::*; @@ -1751,36 +1710,6 @@ Post-merge hard forks (timestamp based): ); } - // Tests that the ForkTimestamps are correctly set up. - #[test] - fn test_fork_timestamps() { - let spec = ChainSpec::builder().chain(Chain::mainnet()).genesis(Genesis::default()).build(); - assert!(spec.fork_timestamps.shanghai.is_none()); - - let spec = ChainSpec::builder() - .chain(Chain::mainnet()) - .genesis(Genesis::default()) - .with_fork(Hardfork::Shanghai, ForkCondition::Timestamp(1337)) - .build(); - assert_eq!(spec.fork_timestamps.shanghai, Some(1337)); - assert!(spec.is_shanghai_active_at_timestamp(1337)); - assert!(!spec.is_shanghai_active_at_timestamp(1336)); - } - - // Tests that all predefined timestamps are correctly set up in the chainspecs - #[test] - fn test_predefined_chain_spec_fork_timestamps() { - let predefined = [&MAINNET, &SEPOLIA, &HOLESKY, &GOERLI]; - - for spec in predefined.iter() { - let expected_timestamp_forks = &spec.fork_timestamps; - let got_timestamp_forks = ForkTimestamps::from_hardforks(&spec.hardforks); - - // make sure they're the same - assert_eq!(expected_timestamp_forks, &got_timestamp_forks); - } - } - // Tests that we skip any fork blocks in block #0 (the genesis ruleset) #[test] fn ignores_genesis_fork_blocks() { @@ -2383,6 +2312,25 @@ Post-merge hard forks (timestamp based): ); } + #[cfg(feature = "optimism")] + #[test] + fn op_mainnet_forkids() { + test_fork_ids( + &OP_MAINNET, + &[ + ( + Head { number: 0, ..Default::default() }, + ForkId { hash: ForkHash([0xca, 0xf5, 0x17, 0xed]), next: 3950000 }, + ), + // TODO: complete these, see https://github.com/paradigmxyz/reth/issues/8012 + ( + Head { number: 105235063, timestamp: 1710374401, ..Default::default() }, + ForkId { hash: ForkHash([0x19, 0xda, 0x4c, 0x52]), next: 0 }, + ), + ], + ); + } + #[cfg(feature = "optimism")] #[test] fn base_sepolia_forkids() { @@ -3252,4 +3200,54 @@ Post-merge hard forks (timestamp based): fn is_bedrock_active() { assert!(!OP_MAINNET.is_bedrock_active_at_block(1)) } + + #[cfg(feature = "optimism")] + #[test] + fn parse_optimism_hardforks() { + let geth_genesis = r#" + { + "config": { + "bedrockBlock": 10, 
+ "regolithTime": 20, + "ecotoneTime": 30, + "canyonTime": 40, + "optimism": { + "eip1559Elasticity": 50, + "eip1559Denominator": 60, + "eip1559DenominatorCanyon": 70 + } + } + } + "#; + let genesis: Genesis = serde_json::from_str(geth_genesis).unwrap(); + + let actual_bedrock_block = genesis.config.extra_fields.get("bedrockBlock"); + assert_eq!(actual_bedrock_block, Some(serde_json::Value::from(10)).as_ref()); + let actual_regolith_timestamp = genesis.config.extra_fields.get("regolithTime"); + assert_eq!(actual_regolith_timestamp, Some(serde_json::Value::from(20)).as_ref()); + let actual_ecotone_timestamp = genesis.config.extra_fields.get("ecotoneTime"); + assert_eq!(actual_ecotone_timestamp, Some(serde_json::Value::from(30)).as_ref()); + let actual_canyon_timestamp = genesis.config.extra_fields.get("canyonTime"); + assert_eq!(actual_canyon_timestamp, Some(serde_json::Value::from(40)).as_ref()); + + let optimism_object = genesis.config.extra_fields.get("optimism").unwrap(); + assert_eq!( + optimism_object, + &serde_json::json!({ + "eip1559Elasticity": 50, + "eip1559Denominator": 60, + "eip1559DenominatorCanyon": 70 + }) + ); + let chain_spec: ChainSpec = genesis.into(); + assert!(!chain_spec.is_fork_active_at_block(Hardfork::Bedrock, 0)); + assert!(!chain_spec.is_fork_active_at_timestamp(Hardfork::Regolith, 0)); + assert!(!chain_spec.is_fork_active_at_timestamp(Hardfork::Ecotone, 0)); + assert!(!chain_spec.is_fork_active_at_timestamp(Hardfork::Canyon, 0)); + + assert!(chain_spec.is_fork_active_at_block(Hardfork::Bedrock, 10)); + assert!(chain_spec.is_fork_active_at_timestamp(Hardfork::Regolith, 20)); + assert!(chain_spec.is_fork_active_at_timestamp(Hardfork::Ecotone, 30)); + assert!(chain_spec.is_fork_active_at_timestamp(Hardfork::Canyon, 40)); + } } diff --git a/crates/primitives/src/eip4844.rs b/crates/primitives/src/eip4844.rs index 4f65cc7ee..0d228528f 100644 --- a/crates/primitives/src/eip4844.rs +++ b/crates/primitives/src/eip4844.rs @@ -1,21 +1,9 @@ //! Helpers for working with EIP-4844 blob fee. 
-#[cfg(feature = "c-kzg")] -use crate::{constants::eip4844::VERSIONED_HASH_VERSION_KZG, B256}; -#[cfg(feature = "c-kzg")] -use sha2::{Digest, Sha256}; - // re-exports from revm for calculating blob fee pub use crate::revm_primitives::{ calc_blob_gasprice, calc_excess_blob_gas as calculate_excess_blob_gas, }; -/// Calculates the versioned hash for a KzgCommitment -/// -/// Specified in [EIP-4844](https://eips.ethereum.org/EIPS/eip-4844#header-extension) -#[cfg(feature = "c-kzg")] -pub fn kzg_to_versioned_hash(commitment: c_kzg::KzgCommitment) -> B256 { - let mut res = Sha256::digest(commitment.as_slice()); - res[0] = VERSIONED_HASH_VERSION_KZG; - B256::new(res.into()) -} +#[doc(inline)] +pub use alloy_eips::eip4844::kzg_to_versioned_hash; diff --git a/crates/primitives/src/genesis.rs b/crates/primitives/src/genesis.rs index 52b24facb..c81d9488d 100644 --- a/crates/primitives/src/genesis.rs +++ b/crates/primitives/src/genesis.rs @@ -3,212 +3,3 @@ // re-export genesis types #[doc(inline)] pub use alloy_genesis::*; - -#[cfg(any(test, feature = "test-utils"))] -pub use allocator::GenesisAllocator; - -#[cfg(any(test, feature = "test-utils"))] -mod allocator { - use crate::{public_key_to_address, Address, Bytes, B256, U256}; - use alloy_genesis::GenesisAccount; - use secp256k1::{ - rand::{thread_rng, RngCore}, - KeyPair, Secp256k1, - }; - use std::collections::{hash_map::Entry, BTreeMap, HashMap}; - - /// This helps create a custom genesis alloc by making it easy to add funded accounts with known - /// signers to the genesis block. - /// - /// # Example - /// ``` - /// # use reth_primitives::{ genesis::GenesisAllocator, Address, U256, hex, Bytes}; - /// # use std::str::FromStr; - /// let mut allocator = GenesisAllocator::default(); - /// - /// // This will add a genesis account to the alloc builder, with the provided balance. The - /// // signer for the account will be returned. - /// let (_signer, _addr) = allocator.new_funded_account(U256::from(100_000_000_000_000_000u128)); - /// - /// // You can also provide code for the account. - /// let code = Bytes::from_str("0x1234").unwrap(); - /// let (_second_signer, _second_addr) = - /// allocator.new_funded_account_with_code(U256::from(100_000_000_000_000_000u128), code); - /// - /// // You can also add an account with a specific address. - /// // This will not return a signer, since the address is provided by the user and the signer - /// // may be unknown. - /// let addr = "0Ac1dF02185025F65202660F8167210A80dD5086".parse::
().unwrap(); - /// allocator.add_funded_account_with_address(addr, U256::from(100_000_000_000_000_000u128)); - /// - /// // Once you're done adding accounts, you can build the alloc. - /// let alloc = allocator.build(); - /// ``` - #[derive(Debug)] - pub struct GenesisAllocator<'a> { - /// The genesis alloc to be built. - alloc: HashMap, - /// The rng to use for generating key pairs. - rng: Box, - } - - impl<'a> GenesisAllocator<'a> { - /// Initialize a new alloc builder with the provided rng. - pub fn new_with_rng(rng: &'a mut R) -> Self - where - R: RngCore + std::fmt::Debug, - { - Self { alloc: HashMap::default(), rng: Box::new(rng) } - } - - /// Use the provided rng for generating key pairs. - pub fn with_rng(mut self, rng: &'a mut R) -> Self - where - R: RngCore + std::fmt::Debug, - { - self.rng = Box::new(rng); - self - } - - /// Add a funded account to the genesis alloc. - /// - /// Returns the key pair for the account and the account's address. - pub fn new_funded_account(&mut self, balance: U256) -> (KeyPair, Address) { - let secp = Secp256k1::new(); - let pair = KeyPair::new(&secp, &mut self.rng); - let address = public_key_to_address(pair.public_key()); - - self.alloc.insert(address, GenesisAccount::default().with_balance(balance)); - - (pair, address) - } - - /// Add a funded account to the genesis alloc with the provided code. - /// - /// Returns the key pair for the account and the account's address. - pub fn new_funded_account_with_code( - &mut self, - balance: U256, - code: Bytes, - ) -> (KeyPair, Address) { - let secp = Secp256k1::new(); - let pair = KeyPair::new(&secp, &mut self.rng); - let address = public_key_to_address(pair.public_key()); - - self.alloc.insert( - address, - GenesisAccount::default().with_balance(balance).with_code(Some(code)), - ); - - (pair, address) - } - - /// Adds a funded account to the genesis alloc with the provided storage. - /// - /// Returns the key pair for the account and the account's address. - pub fn new_funded_account_with_storage( - &mut self, - balance: U256, - storage: BTreeMap, - ) -> (KeyPair, Address) { - let secp = Secp256k1::new(); - let pair = KeyPair::new(&secp, &mut self.rng); - let address = public_key_to_address(pair.public_key()); - - self.alloc.insert( - address, - GenesisAccount::default().with_balance(balance).with_storage(Some(storage)), - ); - - (pair, address) - } - - /// Adds an account with code and storage to the genesis alloc. - /// - /// Returns the key pair for the account and the account's address. - pub fn new_account_with_code_and_storage( - &mut self, - code: Bytes, - storage: BTreeMap, - ) -> (KeyPair, Address) { - let secp = Secp256k1::new(); - let pair = KeyPair::new(&secp, &mut self.rng); - let address = public_key_to_address(pair.public_key()); - - self.alloc.insert( - address, - GenesisAccount::default().with_code(Some(code)).with_storage(Some(storage)), - ); - - (pair, address) - } - - /// Adds an account with code to the genesis alloc. - /// - /// Returns the key pair for the account and the account's address. - pub fn new_account_with_code(&mut self, code: Bytes) -> (KeyPair, Address) { - let secp = Secp256k1::new(); - let pair = KeyPair::new(&secp, &mut self.rng); - let address = public_key_to_address(pair.public_key()); - - self.alloc.insert(address, GenesisAccount::default().with_code(Some(code))); - - (pair, address) - } - - /// Add a funded account to the genesis alloc with the provided address. 
- /// - /// Neither the key pair nor the account will be returned, since the address is provided by - /// the user and the signer may be unknown. - pub fn add_funded_account_with_address(&mut self, address: Address, balance: U256) { - self.alloc.insert(address, GenesisAccount::default().with_balance(balance)); - } - - /// Adds the given [GenesisAccount] to the genesis alloc. - /// - /// Returns the key pair for the account and the account's address. - pub fn add_account(&mut self, account: GenesisAccount) -> Address { - let secp = Secp256k1::new(); - let pair = KeyPair::new(&secp, &mut self.rng); - let address = public_key_to_address(pair.public_key()); - - self.alloc.insert(address, account); - - address - } - - /// Gets the account for the provided address. - /// - /// If it does not exist, this returns `None`. - pub fn get_account(&self, address: &Address) -> Option<&GenesisAccount> { - self.alloc.get(address) - } - - /// Gets a mutable version of the account for the provided address, if it exists. - pub fn get_account_mut(&mut self, address: &Address) -> Option<&mut GenesisAccount> { - self.alloc.get_mut(address) - } - - /// Gets an [Entry] for the provided address. - pub fn account_entry(&mut self, address: Address) -> Entry<'_, Address, GenesisAccount> { - self.alloc.entry(address) - } - - /// Build the genesis alloc. - pub fn build(self) -> HashMap<Address, GenesisAccount> { - self.alloc - } - } - - impl Default for GenesisAllocator<'_> { - fn default() -> Self { - Self { alloc: HashMap::default(), rng: Box::new(thread_rng()) } - } - } - - /// Helper trait that encapsulates [RngCore], and [Debug](std::fmt::Debug) to get around rules - /// for auto traits (Opt-in built-in traits). - trait RngDebug: RngCore + std::fmt::Debug {} - - impl<T> RngDebug for T where T: RngCore + std::fmt::Debug {} -} diff --git a/crates/primitives/src/header.rs b/crates/primitives/src/header.rs index 899fcb368..d0bd5baf8 100644 --- a/crates/primitives/src/header.rs +++ b/crates/primitives/src/header.rs @@ -16,7 +16,6 @@ use bytes::BufMut; #[cfg(any(test, feature = "arbitrary"))] use proptest::prelude::*; use reth_codecs::{add_arbitrary_tests, derive_arbitrary, main_codec, Compact}; -use reth_rpc_types::ConversionError; use serde::{Deserialize, Serialize}; use std::{mem, ops::Deref}; @@ -486,10 +485,13 @@ impl Decodable for Header { } } -impl TryFrom<reth_rpc_types::Header> for Header { - type Error = ConversionError; +#[cfg(feature = "alloy-compat")] +impl TryFrom<alloy_rpc_types::Header> for Header { + type Error = alloy_rpc_types::ConversionError; + + fn try_from(header: alloy_rpc_types::Header) -> Result<Self, Self::Error> { + use alloy_rpc_types::ConversionError; - fn try_from(header: reth_rpc_types::Header) -> Result<Self, Self::Error> { Ok(Self { base_fee_per_gas: header .base_fee_per_gas @@ -776,6 +778,17 @@ impl SealedHeader { } // timestamp in past check + #[cfg(feature = "optimism")] + if chain_spec.is_bedrock_active_at_block(self.header.number) && + self.header.is_timestamp_in_past(parent.timestamp) + { + return Err(HeaderValidationError::TimestampIsInPast { + parent_timestamp: parent.timestamp, + timestamp: self.timestamp, + }) + } + + #[cfg(not(feature = "optimism"))] if self.header.is_timestamp_in_past(parent.timestamp) { return Err(HeaderValidationError::TimestampIsInPast { parent_timestamp: parent.timestamp, @@ -786,16 +799,14 @@ impl SealedHeader { // TODO Check difficulty increment between parent and self - Ice age did increment it by some formula that we need to follow. - cfg_if::cfg_if!
{ - if #[cfg(feature = "optimism")] { - // On Optimism, the gas limit can adjust instantly, so we skip this check - // if the optimism feature is enabled in the chain spec. - if !chain_spec.is_optimism() { - self.validate_gas_limit(parent, chain_spec)?; - } - } else { + if cfg!(feature = "optimism") { + // On Optimism, the gas limit can adjust instantly, so we skip this check + // if the optimism feature is enabled in the chain spec. + if !chain_spec.is_optimism() { self.validate_gas_limit(parent, chain_spec)?; } + } else { + self.validate_gas_limit(parent, chain_spec)?; } // EIP-1559 check base fee diff --git a/crates/primitives/src/lib.rs b/crates/primitives/src/lib.rs index 167a64545..27c66e69e 100644 --- a/crates/primitives/src/lib.rs +++ b/crates/primitives/src/lib.rs @@ -4,6 +4,7 @@ //! //! ## Feature Flags //! +//! - `alloy-compat`: Adds compatibility conversions for certain alloy types. //! - `arbitrary`: Adds `proptest` and `arbitrary` support for primitive types. //! - `test-utils`: Export utilities for testing @@ -33,13 +34,11 @@ mod header; mod integer_list; mod log; mod net; -mod peer; pub mod proofs; mod prune; mod receipt; /// Helpers for working with revm pub mod revm; -pub mod serde_helper; pub mod stage; pub mod static_file; mod storage; @@ -56,9 +55,9 @@ pub use block::{ ForkBlock, RpcBlockHash, SealedBlock, SealedBlockWithSenders, }; pub use chain::{ - AllGenesisFormats, BaseFeeParams, BaseFeeParamsKind, Chain, ChainInfo, ChainSpec, - ChainSpecBuilder, DisplayHardforks, ForkBaseFeeParams, ForkCondition, ForkTimestamps, - NamedChain, DEV, GOERLI, HOLESKY, MAINNET, SEPOLIA, + AllGenesisFormats, BaseFeeParams, BaseFeeParamsKind, Chain, ChainInfo, ChainKind, ChainSpec, + ChainSpecBuilder, DisplayHardforks, ForkBaseFeeParams, ForkCondition, NamedChain, DEV, GOERLI, + HOLESKY, MAINNET, SEPOLIA, }; #[cfg(feature = "zstd-codec")] pub use compression::*; @@ -77,7 +76,6 @@ pub use net::{ NodeRecordParseError, GOERLI_BOOTNODES, HOLESKY_BOOTNODES, MAINNET_BOOTNODES, SEPOLIA_BOOTNODES, }; -pub use peer::{id2pk, pk2id, AnyNode, PeerId, WithPeerId}; pub use prune::{ PruneCheckpoint, PruneInterruptReason, PruneLimiter, PruneMode, PruneModes, PruneProgress, PrunePurpose, PruneSegment, PruneSegmentError, ReceiptsLogPruneConfig, @@ -87,17 +85,18 @@ pub use receipt::{Receipt, ReceiptWithBloom, ReceiptWithBloomRef, Receipts}; pub use static_file::StaticFileSegment; pub use storage::StorageEntry; -#[cfg(feature = "c-kzg")] pub use transaction::{ - BlobTransaction, BlobTransactionSidecar, BlobTransactionValidationError, - FromRecoveredPooledTransaction, PooledTransactionsElement, - PooledTransactionsElementEcRecovered, + BlobTransaction, BlobTransactionSidecar, FromRecoveredPooledTransaction, + PooledTransactionsElement, PooledTransactionsElementEcRecovered, }; +#[cfg(feature = "c-kzg")] +pub use transaction::BlobTransactionValidationError; + pub use transaction::{ util::secp256k1::{public_key_to_address, recover_signer_unchecked, sign_message}, AccessList, AccessListItem, IntoRecoveredTransaction, InvalidTransactionError, Signature, - Transaction, TransactionKind, TransactionMeta, TransactionSigned, TransactionSignedEcRecovered, + Transaction, TransactionMeta, TransactionSigned, TransactionSignedEcRecovered, TransactionSignedNoHash, TryFromRecoveredTransaction, TxEip1559, TxEip2930, TxEip4844, TxHashOrNumber, TxLegacy, TxType, EIP1559_TX_TYPE_ID, EIP2930_TX_TYPE_ID, EIP4844_TX_TYPE_ID, LEGACY_TX_TYPE_ID, @@ -113,7 +112,7 @@ pub use alloy_primitives::{ eip191_hash_message, hex, hex_literal, 
keccak256, ruint, utils::format_ether, Address, BlockHash, BlockNumber, Bloom, BloomInput, Bytes, ChainId, Selector, StorageKey, - StorageValue, TxHash, TxIndex, TxNumber, B128, B256, B512, B64, U128, U256, U64, U8, + StorageValue, TxHash, TxIndex, TxKind, TxNumber, B128, B256, B512, B64, U128, U256, U64, U8, }; pub use reth_ethereum_forks::*; pub use revm_primitives::{self, JumpMap}; @@ -146,8 +145,8 @@ mod optimism { pub use crate::{ chain::{BASE_MAINNET, BASE_SEPOLIA, OP_MAINNET, OP_SEPOLIA}, net::{ - base_nodes, base_testnet_nodes, op_nodes, op_testnet_nodes, BASE_BOOTNODES, - BASE_TESTNET_BOOTNODES, OP_BOOTNODES, OP_TESTNET_BOOTNODES, + base_nodes, base_testnet_nodes, op_nodes, op_testnet_nodes, OP_BOOTNODES, + OP_TESTNET_BOOTNODES, }, transaction::{TxDeposit, DEPOSIT_TX_TYPE_ID}, }; diff --git a/crates/primitives/src/net.rs b/crates/primitives/src/net.rs index c8ff2a3cc..dcb10545f 100644 --- a/crates/primitives/src/net.rs +++ b/crates/primitives/src/net.rs @@ -1,4 +1,4 @@ -pub use reth_rpc_types::{NodeRecord, NodeRecordParseError}; +pub use reth_network_types::{NodeRecord, NodeRecordParseError}; // Ethereum bootnodes come from // OP bootnodes come from @@ -43,24 +43,13 @@ pub static HOLESKY_BOOTNODES : [&str; 2] = [ ]; #[cfg(feature = "optimism")] -/// OP Mainnet Bootnodes -pub static OP_BOOTNODES: [&str; 3] = [ +/// OP stack mainnet boot nodes. +pub static OP_BOOTNODES: &[&str] = &[ + // OP Labs "enode://ca2774c3c401325850b2477fd7d0f27911efbf79b1e8b335066516e2bd8c4c9e0ba9696a94b1cb030a88eac582305ff55e905e64fb77fe0edcd70a4e5296d3ec@34.65.175.185:30305", "enode://dd751a9ef8912be1bfa7a5e34e2c3785cc5253110bd929f385e07ba7ac19929fb0e0c5d93f77827291f4da02b2232240fbc47ea7ce04c46e333e452f8656b667@34.65.107.0:30305", "enode://c5d289b56a77b6a2342ca29956dfd07aadf45364dde8ab20d1dc4efd4d1bc6b4655d902501daea308f4d8950737a4e93a4dfedd17b49cd5760ffd127837ca965@34.65.202.239:30305", -]; - -#[cfg(feature = "optimism")] -/// OP Testnet Bootnodes -pub static OP_TESTNET_BOOTNODES: [&str; 3] = [ - "enode://2bd2e657bb3c8efffb8ff6db9071d9eb7be70d7c6d7d980ff80fc93b2629675c5f750bc0a5ef27cd788c2e491b8795a7e9a4a6e72178c14acc6753c0e5d77ae4@34.65.205.244:30305", - "enode://db8e1cab24624cc62fc35dbb9e481b88a9ef0116114cd6e41034c55b5b4f18755983819252333509bd8e25f6b12aadd6465710cd2e956558faf17672cce7551f@34.65.173.88:30305", - "enode://bfda2e0110cfd0f4c9f7aa5bf5ec66e6bd18f71a2db028d36b8bf8b0d6fdb03125c1606a6017b31311d96a36f5ef7e1ad11604d7a166745e6075a715dfa67f8a@34.65.229.245:30305", -]; - -#[cfg(feature = "optimism")] -/// Base Mainnet Bootnodes -pub static BASE_BOOTNODES: [&str; 5] = [ + // Base "enode://87a32fd13bd596b2ffca97020e31aef4ddcc1bbd4b95bb633d16c1329f654f34049ed240a36b449fda5e5225d70fe40bc667f53c304b71f8e68fc9d448690b51@3.231.138.188:30301", "enode://ca21ea8f176adb2e229ce2d700830c844af0ea941a1d8152a9513b966fe525e809c3a6c73a2c18a12b74ed6ec4380edf91662778fe0b79f6a591236e49e176f9@184.72.129.189:30301", "enode://acf4507a211ba7c1e52cdf4eef62cdc3c32e7c9c47998954f7ba024026f9a6b2150cd3f0b734d9c78e507ab70d59ba61dfe5c45e1078c7ad0775fb251d7735a2@3.220.145.177:30301", @@ -69,8 +58,13 @@ pub static BASE_BOOTNODES: [&str; 5] = [ ]; #[cfg(feature = "optimism")] -/// Base Testnet Bootnodes -pub static BASE_TESTNET_BOOTNODES: [&str; 2] = [ +/// OP stack testnet boot nodes. 
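The Base bootnode lists are folded into the shared OP stack slices here, so `base_nodes()` and `base_testnet_nodes()` now parse the same entries as their OP counterparts. Each entry is an enode URL that `parse_nodes` turns into a `NodeRecord`; a small sketch of that conversion, assuming `NodeRecord`'s `FromStr` impl (when no `discport` query is present, the UDP port falls back to the TCP port):

```rust
use reth_primitives::NodeRecord;

fn main() {
    // First OP Labs mainnet entry from OP_BOOTNODES.
    let enode = "enode://ca2774c3c401325850b2477fd7d0f27911efbf79b1e8b335066516e2bd8c4c9e0ba9696a94b1cb030a88eac582305ff55e905e64fb77fe0edcd70a4e5296d3ec@34.65.175.185:30305";
    let record: NodeRecord = enode.parse().expect("valid enode URL");
    assert_eq!(record.tcp_port, 30305);
    assert_eq!(record.udp_port, 30305);
}
```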
+pub static OP_TESTNET_BOOTNODES: &[&str] = &[ + // OP Labs + "enode://2bd2e657bb3c8efffb8ff6db9071d9eb7be70d7c6d7d980ff80fc93b2629675c5f750bc0a5ef27cd788c2e491b8795a7e9a4a6e72178c14acc6753c0e5d77ae4@34.65.205.244:30305", + "enode://db8e1cab24624cc62fc35dbb9e481b88a9ef0116114cd6e41034c55b5b4f18755983819252333509bd8e25f6b12aadd6465710cd2e956558faf17672cce7551f@34.65.173.88:30305", + "enode://bfda2e0110cfd0f4c9f7aa5bf5ec66e6bd18f71a2db028d36b8bf8b0d6fdb03125c1606a6017b31311d96a36f5ef7e1ad11604d7a166745e6075a715dfa67f8a@34.65.229.245:30305", + // Base "enode://548f715f3fc388a7c917ba644a2f16270f1ede48a5d88a4d14ea287cc916068363f3092e39936f1a3e7885198bef0e5af951f1d7b1041ce8ba4010917777e71f@18.210.176.114:30301", "enode://6f10052847a966a725c9f4adf6716f9141155b99a0fb487fea3f51498f4c2a2cb8d534e680ee678f9447db85b93ff7c74562762c3714783a7233ac448603b25f@107.21.251.55:30301", ]; @@ -98,25 +92,25 @@ pub fn holesky_nodes() -> Vec { #[cfg(feature = "optimism")] /// Returns parsed op-stack mainnet nodes pub fn op_nodes() -> Vec { - parse_nodes(&OP_BOOTNODES[..]) + parse_nodes(OP_BOOTNODES) } #[cfg(feature = "optimism")] /// Returns parsed op-stack testnet nodes pub fn op_testnet_nodes() -> Vec { - parse_nodes(&OP_TESTNET_BOOTNODES[..]) + parse_nodes(OP_TESTNET_BOOTNODES) } #[cfg(feature = "optimism")] /// Returns parsed op-stack base mainnet nodes pub fn base_nodes() -> Vec { - parse_nodes(&BASE_BOOTNODES[..]) + parse_nodes(OP_BOOTNODES) } #[cfg(feature = "optimism")] /// Returns parsed op-stack base testnet nodes pub fn base_testnet_nodes() -> Vec { - parse_nodes(&BASE_TESTNET_BOOTNODES[..]) + parse_nodes(OP_TESTNET_BOOTNODES) } /// Parses all the nodes @@ -126,15 +120,10 @@ pub fn parse_nodes(nodes: impl IntoIterator>) -> Vec(&node).expect("couldn't serialize"); assert_eq!(ser, "\"enode://6f8a80d14311c39f35f516fa664deaaaa13e85b2f7493f37f6144d86991ec012937307647bd3b9a82abe2974e1407241d54947bbb39763a4cac9f77166ad92a0@10.3.58.6:30303?discport=30301\"") @@ -252,7 +241,7 @@ mod tests { address: IpAddr::V4([10, 3, 58, 6].into()), tcp_port: 30303u16, udp_port: 30301u16, - id: PeerId::from_str("6f8a80d14311c39f35f516fa664deaaaa13e85b2f7493f37f6144d86991ec012937307647bd3b9a82abe2974e1407241d54947bbb39763a4cac9f77166ad92a0").unwrap(), + id: "6f8a80d14311c39f35f516fa664deaaaa13e85b2f7493f37f6144d86991ec012937307647bd3b9a82abe2974e1407241d54947bbb39763a4cac9f77166ad92a0".parse().unwrap(), }) } } diff --git a/crates/primitives/src/prune/mode.rs b/crates/primitives/src/prune/mode.rs index c32f66d35..3454573b9 100644 --- a/crates/primitives/src/prune/mode.rs +++ b/crates/primitives/src/prune/mode.rs @@ -36,6 +36,7 @@ impl PruneMode { PruneMode::Distance(distance) if *distance >= segment.min_blocks(purpose) => { Some((tip - distance, *self)) } + PruneMode::Before(n) if *n == tip + 1 && purpose.is_static_file() => Some((tip, *self)), PruneMode::Before(n) if *n > tip => None, // Nothing to prune yet PruneMode::Before(n) if tip - n >= segment.min_blocks(purpose) => Some((n - 1, *self)), _ => return Err(PruneSegmentError::Configuration(segment)), diff --git a/crates/primitives/src/receipt.rs b/crates/primitives/src/receipt.rs index 2a25b2de8..63955a1d1 100644 --- a/crates/primitives/src/receipt.rs +++ b/crates/primitives/src/receipt.rs @@ -2,7 +2,7 @@ use crate::compression::{RECEIPT_COMPRESSOR, RECEIPT_DECOMPRESSOR}; use crate::{logs_bloom, Bloom, Bytes, PruneSegmentError, TxType, B256}; use alloy_primitives::Log; -use alloy_rlp::{length_of_length, Decodable, Encodable}; +use alloy_rlp::{length_of_length, Decodable, 
Encodable, RlpDecodable, RlpEncodable}; use bytes::{Buf, BufMut}; #[cfg(any(test, feature = "arbitrary"))] use proptest::strategy::Strategy; @@ -18,7 +18,8 @@ use std::{ #[cfg_attr(feature = "zstd-codec", main_codec(no_arbitrary, zstd))] #[cfg_attr(not(feature = "zstd-codec"), main_codec(no_arbitrary))] #[add_arbitrary_tests] -#[derive(Clone, Debug, PartialEq, Eq, Default)] +#[derive(Clone, Debug, PartialEq, Eq, Default, RlpEncodable, RlpDecodable)] +#[rlp(trailing)] pub struct Receipt { /// Receipt type. pub tx_type: TxType, diff --git a/crates/primitives/src/revm/compat.rs b/crates/primitives/src/revm/compat.rs index 6c9474f7c..972770882 100644 --- a/crates/primitives/src/revm/compat.rs +++ b/crates/primitives/src/revm/compat.rs @@ -1,4 +1,4 @@ -use crate::{revm_primitives::AccountInfo, Account, Address, TransactionKind, KECCAK_EMPTY, U256}; +use crate::{revm_primitives::AccountInfo, Account, Address, TxKind, KECCAK_EMPTY, U256}; use revm::{ interpreter::gas::validate_initial_tx_gas, primitives::{MergeSpec, ShanghaiSpec}, @@ -34,7 +34,7 @@ pub fn into_revm_acc(reth_acc: Account) -> AccountInfo { #[inline] pub fn calculate_intrinsic_gas_after_merge( input: &[u8], - kind: &TransactionKind, + kind: &TxKind, access_list: &[(Address, Vec)], is_shanghai: bool, ) -> u64 { diff --git a/crates/primitives/src/revm/env.rs b/crates/primitives/src/revm/env.rs index edfc07f80..b13a7018f 100644 --- a/crates/primitives/src/revm/env.rs +++ b/crates/primitives/src/revm/env.rs @@ -2,8 +2,8 @@ use crate::{ constants::{BEACON_ROOTS_ADDRESS, SYSTEM_ADDRESS}, recover_signer_unchecked, revm_primitives::{BlockEnv, Env, TransactTo, TxEnv}, - Address, Bytes, Chain, ChainSpec, Header, Transaction, TransactionKind, - TransactionSignedEcRecovered, B256, U256, + Address, Bytes, Chain, ChainSpec, Header, Transaction, TransactionSignedEcRecovered, TxKind, + B256, U256, }; #[cfg(feature = "optimism")] @@ -208,8 +208,8 @@ where tx_env.gas_price = U256::from(tx.gas_price); tx_env.gas_priority_fee = None; tx_env.transact_to = match tx.to { - TransactionKind::Call(to) => TransactTo::Call(to), - TransactionKind::Create => TransactTo::create(), + TxKind::Call(to) => TransactTo::Call(to), + TxKind::Create => TransactTo::create(), }; tx_env.value = tx.value; tx_env.data = tx.input.clone(); @@ -224,8 +224,8 @@ where tx_env.gas_price = U256::from(tx.gas_price); tx_env.gas_priority_fee = None; tx_env.transact_to = match tx.to { - TransactionKind::Call(to) => TransactTo::Call(to), - TransactionKind::Create => TransactTo::create(), + TxKind::Call(to) => TransactTo::Call(to), + TxKind::Create => TransactTo::create(), }; tx_env.value = tx.value; tx_env.data = tx.input.clone(); @@ -247,8 +247,8 @@ where tx_env.gas_price = U256::from(tx.max_fee_per_gas); tx_env.gas_priority_fee = Some(U256::from(tx.max_priority_fee_per_gas)); tx_env.transact_to = match tx.to { - TransactionKind::Call(to) => TransactTo::Call(to), - TransactionKind::Create => TransactTo::create(), + TxKind::Call(to) => TransactTo::Call(to), + TxKind::Create => TransactTo::create(), }; tx_env.value = tx.value; tx_env.data = tx.input.clone(); @@ -270,8 +270,8 @@ where tx_env.gas_price = U256::from(tx.max_fee_per_gas); tx_env.gas_priority_fee = Some(U256::from(tx.max_priority_fee_per_gas)); tx_env.transact_to = match tx.to { - TransactionKind::Call(to) => TransactTo::Call(to), - TransactionKind::Create => TransactTo::create(), + TxKind::Call(to) => TransactTo::Call(to), + TxKind::Create => TransactTo::create(), }; tx_env.value = tx.value; tx_env.data = tx.input.clone(); @@ 
-295,8 +295,8 @@ where tx_env.gas_price = U256::ZERO; tx_env.gas_priority_fee = None; match tx.to { - TransactionKind::Call(to) => tx_env.transact_to = TransactTo::Call(to), - TransactionKind::Create => tx_env.transact_to = TransactTo::create(), + TxKind::Call(to) => tx_env.transact_to = TransactTo::Call(to), + TxKind::Create => tx_env.transact_to = TransactTo::create(), } tx_env.value = tx.value; tx_env.data = tx.input.clone(); diff --git a/crates/primitives/src/serde_helper.rs b/crates/primitives/src/serde_helper.rs deleted file mode 100644 index b0d041fdc..000000000 --- a/crates/primitives/src/serde_helper.rs +++ /dev/null @@ -1,3 +0,0 @@ -//! [serde] utilities. - -pub use reth_rpc_types::serde_helpers::*; diff --git a/crates/primitives/src/stage/checkpoints.rs b/crates/primitives/src/stage/checkpoints.rs index 461e15401..d9c10605c 100644 --- a/crates/primitives/src/stage/checkpoints.rs +++ b/crates/primitives/src/stage/checkpoints.rs @@ -6,6 +6,8 @@ use bytes::Buf; use reth_codecs::{main_codec, Compact}; use std::ops::RangeInclusive; +use super::StageId; + /// Saves the progress of Merkle stage. #[derive(Default, Debug, Clone, PartialEq)] pub struct MerkleCheckpoint { @@ -201,6 +203,25 @@ impl StageCheckpoint { self } + /// Sets the block range, if checkpoint uses block range. + pub fn with_block_range(mut self, stage_id: &StageId, from: u64, to: u64) -> Self { + self.stage_checkpoint = Some(match stage_id { + StageId::Execution => StageUnitCheckpoint::Execution(ExecutionCheckpoint::default()), + StageId::AccountHashing => { + StageUnitCheckpoint::Account(AccountHashingCheckpoint::default()) + } + StageId::StorageHashing => { + StageUnitCheckpoint::Storage(StorageHashingCheckpoint::default()) + } + StageId::IndexStorageHistory | StageId::IndexAccountHistory => { + StageUnitCheckpoint::IndexHistory(IndexHistoryCheckpoint::default()) + } + _ => return self, + }); + _ = self.stage_checkpoint.map(|mut checkpoint| checkpoint.set_block_range(from, to)); + self + } + /// Get the underlying [`EntitiesCheckpoint`], if any, to determine the number of entities /// processed, and the number of total entities to process. pub fn entities(&self) -> Option { @@ -244,6 +265,25 @@ pub enum StageUnitCheckpoint { IndexHistory(IndexHistoryCheckpoint), } +impl StageUnitCheckpoint { + /// Sets the block range. Returns old block range, or `None` if checkpoint doesn't use block + /// range. + pub fn set_block_range(&mut self, from: u64, to: u64) -> Option { + match self { + Self::Account(AccountHashingCheckpoint { ref mut block_range, .. }) | + Self::Storage(StorageHashingCheckpoint { ref mut block_range, .. }) | + Self::Execution(ExecutionCheckpoint { ref mut block_range, .. }) | + Self::IndexHistory(IndexHistoryCheckpoint { ref mut block_range, .. }) => { + let old_range = *block_range; + *block_range = CheckpointBlockRange { from, to }; + + Some(old_range) + } + _ => None, + } + } +} + #[cfg(test)] impl Default for StageUnitCheckpoint { fn default() -> Self { diff --git a/crates/primitives/src/stage/id.rs b/crates/primitives/src/stage/id.rs index 2f5de34ee..2779c2608 100644 --- a/crates/primitives/src/stage/id.rs +++ b/crates/primitives/src/stage/id.rs @@ -53,6 +53,17 @@ impl StageId { StageId::Finish, ]; + /// Stages that require state. 
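A short usage sketch for the new `with_block_range` helper above, assuming the existing `StageCheckpoint::new` constructor and public `stage_checkpoint` field; the helper attaches whichever checkpoint variant matches the stage id and leaves stages without block ranges untouched:

```rust
use reth_primitives::stage::{StageCheckpoint, StageId, StageUnitCheckpoint};

fn main() {
    // Execution tracks a block range, so an ExecutionCheckpoint is attached.
    let checkpoint = StageCheckpoint::new(0).with_block_range(&StageId::Execution, 100, 200);
    assert!(matches!(checkpoint.stage_checkpoint, Some(StageUnitCheckpoint::Execution(_))));

    // Finish has no block-range checkpoint, so nothing is attached.
    let untouched = StageCheckpoint::new(0).with_block_range(&StageId::Finish, 100, 200);
    assert!(untouched.stage_checkpoint.is_none());
}
```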
+ pub const STATE_REQUIRED: [StageId; 7] = [ + StageId::Execution, + StageId::MerkleUnwind, + StageId::AccountHashing, + StageId::StorageHashing, + StageId::MerkleExecute, + StageId::IndexStorageHistory, + StageId::IndexAccountHistory, + ]; + /// Return stage id formatted as string. pub fn as_str(&self) -> &str { match self { @@ -79,6 +90,11 @@ impl StageId { matches!(self, StageId::Headers | StageId::Bodies) } + /// Returns `true` if it's [TransactionLookup](StageId::TransactionLookup) stage. + pub fn is_tx_lookup(&self) -> bool { + matches!(self, StageId::TransactionLookup) + } + /// Returns true indicating if it's the finish stage [StageId::Finish] pub fn is_finish(&self) -> bool { matches!(self, StageId::Finish) diff --git a/crates/primitives/src/stage/mod.rs b/crates/primitives/src/stage/mod.rs index ffe52554d..3c7c972bc 100644 --- a/crates/primitives/src/stage/mod.rs +++ b/crates/primitives/src/stage/mod.rs @@ -1,6 +1,7 @@ //! Staged sync primitives. mod id; +use crate::{BlockHash, BlockNumber}; pub use id::StageId; mod checkpoints; @@ -9,3 +10,46 @@ pub use checkpoints::{ HeadersCheckpoint, IndexHistoryCheckpoint, MerkleCheckpoint, StageCheckpoint, StageUnitCheckpoint, StorageHashingCheckpoint, }; + +/// Direction and target block for pipeline operations. +#[derive(Debug, Clone, Copy)] +pub enum PipelineTarget { + /// Target for forward synchronization, indicating a block hash to sync to. + Sync(BlockHash), + /// Target for backward unwinding, indicating a block number to unwind to. + Unwind(BlockNumber), +} + +impl PipelineTarget { + /// Returns the target block hash for forward synchronization, if applicable. + /// + /// # Returns + /// + /// - `Some(BlockHash)`: The target block hash for forward synchronization. + /// - `None`: If the target is for backward unwinding. + pub fn sync_target(self) -> Option { + match self { + PipelineTarget::Sync(hash) => Some(hash), + PipelineTarget::Unwind(_) => None, + } + } + + /// Returns the target block number for backward unwinding, if applicable. + /// + /// # Returns + /// + /// - `Some(BlockNumber)`: The target block number for backward unwinding. + /// - `None`: If the target is for forward synchronization. + pub fn unwind_target(self) -> Option { + match self { + PipelineTarget::Sync(_) => None, + PipelineTarget::Unwind(number) => Some(number), + } + } +} + +impl From for PipelineTarget { + fn from(hash: BlockHash) -> Self { + Self::Sync(hash) + } +} diff --git a/crates/primitives/src/transaction/eip1559.rs b/crates/primitives/src/transaction/eip1559.rs index 229da9983..5da0cd881 100644 --- a/crates/primitives/src/transaction/eip1559.rs +++ b/crates/primitives/src/transaction/eip1559.rs @@ -1,5 +1,5 @@ use super::access_list::AccessList; -use crate::{keccak256, Bytes, ChainId, Signature, TransactionKind, TxType, B256, U256}; +use crate::{keccak256, Bytes, ChainId, Signature, TxKind, TxType, B256, U256}; use alloy_rlp::{length_of_length, Decodable, Encodable, Header}; use bytes::BytesMut; use reth_codecs::{main_codec, Compact}; @@ -41,7 +41,7 @@ pub struct TxEip1559 { pub max_priority_fee_per_gas: u128, /// The 160-bit address of the message call’s recipient or, for a contract creation /// transaction, ∅, used here to denote the only member of B0 ; formally Tt. 
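`PipelineTarget` (added above) makes the sync-versus-unwind intent explicit at the type level, with each accessor returning `None` for the opposite direction. A minimal usage sketch; `BlockHash` is the `B256` alias this crate re-exports:

```rust
use reth_primitives::{stage::PipelineTarget, B256};

fn main() {
    // Forward sync targets carry a block hash; From<BlockHash> lets plain
    // hashes be used wherever a target is expected.
    let sync: PipelineTarget = B256::ZERO.into();
    assert_eq!(sync.sync_target(), Some(B256::ZERO));
    assert_eq!(sync.unwind_target(), None);

    // Unwind targets carry a block number instead.
    let unwind = PipelineTarget::Unwind(100);
    assert_eq!(unwind.sync_target(), None);
    assert_eq!(unwind.unwind_target(), Some(100));
}
```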
- pub to: TransactionKind, + pub to: TxKind, /// A scalar value equal to the number of Wei to /// be transferred to the message call’s recipient or, /// in the case of contract creation, as an endowment @@ -193,7 +193,7 @@ impl TxEip1559 { self.input.len() // input } - /// Encodes the legacy transaction in RLP for signing. + /// Encodes the EIP-1559 transaction in RLP for signing. /// /// This encodes the transaction as: /// `tx_type || rlp(chain_id, nonce, max_priority_fee_per_gas, max_fee_per_gas, gas_limit, to, @@ -226,7 +226,7 @@ impl TxEip1559 { mod tests { use super::TxEip1559; use crate::{ - transaction::{signature::Signature, TransactionKind}, + transaction::{signature::Signature, TxKind}, AccessList, Address, Transaction, TransactionSigned, B256, U256, }; use std::str::FromStr; @@ -243,7 +243,7 @@ mod tests { chain_id: 1, nonce: 0x42, gas_limit: 44386, - to: TransactionKind::Call( hex!("6069a6c32cf691f5982febae4faf8a6f3ab2f0f6").into()), + to: TxKind::Call(hex!("6069a6c32cf691f5982febae4faf8a6f3ab2f0f6").into()), value: U256::ZERO, input: hex!("a22cb4650000000000000000000000005eee75727d804a2b13038928d36f8b188945a57a0000000000000000000000000000000000000000000000000000000000000000").into(), max_fee_per_gas: 0x4a817c800, diff --git a/crates/primitives/src/transaction/eip2930.rs b/crates/primitives/src/transaction/eip2930.rs index fde594d7b..0604a7888 100644 --- a/crates/primitives/src/transaction/eip2930.rs +++ b/crates/primitives/src/transaction/eip2930.rs @@ -1,5 +1,5 @@ use super::access_list::AccessList; -use crate::{keccak256, Bytes, ChainId, Signature, TransactionKind, TxType, B256, U256}; +use crate::{keccak256, Bytes, ChainId, Signature, TxKind, TxType, B256, U256}; use alloy_rlp::{length_of_length, Decodable, Encodable, Header}; use bytes::BytesMut; use reth_codecs::{main_codec, Compact}; @@ -29,7 +29,7 @@ pub struct TxEip2930 { pub gas_limit: u64, /// The 160-bit address of the message call’s recipient or, for a contract creation /// transaction, ∅, used here to denote the only member of B0 ; formally Tt. - pub to: TransactionKind, + pub to: TxKind, /// A scalar value equal to the number of Wei to /// be transferred to the message call’s recipient or, /// in the case of contract creation, as an endowment @@ -157,7 +157,7 @@ impl TxEip2930 { TxType::Eip2930 } - /// Encodes the legacy transaction in RLP for signing. + /// Encodes the EIP-2930 transaction in RLP for signing. 
/// /// This encodes the transaction as: /// `tx_type || rlp(chain_id, nonce, gas_price, gas_limit, to, value, input, access_list)` @@ -189,7 +189,7 @@ impl TxEip2930 { mod tests { use super::TxEip2930; use crate::{ - transaction::{signature::Signature, TransactionKind}, + transaction::{signature::Signature, TxKind}, Address, Bytes, Transaction, TransactionSigned, U256, }; use alloy_rlp::{Decodable, Encodable}; @@ -202,7 +202,7 @@ mod tests { nonce: 0, gas_price: 1, gas_limit: 2, - to: TransactionKind::Create, + to: TxKind::Create, value: U256::from(3), input: Bytes::from(vec![1, 2]), access_list: Default::default(), @@ -225,7 +225,7 @@ mod tests { nonce: 0, gas_price: 1, gas_limit: 2, - to: TransactionKind::Call(Address::default()), + to: Address::default().into(), value: U256::from(3), input: Bytes::from(vec![1, 2]), access_list: Default::default(), diff --git a/crates/primitives/src/transaction/eip4844.rs b/crates/primitives/src/transaction/eip4844.rs index a24a87b11..f2130ce50 100644 --- a/crates/primitives/src/transaction/eip4844.rs +++ b/crates/primitives/src/transaction/eip4844.rs @@ -1,20 +1,14 @@ use super::access_list::AccessList; use crate::{ - constants::eip4844::DATA_GAS_PER_BLOB, keccak256, Bytes, ChainId, Signature, TransactionKind, - TxType, B256, U256, + constants::eip4844::DATA_GAS_PER_BLOB, keccak256, Bytes, ChainId, Signature, TxKind, TxType, + B256, U256, }; use alloy_rlp::{length_of_length, Decodable, Encodable, Header}; use reth_codecs::{main_codec, Compact}; use std::mem; #[cfg(feature = "c-kzg")] -use crate::eip4844::kzg_to_versioned_hash; -#[cfg(feature = "c-kzg")] -use crate::kzg::{self, KzgCommitment, KzgProof, KzgSettings}; -#[cfg(feature = "c-kzg")] -use crate::transaction::sidecar::*; -#[cfg(feature = "c-kzg")] -use std::ops::Deref; +use crate::kzg::KzgSettings; /// [EIP-4844 Blob Transaction](https://eips.ethereum.org/EIPS/eip-4844#blob-transaction) /// @@ -54,7 +48,7 @@ pub struct TxEip4844 { pub max_priority_fee_per_gas: u128, /// The 160-bit address of the message call’s recipient or, for a contract creation /// transaction, ∅, used here to denote the only member of B0 ; formally Tt. - pub to: TransactionKind, + pub to: TxKind, /// A scalar value equal to the number of Wei to /// be transferred to the message call’s recipient or, /// in the case of contract creation, as an endowment @@ -112,57 +106,16 @@ impl TxEip4844 { /// commitments, and proofs. Each blob data element is verified against its commitment and /// proof. /// - /// Returns [BlobTransactionValidationError::InvalidProof] if any blob KZG proof in the response + /// Returns `InvalidProof` if any blob KZG proof in the response /// fails to verify, or if the versioned hashes in the transaction do not match the actual /// commitment versioned hashes. 
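`validate_blob` now forwards to `BlobTransactionSidecar::validate` from `alloy_eips`, which performs the same two steps as the removed body shown below: recompute and compare each versioned hash, then batch-verify the KZG proofs. A condensed, dependency-light sketch of the hash-matching step only (assuming the `sha2` crate; real proof verification still goes through `KzgProof::verify_blob_kzg_proof_batch`):

```rust
use sha2::{Digest, Sha256};

/// Mirrors the per-blob loop of the removed `validate_blob` body: every
/// versioned hash must equal sha256(commitment) with byte 0 set to 0x01.
fn versioned_hashes_match(hashes: &[[u8; 32]], commitments: &[[u8; 48]]) -> bool {
    hashes.len() == commitments.len()
        && hashes.iter().zip(commitments).all(|(expected, commitment)| {
            let mut computed: [u8; 32] = Sha256::digest(commitment).into();
            computed[0] = 0x01; // VERSIONED_HASH_VERSION_KZG
            *expected == computed
        })
}

fn main() {
    // Mismatched lengths fail fast, as the removed MismatchLength check did.
    assert!(!versioned_hashes_match(&[[0u8; 32]], &[]));
}
```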
#[cfg(feature = "c-kzg")] pub fn validate_blob( &self, - sidecar: &BlobTransactionSidecar, + sidecar: &crate::BlobTransactionSidecar, proof_settings: &KzgSettings, - ) -> Result<(), BlobTransactionValidationError> { - // Ensure the versioned hashes and commitments have the same length - if self.blob_versioned_hashes.len() != sidecar.commitments.len() { - return Err(kzg::Error::MismatchLength(format!( - "There are {} versioned commitment hashes and {} commitments", - self.blob_versioned_hashes.len(), - sidecar.commitments.len() - )) - .into()) - } - - // zip and iterate, calculating versioned hashes - for (versioned_hash, commitment) in - self.blob_versioned_hashes.iter().zip(sidecar.commitments.iter()) - { - // convert to KzgCommitment - let commitment = KzgCommitment::from(*commitment.deref()); - - // calculate & verify the versioned hash - // https://eips.ethereum.org/EIPS/eip-4844#execution-layer-validation - let calculated_versioned_hash = kzg_to_versioned_hash(commitment); - if *versioned_hash != calculated_versioned_hash { - return Err(BlobTransactionValidationError::WrongVersionedHash { - have: *versioned_hash, - expected: calculated_versioned_hash, - }) - } - } - - // Verify as a batch - let res = KzgProof::verify_blob_kzg_proof_batch( - sidecar.blobs.as_slice(), - sidecar.commitments.as_slice(), - sidecar.proofs.as_slice(), - proof_settings, - ) - .map_err(BlobTransactionValidationError::KZGError)?; - - if res { - Ok(()) - } else { - Err(BlobTransactionValidationError::InvalidProof) - } + ) -> Result<(), alloy_eips::eip4844::BlobTransactionValidationError> { + sidecar.validate(&self.blob_versioned_hashes, proof_settings) } /// Returns the total gas for all blobs in this transaction. @@ -291,7 +244,7 @@ impl TxEip4844 { TxType::Eip4844 } - /// Encodes the legacy transaction in RLP for signing. + /// Encodes the EIP-4844 transaction in RLP for signing. /// /// This encodes the transaction as: /// `tx_type || rlp(chain_id, nonce, max_priority_fee_per_gas, max_fee_per_gas, gas_limit, to, diff --git a/crates/primitives/src/transaction/legacy.rs b/crates/primitives/src/transaction/legacy.rs index eba89f93d..448662a24 100644 --- a/crates/primitives/src/transaction/legacy.rs +++ b/crates/primitives/src/transaction/legacy.rs @@ -1,4 +1,4 @@ -use crate::{keccak256, Bytes, ChainId, Signature, TransactionKind, TxType, B256, U256}; +use crate::{keccak256, Bytes, ChainId, Signature, TxKind, TxType, B256, U256}; use alloy_rlp::{length_of_length, Encodable, Header}; use bytes::BytesMut; use reth_codecs::{main_codec, Compact}; @@ -28,7 +28,7 @@ pub struct TxLegacy { pub gas_limit: u64, /// The 160-bit address of the message call’s recipient or, for a contract creation /// transaction, ∅, used here to denote the only member of B0 ; formally Tt. 
- pub to: TransactionKind, + pub to: TxKind, /// A scalar value equal to the number of Wei to /// be transferred to the message call’s recipient or, /// in the case of contract creation, as an endowment @@ -173,7 +173,7 @@ impl TxLegacy { mod tests { use super::TxLegacy; use crate::{ - transaction::{signature::Signature, TransactionKind}, + transaction::{signature::Signature, TxKind}, Address, Transaction, TransactionSigned, B256, U256, }; @@ -190,7 +190,7 @@ mod tests { nonce: 0x18, gas_price: 0xfa56ea00, gas_limit: 119902, - to: TransactionKind::Call( hex!("06012c8cf97bead5deae237070f9587f8e7a266d").into()), + to: TxKind::Call(hex!("06012c8cf97bead5deae237070f9587f8e7a266d").into()), value: U256::from(0x1c6bf526340000u64), input: hex!("f7d8c88300000000000000000000000000000000000000000000000000000000000cee6100000000000000000000000000000000000000000000000000000000000ac3e1").into(), }); diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs index 31cb277f0..eda139ffd 100644 --- a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -1,8 +1,7 @@ #[cfg(any(feature = "arbitrary", feature = "zstd-codec"))] use crate::compression::{TRANSACTION_COMPRESSOR, TRANSACTION_DECOMPRESSOR}; -use crate::{keccak256, Address, BlockHashOrNumber, Bytes, TxHash, B256, U256}; +use crate::{keccak256, Address, BlockHashOrNumber, Bytes, TxHash, TxKind, B256, U256}; -use alloy_eips::eip2718::Eip2718Error; use alloy_rlp::{ Decodable, Encodable, Error as RlpError, Header, EMPTY_LIST_CODE, EMPTY_STRING_CODE, }; @@ -11,7 +10,6 @@ use derive_more::{AsRef, Deref}; use once_cell::sync::Lazy; use rayon::prelude::{IntoParallelIterator, ParallelIterator}; use reth_codecs::{add_arbitrary_tests, derive_arbitrary, Compact}; -use reth_rpc_types::ConversionError; use serde::{Deserialize, Serialize}; use std::mem; @@ -25,14 +23,14 @@ pub use error::{ }; pub use legacy::TxLegacy; pub use meta::TransactionMeta; -#[cfg(feature = "c-kzg")] pub use pooled::{PooledTransactionsElement, PooledTransactionsElementEcRecovered}; #[cfg(all(feature = "c-kzg", any(test, feature = "arbitrary")))] pub use sidecar::generate_blob_sidecar; #[cfg(feature = "c-kzg")] -pub use sidecar::{BlobTransaction, BlobTransactionSidecar, BlobTransactionValidationError}; +pub use sidecar::BlobTransactionValidationError; +pub use sidecar::{BlobTransaction, BlobTransactionSidecar}; -pub use signature::Signature; +pub use signature::{extract_chain_id, Signature}; pub use tx_type::{ TxType, EIP1559_TX_TYPE_ID, EIP2930_TX_TYPE_ID, EIP4844_TX_TYPE_ID, LEGACY_TX_TYPE_ID, }; @@ -45,9 +43,7 @@ mod eip4844; mod error; mod legacy; mod meta; -#[cfg(feature = "c-kzg")] mod pooled; -#[cfg(feature = "c-kzg")] mod sidecar; mod signature; mod tx_type; @@ -176,9 +172,9 @@ impl Transaction { } } - /// Gets the transaction's [`TransactionKind`], which is the address of the recipient or - /// [`TransactionKind::Create`] if the transaction is a contract creation. - pub fn kind(&self) -> &TransactionKind { + /// Gets the transaction's [`TxKind`], which is the address of the recipient or + /// [`TxKind::Create`] if the transaction is a contract creation. + pub fn kind(&self) -> &TxKind { match self { Transaction::Legacy(TxLegacy { to, .. }) | Transaction::Eip2930(TxEip2930 { to, .. }) | @@ -194,7 +190,7 @@ impl Transaction { /// /// Returns `None` if this is a `CREATE` transaction. pub fn to(&self) -> Option
<Address> { - self.kind().to() + self.kind().to().copied() } /// Get the transaction's type @@ -616,10 +612,14 @@ impl From for Transaction { } } -impl TryFrom<reth_rpc_types::Transaction> for Transaction { - type Error = ConversionError; +#[cfg(feature = "alloy-compat")] +impl TryFrom<alloy_rpc_types::Transaction> for Transaction { + type Error = alloy_rpc_types::ConversionError; + + fn try_from(tx: alloy_rpc_types::Transaction) -> Result<Self, Self::Error> { + use alloy_eips::eip2718::Eip2718Error; + use alloy_rpc_types::ConversionError; - fn try_from(tx: reth_rpc_types::Transaction) -> Result<Self, Self::Error> { match tx.transaction_type.map(TryInto::try_into).transpose().map_err(|_| { ConversionError::Eip2718Error(Eip2718Error::UnexpectedType( tx.transaction_type.unwrap(), @@ -641,7 +641,7 @@ impl TryFrom for Transaction { .gas .try_into() .map_err(|_| ConversionError::Eip2718Error(RlpError::Overflow.into()))?, - to: tx.to.map_or(TransactionKind::Create, TransactionKind::Call), + to: tx.to.map_or(TxKind::Create, TxKind::Call), value: tx.value, input: tx.input, })) @@ -655,7 +655,7 @@ impl TryFrom for Transaction { .gas .try_into() .map_err(|_| ConversionError::Eip2718Error(RlpError::Overflow.into()))?, - to: tx.to.map_or(TransactionKind::Create, TransactionKind::Call), + to: tx.to.map_or(TxKind::Create, TxKind::Call), value: tx.value, input: tx.input, access_list: tx.access_list.ok_or(ConversionError::MissingAccessList)?, @@ -677,7 +677,7 @@ impl TryFrom for Transaction { .gas .try_into() .map_err(|_| ConversionError::Eip2718Error(RlpError::Overflow.into()))?, - to: tx.to.map_or(TransactionKind::Create, TransactionKind::Call), + to: tx.to.map_or(TxKind::Create, TxKind::Call), value: tx.value, access_list: tx.access_list.ok_or(ConversionError::MissingAccessList)?, input: tx.input, @@ -698,7 +698,7 @@ impl TryFrom for Transaction { .gas .try_into() .map_err(|_| ConversionError::Eip2718Error(RlpError::Overflow.into()))?, - to: tx.to.map_or(TransactionKind::Create, TransactionKind::Call), + to: tx.to.map_or(TxKind::Create, TxKind::Call), value: tx.value, access_list: tx.access_list.ok_or(ConversionError::MissingAccessList)?, input: tx.input, @@ -829,109 +829,6 @@ impl Encodable for Transaction { } } -/// Whether or not the transaction is a contract creation. -#[derive_arbitrary(compact, rlp)] -#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Default, Serialize, Deserialize)] -pub enum TransactionKind { - /// A transaction that creates a contract. - #[default] - Create, - /// A transaction that calls a contract or transfer. - Call(Address), -} - -impl TransactionKind { - /// Returns the address of the contract that will be called or will receive the transfer. - pub fn to(self) -> Option<Address> { - match self { - TransactionKind::Create => None, - TransactionKind::Call(to) => Some(to), - } - } - - /// Returns true if the transaction is a contract creation. - #[inline] - pub fn is_create(self) -> bool { - matches!(self, TransactionKind::Create) - } - - /// Returns true if the transaction is a contract call. - #[inline] - pub fn is_call(self) -> bool { - matches!(self, TransactionKind::Call(_)) - } - - /// Calculates a heuristic for the in-memory size of the [TransactionKind]. - #[inline] - fn size(self) -> usize { - mem::size_of::<Self>() - } -} - -impl Compact for TransactionKind { - fn to_compact<B>(self, buf: &mut B) -> usize - where - B: bytes::BufMut + AsMut<[u8]>, - { - match self { - TransactionKind::Create => 0, - TransactionKind::Call(address) => { - address.to_compact(buf); - 1 - } - } - } - - fn from_compact(buf: &[u8], identifier: usize) -> (Self, &[u8]) { - match identifier { - 0 => (TransactionKind::Create, buf), - 1 => { - let (addr, buf) = Address::from_compact(buf, buf.len()); - (TransactionKind::Call(addr), buf) - } - _ => unreachable!("Junk data in database: unknown TransactionKind variant"), - } - } -} - -impl Encodable for TransactionKind { - /// This encodes the `to` field of a transaction request. - /// If the [TransactionKind] is a [TransactionKind::Call] it will encode the inner address: - /// `rlp(address)` - /// - /// If the [TransactionKind] is a [TransactionKind::Create] it will encode an empty list: - /// `rlp([])`, which is also - fn encode(&self, out: &mut dyn alloy_rlp::BufMut) { - match self { - TransactionKind::Call(to) => to.encode(out), - TransactionKind::Create => out.put_u8(EMPTY_STRING_CODE), - } - } - - fn length(&self) -> usize { - match self { - TransactionKind::Call(to) => to.length(), - TransactionKind::Create => 1, // EMPTY_STRING_CODE is a single byte - } - } -} - -impl Decodable for TransactionKind { - fn decode(buf: &mut &[u8]) -> alloy_rlp::Result<Self> { - if let Some(&first) = buf.first() { - if first == EMPTY_STRING_CODE { - buf.advance(1); - Ok(TransactionKind::Create) - } else { - let addr = <Address as Decodable>::decode(buf)?; - Ok(TransactionKind::Call(addr)) - } - } else { - Err(RlpError::InputTooShort) - } - } -} - /// Signed transaction without its Hash. Type used for inserting into the DB. /// /// This can be converted to [`TransactionSigned`] by calling [`TransactionSignedNoHash::hash`]. @@ -1801,7 +1698,6 @@ impl TryFromRecoveredTransaction for TransactionSignedEcRecovered { /// /// This is a conversion trait that'll ensure transactions received via P2P can be converted to the /// transaction type that the transaction pool uses. -#[cfg(feature = "c-kzg")] pub trait FromRecoveredPooledTransaction { /// Converts to this type from the given [`PooledTransactionsElementEcRecovered`]. fn from_recovered_pooled_transaction(tx: PooledTransactionsElementEcRecovered) -> Self; @@ -1823,18 +1719,35 @@ impl IntoRecoveredTransaction for TransactionSignedEcRecovered { } } -impl TryFrom<reth_rpc_types::Transaction> for TransactionSignedEcRecovered { - type Error = ConversionError; +#[cfg(feature = "alloy-compat")] +impl TryFrom<alloy_rpc_types::Transaction> for TransactionSignedEcRecovered { + type Error = alloy_rpc_types::ConversionError; - fn try_from(tx: reth_rpc_types::Transaction) -> Result<Self, Self::Error> { + fn try_from(tx: alloy_rpc_types::Transaction) -> Result<Self, Self::Error> { + use alloy_rpc_types::ConversionError; let signature = tx.signature.ok_or(ConversionError::MissingSignature)?; + let transaction: Transaction = tx.try_into()?; + TransactionSigned::from_transaction_and_signature( - tx.try_into()?, + transaction.clone(), Signature { r: signature.r, s: signature.s, - odd_y_parity: signature.y_parity.ok_or(ConversionError::MissingYParity)?.0, + odd_y_parity: if let Some(y_parity) = signature.y_parity { + y_parity.0 + } else { + match transaction.tx_type() { + // If the transaction type is Legacy, adjust the v component of the + // signature according to the Ethereum specification + TxType::Legacy => { + extract_chain_id(signature.v.to()) + .map_err(|_| ConversionError::InvalidSignature)?
+ .0 + } + _ => !signature.v.is_zero(), + } + }, }, ) .try_into_ecrecovered() @@ -1847,10 +1760,10 @@ mod tests { use crate::{ hex, sign_message, transaction::{ - from_compact_zstd_unaware, signature::Signature, to_compact_ztd_unaware, - TransactionKind, TxEip1559, TxLegacy, MIN_LENGTH_EIP1559_TX_ENCODED, - MIN_LENGTH_EIP2930_TX_ENCODED, MIN_LENGTH_EIP4844_TX_ENCODED, - MIN_LENGTH_LEGACY_TX_ENCODED, PARALLEL_SENDER_RECOVERY_THRESHOLD, + from_compact_zstd_unaware, signature::Signature, to_compact_ztd_unaware, TxEip1559, + TxKind, TxLegacy, MIN_LENGTH_EIP1559_TX_ENCODED, MIN_LENGTH_EIP2930_TX_ENCODED, + MIN_LENGTH_EIP4844_TX_ENCODED, MIN_LENGTH_LEGACY_TX_ENCODED, + PARALLEL_SENDER_RECOVERY_THRESHOLD, }, Address, Bytes, Transaction, TransactionSigned, TransactionSignedEcRecovered, TransactionSignedNoHash, TxEip2930, TxEip4844, B256, U256, @@ -1858,7 +1771,7 @@ mod tests { use alloy_primitives::{address, b256, bytes}; use alloy_rlp::{Decodable, Encodable, Error as RlpError}; use reth_codecs::Compact; - use secp256k1::{KeyPair, Secp256k1}; + use secp256k1::{Keypair, Secp256k1}; use std::str::FromStr; #[test] @@ -1872,13 +1785,13 @@ mod tests { fn raw_kind_encoding_sanity() { // check the 0x80 encoding for Create let mut buf = Vec::new(); - TransactionKind::Create.encode(&mut buf); + TxKind::Create.encode(&mut buf); assert_eq!(buf, vec![0x80]); // check decoding let buf = [0x80]; - let decoded = TransactionKind::decode(&mut &buf[..]).unwrap(); - assert_eq!(decoded, TransactionKind::Create); + let decoded = TxKind::decode(&mut &buf[..]).unwrap(); + assert_eq!(decoded, TxKind::Create); } #[test] @@ -1954,9 +1867,7 @@ mod tests { nonce: 2, gas_price: 1000000000, gas_limit: 100000, - to: TransactionKind::Call( - Address::from_str("d3e8763675e4c425df46cc3b5c0f6cbdac396046").unwrap(), - ), + to: Address::from_str("d3e8763675e4c425df46cc3b5c0f6cbdac396046").unwrap().into(), value: U256::from(1000000000000000u64), input: Bytes::default(), }); @@ -1976,9 +1887,7 @@ mod tests { nonce: 1u64, gas_price: 1000000000, gas_limit: 100000u64, - to: TransactionKind::Call(Address::from_slice( - &hex!("d3e8763675e4c425df46cc3b5c0f6cbdac396046")[..], - )), + to: Address::from_slice(&hex!("d3e8763675e4c425df46cc3b5c0f6cbdac396046")[..]).into(), value: U256::from(693361000000000u64), input: Default::default(), }); @@ -1997,9 +1906,7 @@ mod tests { nonce: 3, gas_price: 2000000000, gas_limit: 10000000, - to: TransactionKind::Call(Address::from_slice( - &hex!("d3e8763675e4c425df46cc3b5c0f6cbdac396046")[..], - )), + to: Address::from_slice(&hex!("d3e8763675e4c425df46cc3b5c0f6cbdac396046")[..]).into(), value: U256::from(1000000000000000u64), input: Bytes::default(), }); @@ -2019,9 +1926,7 @@ mod tests { max_priority_fee_per_gas: 1500000000, max_fee_per_gas: 1500000013, gas_limit: 21000, - to: TransactionKind::Call(Address::from_slice( - &hex!("61815774383099e24810ab832a5b2a5425c154d5")[..], - )), + to: Address::from_slice(&hex!("61815774383099e24810ab832a5b2a5425c154d5")[..]).into(), value: U256::from(3000000000000000000u64), input: Default::default(), access_list: Default::default(), @@ -2041,9 +1946,7 @@ mod tests { nonce: 15, gas_price: 2200000000, gas_limit: 34811, - to: TransactionKind::Call(Address::from_slice( - &hex!("cf7f9e66af820a19257a2108375b180b0ec49167")[..], - )), + to: Address::from_slice(&hex!("cf7f9e66af820a19257a2108375b180b0ec49167")[..]).into(), value: U256::from(1234), input: Bytes::default(), }); @@ -2151,7 +2054,7 @@ mod tests { tx.set_chain_id(chain_id % (u64::MAX / 2 - 36)); } - let key_pair = 
KeyPair::new(&secp, &mut rng); + let key_pair = Keypair::new(&secp, &mut rng); let signature = sign_message(B256::from_slice(&key_pair.secret_bytes()[..]), tx.signature_hash()).unwrap(); @@ -2330,9 +2233,7 @@ mod tests { nonce: 2, gas_price: 1000000000, gas_limit: 100000, - to: TransactionKind::Call( - Address::from_str("d3e8763675e4c425df46cc3b5c0f6cbdac396046").unwrap(), - ), + to: Address::from_str("d3e8763675e4c425df46cc3b5c0f6cbdac396046").unwrap().into(), value: U256::from(1000000000000000u64), input: Bytes::from(input), }); @@ -2379,9 +2280,7 @@ mod tests { nonce: 2, gas_price: 1000000000, gas_limit: 100000, - to: TransactionKind::Call( - Address::from_str("d3e8763675e4c425df46cc3b5c0f6cbdac396046").unwrap(), - ), + to: Address::from_str("d3e8763675e4c425df46cc3b5c0f6cbdac396046").unwrap().into(), value: U256::from(1000000000000000u64), input: Bytes::from(vec![3u8; 64]), }); diff --git a/crates/primitives/src/transaction/optimism.rs b/crates/primitives/src/transaction/optimism.rs index 0001347b5..f553f2aa6 100644 --- a/crates/primitives/src/transaction/optimism.rs +++ b/crates/primitives/src/transaction/optimism.rs @@ -1,4 +1,4 @@ -use crate::{Address, Bytes, TransactionKind, TxType, B256, U256}; +use crate::{Address, Bytes, TxKind, TxType, B256, U256}; use alloy_rlp::{ length_of_length, Decodable, Encodable, Error as DecodeError, Header, EMPTY_STRING_CODE, }; @@ -16,7 +16,7 @@ pub struct TxDeposit { pub from: Address, /// The address of the recipient account, or the null (zero-length) address if the deposited /// transaction is a contract creation. - pub to: TransactionKind, + pub to: TxKind, /// The ETH value to mint on L2. pub mint: Option, /// The ETH value to send to the recipient account. @@ -169,7 +169,7 @@ mod tests { let original = TxDeposit { source_hash: B256::default(), from: Address::default(), - to: TransactionKind::default(), + to: TxKind::default(), mint: Some(100), value: U256::default(), gas_limit: 50000, @@ -189,7 +189,7 @@ mod tests { let tx_deposit = TxDeposit { source_hash: B256::default(), from: Address::default(), - to: TransactionKind::default(), + to: TxKind::default(), mint: Some(100), value: U256::default(), gas_limit: 50000, @@ -211,7 +211,7 @@ mod tests { let tx_deposit = TxDeposit { source_hash: B256::default(), from: Address::default(), - to: TransactionKind::default(), + to: TxKind::default(), mint: Some(100), value: U256::default(), gas_limit: 50000, diff --git a/crates/primitives/src/transaction/pooled.rs b/crates/primitives/src/transaction/pooled.rs index 5588d45a7..8323de470 100644 --- a/crates/primitives/src/transaction/pooled.rs +++ b/crates/primitives/src/transaction/pooled.rs @@ -1,8 +1,6 @@ //! Defines the types for blob transactions, legacy, and other EIP-2718 transactions included in a //! response to `GetPooledTransactions`. 
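The `odd_y_parity` fallback in the mod.rs `TryFrom<alloy_rpc_types::Transaction>` impl above reconstructs the parity when the RPC response carries no `yParity` field: legacy signatures fold the parity (and optionally a chain id) into `v`, while typed transactions encode it directly as 0 or 1. A condensed, standalone sketch of that decision with the EIP-155 arithmetic written out (illustration only, not the reth helper):

```rust
/// Recover the signature's y-parity from `v`.
fn y_parity_from_v(v: u64, is_legacy: bool) -> Result<bool, &'static str> {
    if is_legacy {
        match v {
            // Pre-EIP-155: v is 27 (even parity) or 28 (odd parity).
            27 | 28 => Ok(v == 28),
            // EIP-155: v = parity + chain_id * 2 + 35.
            v if v >= 35 => Ok((v - 35) % 2 != 0),
            _ => Err("invalid legacy v value"),
        }
    } else {
        // Typed (EIP-2930 and later) signatures carry parity as 0 or 1.
        Ok(v != 0)
    }
}
```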
-#![cfg_attr(docsrs, doc(cfg(feature = "c-kzg")))] - use super::error::TransactionConversionError; use crate::{ Address, BlobTransaction, BlobTransactionSidecar, Bytes, Signature, Transaction, diff --git a/crates/primitives/src/transaction/sidecar.rs b/crates/primitives/src/transaction/sidecar.rs index 4c2751a86..b4c82b35a 100644 --- a/crates/primitives/src/transaction/sidecar.rs +++ b/crates/primitives/src/transaction/sidecar.rs @@ -1,48 +1,16 @@ #![cfg_attr(docsrs, doc(cfg(feature = "c-kzg")))] -#[cfg(any(test, feature = "arbitrary"))] use crate::{ - constants::eip4844::{FIELD_ELEMENTS_PER_BLOB, MAINNET_KZG_TRUSTED_SETUP}, - kzg::{KzgCommitment, KzgProof, BYTES_PER_FIELD_ELEMENT}, -}; -use crate::{ - keccak256, - kzg::{ - self, Blob, Bytes48, KzgSettings, BYTES_PER_BLOB, BYTES_PER_COMMITMENT, BYTES_PER_PROOF, - }, - Signature, Transaction, TransactionSigned, TxEip4844, TxHash, B256, EIP4844_TX_TYPE_ID, + keccak256, Signature, Transaction, TransactionSigned, TxEip4844, TxHash, EIP4844_TX_TYPE_ID, }; use alloy_rlp::{Decodable, Encodable, Error as RlpError, Header}; -use bytes::BufMut; -#[cfg(any(test, feature = "arbitrary"))] -use proptest::{ - arbitrary::{any as proptest_any, ParamsFor}, - collection::vec as proptest_vec, - strategy::{BoxedStrategy, Strategy}, -}; use serde::{Deserialize, Serialize}; -/// An error that can occur when validating a [BlobTransaction]. -#[derive(Debug, thiserror::Error)] -pub enum BlobTransactionValidationError { - /// Proof validation failed. - #[error("invalid KZG proof")] - InvalidProof, - /// An error returned by [`kzg`]. - #[error("KZG error: {0:?}")] - KZGError(#[from] kzg::Error), - /// The inner transaction is not a blob transaction. - #[error("unable to verify proof for non blob transaction: {0}")] - NotBlobTransaction(u8), - /// The versioned hash is incorrect. - #[error("wrong versioned hash: have {have}, expected {expected}")] - WrongVersionedHash { - /// The versioned hash we got - have: B256, - /// The versioned hash we expected - expected: B256, - }, -} +#[doc(inline)] +pub use alloy_eips::eip4844::BlobTransactionSidecar; + +#[cfg(feature = "c-kzg")] +pub use alloy_eips::eip4844::BlobTransactionValidationError; /// A response to `GetPooledTransactions` that includes blob data, their commitments, and their /// corresponding proofs. @@ -83,9 +51,10 @@ impl BlobTransaction { /// Verifies that the transaction's blob data, commitments, and proofs are all valid. /// /// See also [TxEip4844::validate_blob] + #[cfg(feature = "c-kzg")] pub fn validate( &self, - proof_settings: &KzgSettings, + proof_settings: &c_kzg::KzgSettings, ) -> Result<(), BlobTransactionValidationError> { self.transaction.validate_blob(&self.sidecar, proof_settings) } @@ -168,7 +137,7 @@ impl BlobTransaction { self.signature.encode(out); // Encode the blobs, commitments, and proofs - self.sidecar.encode_inner(out); + self.sidecar.encode(out); } /// Outputs the length of the RLP encoding of the blob transaction, including the tx type byte, @@ -274,7 +243,7 @@ impl BlobTransaction { } // All that's left are the blobs, commitments, and proofs - let sidecar = BlobTransactionSidecar::decode_inner(data)?; + let sidecar = BlobTransactionSidecar::decode(data)?; // # Calculating the hash // @@ -306,204 +275,21 @@ impl BlobTransaction { } } -/// This represents a set of blobs, and its corresponding commitments and proofs. -#[derive(Clone, Debug, PartialEq, Eq, Default, Serialize, Deserialize)] -#[repr(C)] -pub struct BlobTransactionSidecar { - /// The blob data. 
- pub blobs: Vec, - /// The blob commitments. - pub commitments: Vec, - /// The blob proofs. - pub proofs: Vec, -} - -impl BlobTransactionSidecar { - /// Creates a new [BlobTransactionSidecar] using the given blobs, commitments, and proofs. - pub fn new(blobs: Vec, commitments: Vec, proofs: Vec) -> Self { - Self { blobs, commitments, proofs } - } - - /// Encodes the inner [BlobTransactionSidecar] fields as RLP bytes, without a RLP header. - /// - /// This encodes the fields in the following order: - /// - `blobs` - /// - `commitments` - /// - `proofs` - #[inline] - pub(crate) fn encode_inner(&self, out: &mut dyn bytes::BufMut) { - BlobTransactionSidecarRlp::wrap_ref(self).encode(out); - } - - /// Outputs the RLP length of the [BlobTransactionSidecar] fields, without a RLP header. - pub fn fields_len(&self) -> usize { - BlobTransactionSidecarRlp::wrap_ref(self).fields_len() - } - - /// Decodes the inner [BlobTransactionSidecar] fields from RLP bytes, without a RLP header. - /// - /// This decodes the fields in the following order: - /// - `blobs` - /// - `commitments` - /// - `proofs` - #[inline] - pub(crate) fn decode_inner(buf: &mut &[u8]) -> alloy_rlp::Result { - Ok(BlobTransactionSidecarRlp::decode(buf)?.unwrap()) - } - - /// Calculates a size heuristic for the in-memory size of the [BlobTransactionSidecar]. - #[inline] - pub fn size(&self) -> usize { - self.blobs.len() * BYTES_PER_BLOB + // blobs - self.commitments.len() * BYTES_PER_COMMITMENT + // commitments - self.proofs.len() * BYTES_PER_PROOF // proofs - } -} - -impl From for BlobTransactionSidecar { - fn from(value: reth_rpc_types::BlobTransactionSidecar) -> Self { - // SAFETY: Same repr and size - unsafe { std::mem::transmute(value) } - } -} - -impl From for reth_rpc_types::BlobTransactionSidecar { - fn from(value: BlobTransactionSidecar) -> Self { - // SAFETY: Same repr and size - unsafe { std::mem::transmute(value) } - } -} - -impl Encodable for BlobTransactionSidecar { - /// Encodes the inner [BlobTransactionSidecar] fields as RLP bytes, without a RLP header. - fn encode(&self, out: &mut dyn BufMut) { - self.encode_inner(out) - } - - fn length(&self) -> usize { - self.fields_len() - } -} - -impl Decodable for BlobTransactionSidecar { - /// Decodes the inner [BlobTransactionSidecar] fields from RLP bytes, without a RLP header. 
- fn decode(buf: &mut &[u8]) -> alloy_rlp::Result { - Self::decode_inner(buf) - } -} - -// Wrapper for c-kzg rlp -#[repr(C)] -struct BlobTransactionSidecarRlp { - blobs: Vec<[u8; BYTES_PER_BLOB]>, - commitments: Vec<[u8; BYTES_PER_COMMITMENT]>, - proofs: Vec<[u8; BYTES_PER_PROOF]>, -} - -const _: [(); std::mem::size_of::()] = - [(); std::mem::size_of::()]; - -const _: [(); std::mem::size_of::()] = - [(); std::mem::size_of::()]; - -impl BlobTransactionSidecarRlp { - fn wrap_ref(other: &BlobTransactionSidecar) -> &Self { - // SAFETY: Same repr and size - unsafe { &*(other as *const BlobTransactionSidecar).cast::() } - } - - fn unwrap(self) -> BlobTransactionSidecar { - // SAFETY: Same repr and size - unsafe { std::mem::transmute(self) } - } - - fn encode(&self, out: &mut dyn bytes::BufMut) { - // Encode the blobs, commitments, and proofs - self.blobs.encode(out); - self.commitments.encode(out); - self.proofs.encode(out); - } - - fn fields_len(&self) -> usize { - self.blobs.length() + self.commitments.length() + self.proofs.length() - } - - fn decode(buf: &mut &[u8]) -> alloy_rlp::Result { - Ok(Self { - blobs: Decodable::decode(buf)?, - commitments: Decodable::decode(buf)?, - proofs: Decodable::decode(buf)?, - }) - } -} - -#[cfg(any(test, feature = "arbitrary"))] -impl<'a> arbitrary::Arbitrary<'a> for BlobTransactionSidecar { - fn arbitrary(u: &mut arbitrary::Unstructured<'a>) -> arbitrary::Result { - let mut arr = [0u8; BYTES_PER_BLOB]; - - // Note: the "fix" for this is kinda pointless. - #[allow(clippy::large_stack_frames)] - let blobs: Vec = (0..u.int_in_range(1..=16)?) - .map(|_| { - arr = arbitrary::Arbitrary::arbitrary(u).unwrap(); - - // Ensure that each blob is canonical by ensuring each field element contained in - // the blob is < BLS_MODULUS - for i in 0..(FIELD_ELEMENTS_PER_BLOB as usize) { - arr[i * BYTES_PER_FIELD_ELEMENT] = 0; - } - - Blob::from(arr) - }) - .collect(); - - Ok(generate_blob_sidecar(blobs)) - } -} - -#[cfg(any(test, feature = "arbitrary"))] -impl proptest::arbitrary::Arbitrary for BlobTransactionSidecar { - type Parameters = ParamsFor; - fn arbitrary_with(_args: Self::Parameters) -> Self::Strategy { - proptest_vec(proptest_vec(proptest_any::(), BYTES_PER_BLOB), 1..=5) - .prop_map(move |blobs| { - let blobs = blobs - .into_iter() - .map(|mut blob| { - let mut arr = [0u8; BYTES_PER_BLOB]; - - // Ensure that each blob is canonical by ensuring each field element - // contained in the blob is < BLS_MODULUS - for i in 0..(FIELD_ELEMENTS_PER_BLOB as usize) { - blob[i * BYTES_PER_FIELD_ELEMENT] = 0; - } - - arr.copy_from_slice(blob.as_slice()); - arr.into() - }) - .collect(); - - generate_blob_sidecar(blobs) - }) - .boxed() - } - - type Strategy = BoxedStrategy; -} - /// Generates a [`BlobTransactionSidecar`] structure containing blobs, commitments, and proofs. 
-#[cfg(any(test, feature = "arbitrary"))] -pub fn generate_blob_sidecar(blobs: Vec) -> BlobTransactionSidecar { +#[cfg(all(feature = "c-kzg", any(test, feature = "arbitrary")))] +pub fn generate_blob_sidecar(blobs: Vec) -> BlobTransactionSidecar { + use crate::constants::eip4844::MAINNET_KZG_TRUSTED_SETUP; + use c_kzg::{KzgCommitment, KzgProof}; + let kzg_settings = MAINNET_KZG_TRUSTED_SETUP.clone(); - let commitments: Vec = blobs + let commitments: Vec = blobs .iter() .map(|blob| KzgCommitment::blob_to_kzg_commitment(&blob.clone(), &kzg_settings).unwrap()) .map(|commitment| commitment.to_bytes()) .collect(); - let proofs: Vec = blobs + let proofs: Vec = blobs .iter() .zip(commitments.iter()) .map(|(blob, commitment)| { @@ -512,18 +298,15 @@ pub fn generate_blob_sidecar(blobs: Vec) -> BlobTransactionSidecar { .map(|proof| proof.to_bytes()) .collect(); - BlobTransactionSidecar { blobs, commitments, proofs } + BlobTransactionSidecar::from_kzg(blobs, commitments, proofs) } -#[cfg(test)] +#[cfg(all(test, feature = "c-kzg"))] mod tests { - use crate::{ - hex, - kzg::{Blob, Bytes48}, - transaction::sidecar::generate_blob_sidecar, - BlobTransactionSidecar, - }; - use std::{fs, path::PathBuf}; + use super::*; + use crate::{hex, kzg::Blob}; + use alloy_eips::eip4844::Bytes48; + use std::{fs, path::PathBuf, str::FromStr}; #[test] fn test_blob_transaction_sidecar_generation() { @@ -550,7 +333,7 @@ mod tests { assert_eq!( sidecar.commitments, vec![ - Bytes48::from_hex(json_value.get("commitment").unwrap().as_str().unwrap()).unwrap() + Bytes48::from_str(json_value.get("commitment").unwrap().as_str().unwrap()).unwrap() ] ); } @@ -624,7 +407,7 @@ mod tests { let mut encoded_rlp = Vec::new(); // Encode the inner data of the BlobTransactionSidecar into RLP - sidecar.encode_inner(&mut encoded_rlp); + sidecar.encode(&mut encoded_rlp); // Assert the equality between the expected RLP from the JSON and the encoded RLP assert_eq!(json_value.get("rlp").unwrap().as_str().unwrap(), hex::encode(&encoded_rlp)); @@ -655,11 +438,10 @@ mod tests { let mut encoded_rlp = Vec::new(); // Encode the inner data of the BlobTransactionSidecar into RLP - sidecar.encode_inner(&mut encoded_rlp); + sidecar.encode(&mut encoded_rlp); // Decode the RLP-encoded data back into a BlobTransactionSidecar - let decoded_sidecar = - BlobTransactionSidecar::decode_inner(&mut encoded_rlp.as_slice()).unwrap(); + let decoded_sidecar = BlobTransactionSidecar::decode(&mut encoded_rlp.as_slice()).unwrap(); // Assert the equality between the original BlobTransactionSidecar and the decoded one assert_eq!(sidecar, decoded_sidecar); diff --git a/crates/primitives/src/transaction/signature.rs b/crates/primitives/src/transaction/signature.rs index 84ae2915f..29db729e9 100644 --- a/crates/primitives/src/transaction/signature.rs +++ b/crates/primitives/src/transaction/signature.rs @@ -33,7 +33,7 @@ impl Signature { /// signature. 
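The rewritten `generate_blob_sidecar` above walks each blob through the two c-kzg primitives: commit to the blob, then prove against that commitment. A trimmed sketch of the per-blob step, assuming the same `c_kzg` API the diff imports (error handling simplified):

```rust
use c_kzg::{Blob, Bytes48, KzgCommitment, KzgProof, KzgSettings};

/// Compute (commitment, proof) for one blob under the given trusted setup.
fn commit_and_prove(
    blob: &Blob,
    settings: &KzgSettings,
) -> Result<(Bytes48, Bytes48), c_kzg::Error> {
    let commitment = KzgCommitment::blob_to_kzg_commitment(blob, settings)?.to_bytes();
    let proof = KzgProof::compute_blob_kzg_proof(blob, &commitment, settings)?.to_bytes();
    Ok((commitment, proof))
}
```

The collected vectors are then handed to `BlobTransactionSidecar::from_kzg`, replacing the old struct-literal construction.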
#[cfg(feature = "optimism")] pub const fn optimism_deposit_tx_signature() -> Self { - Signature { r: U256::ZERO, s: U256::ZERO, odd_y_parity: false } + Self { r: U256::ZERO, s: U256::ZERO, odd_y_parity: false } } } @@ -52,7 +52,7 @@ impl Compact for Signature { let r = U256::from_le_slice(&buf[0..32]); let s = U256::from_le_slice(&buf[32..64]); buf.advance(64); - (Signature { r, s, odd_y_parity: identifier != 0 }, buf) + (Self { r, s, odd_y_parity: identifier != 0 }, buf) } } @@ -112,18 +112,13 @@ impl Signature { // // NOTE: this is very hacky and only relevant for op-mainnet pre bedrock if v == 0 && r.is_zero() && s.is_zero() { - return Ok((Signature { r, s, odd_y_parity: false }, None)) + return Ok((Self { r, s, odd_y_parity: false }, None)) } - return Err(RlpError::Custom("invalid Ethereum signature (V is not 27 or 28)")) } - let odd_y_parity = v == 28; - Ok((Signature { r, s, odd_y_parity }, None)) - } else { - // EIP-155: v = {0, 1} + CHAIN_ID * 2 + 35 - let odd_y_parity = ((v - 35) % 2) != 0; - let chain_id = (v - 35) >> 1; - Ok((Signature { r, s, odd_y_parity }, Some(chain_id))) } + + let (odd_y_parity, chain_id) = extract_chain_id(v)?; + Ok((Self { r, s, odd_y_parity }, chain_id)) } /// Output the length of the signature without the length of the RLP header @@ -140,7 +135,7 @@ impl Signature { /// Decodes the `odd_y_parity`, `r`, `s` values without a RLP header. pub fn decode(buf: &mut &[u8]) -> alloy_rlp::Result { - Ok(Signature { + Ok(Self { odd_y_parity: Decodable::decode(buf)?, r: Decodable::decode(buf)?, s: Decodable::decode(buf)?, @@ -201,6 +196,24 @@ impl Signature { } } +/// Outputs (odd_y_parity, chain_id) from the `v` value. +/// This doesn't check validity of the `v` value for optimism. +#[inline] +pub fn extract_chain_id(v: u64) -> alloy_rlp::Result<(bool, Option)> { + if v < 35 { + // non-EIP-155 legacy scheme, v = 27 for even y-parity, v = 28 for odd y-parity + if v != 27 && v != 28 { + return Err(RlpError::Custom("invalid Ethereum signature (V is not 27 or 28)")) + } + Ok((v == 28, None)) + } else { + // EIP-155: v = {0, 1} + CHAIN_ID * 2 + 35 + let odd_y_parity = ((v - 35) % 2) != 0; + let chain_id = (v - 35) >> 1; + Ok((odd_y_parity, Some(chain_id))) + } +} + #[cfg(test)] mod tests { use crate::{transaction::signature::SECP256K1N_HALF, Address, Signature, B256, U256}; diff --git a/crates/primitives/src/transaction/tx_type.rs b/crates/primitives/src/transaction/tx_type.rs index 84a099cb7..d203ecf77 100644 --- a/crates/primitives/src/transaction/tx_type.rs +++ b/crates/primitives/src/transaction/tx_type.rs @@ -1,4 +1,5 @@ use crate::{U64, U8}; +use alloy_rlp::{Decodable, Encodable}; use bytes::Buf; use reth_codecs::{derive_arbitrary, Compact}; use serde::{Deserialize, Serialize}; @@ -85,17 +86,17 @@ impl TryFrom for TxType { fn try_from(value: u8) -> Result { #[cfg(feature = "optimism")] - if value == TxType::Deposit as u8 { + if value == TxType::Deposit { return Ok(TxType::Deposit) } - if value == TxType::Legacy as u8 { + if value == TxType::Legacy { return Ok(TxType::Legacy) - } else if value == TxType::Eip2930 as u8 { + } else if value == TxType::Eip2930 { return Ok(TxType::Eip2930) - } else if value == TxType::Eip1559 as u8 { + } else if value == TxType::Eip1559 { return Ok(TxType::Eip1559) - } else if value == TxType::Eip4844 as u8 { + } else if value == TxType::Eip4844 { return Ok(TxType::Eip4844) } @@ -175,8 +176,36 @@ impl PartialEq for TxType { } } +impl PartialEq for u8 { + fn eq(&self, other: &TxType) -> bool { + *self == *other as u8 + } +} + +impl 
Encodable for TxType { + fn encode(&self, out: &mut dyn bytes::BufMut) { + (*self as u8).encode(out); + } + + fn length(&self) -> usize { + 1 + } +} + +impl Decodable for TxType { + fn decode(buf: &mut &[u8]) -> alloy_rlp::Result { + let ty = u8::decode(buf)?; + + TxType::try_from(ty).map_err(alloy_rlp::Error::Custom) + } +} + #[cfg(test)] mod tests { + use rand::Rng; + + use crate::hex; + use super::*; #[test] @@ -243,4 +272,36 @@ mod tests { ); } } + + #[test] + fn decode_tx_type() { + // Test for Legacy transaction + let tx_type = TxType::decode(&mut &hex!("80")[..]).unwrap(); + assert_eq!(tx_type, TxType::Legacy); + + // Test for EIP2930 transaction + let tx_type = TxType::decode(&mut &[1u8][..]).unwrap(); + assert_eq!(tx_type, TxType::Eip2930); + + // Test for EIP1559 transaction + let tx_type = TxType::decode(&mut &[2u8][..]).unwrap(); + assert_eq!(tx_type, TxType::Eip1559); + + // Test for EIP4844 transaction + let tx_type = TxType::decode(&mut &[3u8][..]).unwrap(); + assert_eq!(tx_type, TxType::Eip4844); + + // Test random byte not in range + let buf = [rand::thread_rng().gen_range(4..=u8::MAX)]; + println!("{buf:?}"); + assert!(TxType::decode(&mut &buf[..]).is_err()); + + // Test for Deposit transaction + #[cfg(feature = "optimism")] + { + let buf = [126u8]; + let tx_type = TxType::decode(&mut &buf[..]).unwrap(); + assert_eq!(tx_type, TxType::Deposit); + } + } } diff --git a/crates/primitives/src/transaction/util.rs b/crates/primitives/src/transaction/util.rs index 638064c12..b4a2db7f6 100644 --- a/crates/primitives/src/transaction/util.rs +++ b/crates/primitives/src/transaction/util.rs @@ -18,7 +18,7 @@ pub(crate) mod secp256k1 { let sig = RecoverableSignature::from_compact(&sig[0..64], RecoveryId::from_i32(sig[64] as i32)?)?; - let public = SECP256K1.recover_ecdsa(&Message::from_slice(&msg[..32])?, &sig)?; + let public = SECP256K1.recover_ecdsa(&Message::from_digest(*msg), &sig)?; Ok(public_key_to_address(public)) } @@ -26,7 +26,7 @@ pub(crate) mod secp256k1 { /// Returns the corresponding signature. pub fn sign_message(secret: B256, message: B256) -> Result { let sec = SecretKey::from_slice(secret.as_ref())?; - let s = SECP256K1.sign_ecdsa_recoverable(&Message::from_slice(&message[..])?, &sec); + let s = SECP256K1.sign_ecdsa_recoverable(&Message::from_digest(message.0), &sec); let (rec_id, data) = s.serialize_compact(); let signature = Signature { diff --git a/crates/primitives/src/withdrawal.rs b/crates/primitives/src/withdrawal.rs index 730fb291c..e4d1b37c0 100644 --- a/crates/primitives/src/withdrawal.rs +++ b/crates/primitives/src/withdrawal.rs @@ -1,51 +1,12 @@ -use crate::{constants::GWEI_TO_WEI, serde_helper::u64_hex, Address}; -use alloy_rlp::{RlpDecodable, RlpDecodableWrapper, RlpEncodable, RlpEncodableWrapper}; -use reth_codecs::{main_codec, Compact}; -use std::{ - mem, - ops::{Deref, DerefMut}, -}; +//! [EIP-4895](https://eips.ethereum.org/EIPS/eip-4895) Withdrawal types. -/// Withdrawal represents a validator withdrawal from the consensus layer. -#[main_codec] -#[derive(Debug, Clone, PartialEq, Eq, Default, Hash, RlpEncodable, RlpDecodable)] -pub struct Withdrawal { - /// Monotonically increasing identifier issued by consensus layer. - #[serde(with = "u64_hex")] - pub index: u64, - /// Index of validator associated with withdrawal. - #[serde(with = "u64_hex", rename = "validatorIndex")] - pub validator_index: u64, - /// Target address for withdrawn ether. - pub address: Address, - /// Value of the withdrawal in gwei. 
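The new `Encodable`/`Decodable` impls for `TxType` above explain the `hex!("80")` case in `decode_tx_type`: the type is encoded as its `u8` discriminant, and RLP represents the integer 0 as the empty-string code `0x80`, while small nonzero bytes encode as themselves. A worked check against alloy-rlp:

```rust
use alloy_rlp::Encodable;

fn main() {
    // Legacy has discriminant 0, and RLP encodes 0u8 as the empty string 0x80.
    let mut buf = Vec::new();
    0u8.encode(&mut buf);
    assert_eq!(buf, [0x80]);

    // Nonzero single-byte values below 0x80 encode as themselves.
    buf.clear();
    3u8.encode(&mut buf);
    assert_eq!(buf, [0x03]); // Eip4844
}
```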
- #[serde(with = "u64_hex")] - pub amount: u64, -} +use alloy_rlp::{RlpDecodableWrapper, RlpEncodableWrapper}; +use reth_codecs::{main_codec, Compact}; +use std::ops::{Deref, DerefMut}; -impl Withdrawal { - /// Return the withdrawal amount in wei. - pub fn amount_wei(&self) -> u128 { - self.amount as u128 * GWEI_TO_WEI as u128 - } - - /// Calculate a heuristic for the in-memory size of the [Withdrawal]. - #[inline] - pub fn size(&self) -> usize { - mem::size_of::() - } -} - -impl From for Withdrawal { - fn from(withdrawal: reth_rpc_types::Withdrawal) -> Self { - Self { - index: withdrawal.index, - validator_index: withdrawal.index, - address: withdrawal.address, - amount: withdrawal.amount, - } - } -} +/// Re-export from `alloy_eips`. +#[doc(inline)] +pub use alloy_eips::eip4895::Withdrawal; /// Represents a collection of Withdrawals. #[main_codec] @@ -61,13 +22,13 @@ impl Withdrawals { /// Calculate the total size, including capacity, of the Withdrawals. #[inline] pub fn total_size(&self) -> usize { - self.size() + self.capacity() * std::mem::size_of::() + self.capacity() * std::mem::size_of::() } /// Calculate a heuristic for the in-memory size of the [Withdrawals]. #[inline] pub fn size(&self) -> usize { - self.iter().map(Withdrawal::size).sum() + self.len() * std::mem::size_of::() } /// Get an iterator over the Withdrawals. @@ -115,15 +76,42 @@ impl DerefMut for Withdrawals { } } -impl From> for Withdrawals { - fn from(withdrawals: Vec) -> Self { - Self(withdrawals.into_iter().map(Into::into).collect()) +impl From> for Withdrawals { + fn from(withdrawals: Vec) -> Self { + Self(withdrawals) } } #[cfg(test)] mod tests { use super::*; + use crate::Address; + use alloy_rlp::{RlpDecodable, RlpEncodable}; + use proptest::proptest; + + /// This type is kept for compatibility tests after the codec support was added to alloy-eips + /// Withdrawal type natively + #[main_codec] + #[derive(Debug, Clone, PartialEq, Eq, Default, Hash, RlpEncodable, RlpDecodable)] + struct RethWithdrawal { + /// Monotonically increasing identifier issued by consensus layer. + index: u64, + /// Index of validator associated with withdrawal. + validator_index: u64, + /// Target address for withdrawn ether. + address: Address, + /// Value of the withdrawal in gwei. 
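The removed `amount_wei` helper above is just a widening multiply: withdrawal amounts are denominated in gwei, so the wei value is `amount * 10^9`, computed in `u128` because realistic balances overflow `u64`. For example:

```rust
const GWEI_TO_WEI: u64 = 1_000_000_000;

fn amount_wei(amount_gwei: u64) -> u128 {
    amount_gwei as u128 * GWEI_TO_WEI as u128
}

fn main() {
    // 32 ETH is 32_000_000_000 gwei; its wei value (3.2e19) exceeds u64::MAX.
    assert_eq!(amount_wei(32_000_000_000), 32_000_000_000_000_000_000u128);
}
```

Callers that still need the conversion should find an equivalent `amount_wei` on the alloy-eips `Withdrawal` that this file now re-exports.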
+ amount: u64, + } + + impl PartialEq for RethWithdrawal { + fn eq(&self, other: &Withdrawal) -> bool { + self.index == other.index && + self.validator_index == other.validator_index && + self.address == other.address && + self.amount == other.amount + } + } // #[test] @@ -134,4 +122,23 @@ mod tests { let s = serde_json::to_string(&withdrawals).unwrap(); assert_eq!(input, s); } + + proptest!( + #[test] + fn test_roundtrip_withdrawal_compat(withdrawal: RethWithdrawal) { + // Convert to buffer and then create alloy_access_list from buffer and + // compare + let mut compacted_reth_withdrawal = Vec::::new(); + let len = withdrawal.clone().to_compact(&mut compacted_reth_withdrawal); + + // decode the compacted buffer to AccessList + let alloy_withdrawal = Withdrawal::from_compact(&compacted_reth_withdrawal, len).0; + assert_eq!(withdrawal, alloy_withdrawal); + + let mut compacted_alloy_withdrawal = Vec::::new(); + let alloy_len = alloy_withdrawal.to_compact(&mut compacted_alloy_withdrawal); + assert_eq!(len, alloy_len); + assert_eq!(compacted_reth_withdrawal, compacted_alloy_withdrawal); + } + ); } diff --git a/crates/prune/src/builder.rs b/crates/prune/src/builder.rs index 8a14ccf4a..4e0ffd21a 100644 --- a/crates/prune/src/builder.rs +++ b/crates/prune/src/builder.rs @@ -1,10 +1,9 @@ -use std::time::Duration; - use crate::{segments::SegmentSet, Pruner}; use reth_config::PruneConfig; use reth_db::database::Database; use reth_primitives::{FinishedExExHeight, PruneModes, MAINNET}; use reth_provider::ProviderFactory; +use std::time::Duration; use tokio::sync::watch; /// Contains the information required to build a pruner @@ -102,7 +101,7 @@ impl Default for PrunerBuilder { segments: PruneModes::none(), max_reorg_depth: 64, prune_delete_limit: MAINNET.prune_delete_limit, - timeout: Some(Self::DEFAULT_TIMEOUT), + timeout: None, finished_exex_height: watch::channel(FinishedExExHeight::NoExExs).1, } } diff --git a/crates/prune/src/error.rs b/crates/prune/src/error.rs index e12320bc8..bdf5bacc1 100644 --- a/crates/prune/src/error.rs +++ b/crates/prune/src/error.rs @@ -21,3 +21,16 @@ pub enum PrunerError { #[error(transparent)] Provider(#[from] ProviderError), } + +impl From for RethError { + fn from(err: PrunerError) -> Self { + match err { + PrunerError::PruneSegment(_) | PrunerError::InconsistentData(_) => { + RethError::Custom(err.to_string()) + } + PrunerError::Interface(err) => err, + PrunerError::Database(err) => RethError::Database(err), + PrunerError::Provider(err) => RethError::Provider(err), + } + } +} diff --git a/crates/prune/src/pruner.rs b/crates/prune/src/pruner.rs index f3bf963e0..55a998709 100644 --- a/crates/prune/src/pruner.rs +++ b/crates/prune/src/pruner.rs @@ -10,7 +10,9 @@ use reth_primitives::{ BlockNumber, FinishedExExHeight, PruneLimiter, PruneMode, PruneProgress, PrunePurpose, PruneSegment, StaticFileSegment, }; -use reth_provider::{DatabaseProviderRW, ProviderFactory, PruneCheckpointReader}; +use reth_provider::{ + DatabaseProviderRW, ProviderFactory, PruneCheckpointReader, StaticFileProviderFactory, +}; use reth_tokio_util::EventListeners; use std::{ collections::BTreeMap, diff --git a/crates/revm/Cargo.toml b/crates/revm/Cargo.toml index 5c62f324e..2b621ed76 100644 --- a/crates/revm/Cargo.toml +++ b/crates/revm/Cargo.toml @@ -17,12 +17,10 @@ reth-primitives.workspace = true reth-interfaces.workspace = true reth-provider.workspace = true reth-consensus-common.workspace = true -reth-evm.workspace = true reth-trie = { workspace = true, optional = true } # revm revm.workspace 
= true -revm-inspectors.workspace = true # common tracing.workspace = true @@ -32,11 +30,3 @@ reth-trie.workspace = true [features] test-utils = ["dep:reth-trie"] -optimism = [ - "revm/optimism", - "reth-primitives/optimism", - "reth-provider/optimism", - "reth-consensus-common/optimism", - "reth-interfaces/optimism", -] -js-tracer = ["revm-inspectors/js-tracer"] diff --git a/crates/revm/src/database.rs b/crates/revm/src/database.rs index 36a7ec96f..93a22a068 100644 --- a/crates/revm/src/database.rs +++ b/crates/revm/src/database.rs @@ -1,19 +1,12 @@ -use reth_interfaces::RethError; use reth_primitives::{Address, B256, KECCAK_EMPTY, U256}; use reth_provider::{ProviderError, StateProvider}; use revm::{ - db::{CacheDB, DatabaseRef}, + db::DatabaseRef, primitives::{AccountInfo, Bytecode}, - Database, StateDBBox, + Database, }; use std::ops::{Deref, DerefMut}; -/// SubState of database. Uses revm internal cache with binding to reth StateProvider trait. -pub type SubState = CacheDB>; - -/// State boxed database with reth Error. -pub type RethStateDBBox<'a> = StateDBBox<'a, RethError>; - /// Wrapper around StateProvider that implements revm database trait #[derive(Debug, Clone)] pub struct StateProviderDatabase(pub DB); diff --git a/crates/revm/src/factory.rs b/crates/revm/src/factory.rs deleted file mode 100644 index 61e43cc18..000000000 --- a/crates/revm/src/factory.rs +++ /dev/null @@ -1,59 +0,0 @@ -use crate::{ - database::StateProviderDatabase, - processor::EVMProcessor, - stack::{InspectorStack, InspectorStackConfig}, -}; -use reth_evm::ConfigureEvm; -use reth_interfaces::executor::BlockExecutionError; -use reth_primitives::ChainSpec; -use reth_provider::{ExecutorFactory, PrunableBlockExecutor, StateProvider}; -use std::sync::Arc; - -/// Factory for creating [EVMProcessor]. -#[derive(Clone, Debug)] -pub struct EvmProcessorFactory { - chain_spec: Arc, - stack: Option, - /// Type that defines how the produced EVM should be configured. - evm_config: EvmConfig, -} - -impl EvmProcessorFactory { - /// Create new factory - pub fn new(chain_spec: Arc, evm_config: EvmConfig) -> Self { - Self { chain_spec, stack: None, evm_config } - } - - /// Sets the inspector stack for all generated executors. - pub fn with_stack(mut self, stack: InspectorStack) -> Self { - self.stack = Some(stack); - self - } - - /// Sets the inspector stack for all generated executors using the provided config. - pub fn with_stack_config(mut self, config: InspectorStackConfig) -> Self { - self.stack = Some(InspectorStack::new(config)); - self - } -} - -impl ExecutorFactory for EvmProcessorFactory -where - EvmConfig: ConfigureEvm + Send + Sync + Clone + 'static, -{ - fn with_state<'a, SP: StateProvider + 'a>( - &'a self, - sp: SP, - ) -> Box + 'a> { - let database_state = StateProviderDatabase::new(sp); - let mut evm = EVMProcessor::new_with_db( - self.chain_spec.clone(), - database_state, - self.evm_config.clone(), - ); - if let Some(stack) = &self.stack { - evm.set_stack(stack.clone()); - } - Box::new(evm) - } -} diff --git a/crates/revm/src/lib.rs b/crates/revm/src/lib.rs index f4ed01ada..8e5419567 100644 --- a/crates/revm/src/lib.rs +++ b/crates/revm/src/lib.rs @@ -11,36 +11,14 @@ /// Contains glue code for integrating reth database into revm's [Database]. pub mod database; -/// revm implementation of reth block and transaction executors. -mod factory; - pub mod batch; -/// new revm account state executor -pub mod processor; - /// State changes that are not related to transactions. 
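With `SubState` and the boxed-state aliases gone from database.rs, `StateProviderDatabase` remains the single adapter between reth state and revm. The pattern is a plain newtype that forwards lookups into revm's database traits; a minimal sketch against `revm::db::DatabaseRef`, with `AccountSource` standing in for reth's `StateProvider` (illustrative, not the real trait, and method names follow the revm version this diff pins):

```rust
use revm::{
    db::DatabaseRef,
    primitives::{AccountInfo, Address, Bytecode, B256, U256},
};

/// Stand-in for a reth-style state source.
trait AccountSource {
    fn basic(&self, address: Address) -> Option<AccountInfo>;
}

/// Newtype adapter, mirroring `StateProviderDatabase(pub DB)`.
struct SourceDb<S>(pub S);

impl<S: AccountSource> DatabaseRef for SourceDb<S> {
    type Error = std::convert::Infallible;

    fn basic_ref(&self, address: Address) -> Result<Option<AccountInfo>, Self::Error> {
        Ok(self.0.basic(address))
    }

    fn code_by_hash_ref(&self, _code_hash: B256) -> Result<Bytecode, Self::Error> {
        Ok(Bytecode::default()) // a real impl resolves bytecode by hash
    }

    fn storage_ref(&self, _address: Address, _index: U256) -> Result<U256, Self::Error> {
        Ok(U256::ZERO) // a real impl reads the storage slot
    }

    fn block_hash_ref(&self, _number: U256) -> Result<B256, Self::Error> {
        Ok(B256::ZERO) // a real impl looks up the block hash
    }
}
```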
pub mod state_change; -/// revm executor factory. -pub use factory::EvmProcessorFactory; - -/// Ethereum DAO hardfork state change data. -pub mod eth_dao_fork; - -/// An inspector stack abstracting the implementation details of -/// each inspector and allowing to hook on block/transaction execution, -/// used in the main Reth executor. -pub mod stack; - -/// Optimism-specific implementation and utilities for the executor -#[cfg(feature = "optimism")] -pub mod optimism; - /// Common test helpers #[cfg(any(test, feature = "test-utils"))] pub mod test_utils; // Convenience re-exports. pub use revm::{self, *}; -pub use revm_inspectors::*; diff --git a/crates/revm/src/optimism/processor.rs b/crates/revm/src/optimism/processor.rs deleted file mode 100644 index 78940c8b5..000000000 --- a/crates/revm/src/optimism/processor.rs +++ /dev/null @@ -1,400 +0,0 @@ -use crate::processor::{compare_receipts_root_and_logs_bloom, EVMProcessor}; -use reth_evm::ConfigureEvm; -use reth_interfaces::executor::{ - BlockExecutionError, BlockValidationError, OptimismBlockExecutionError, -}; -use reth_primitives::{ - proofs::calculate_receipt_root_optimism, revm_primitives::ResultAndState, BlockWithSenders, - Bloom, ChainSpec, Hardfork, Receipt, ReceiptWithBloom, TxType, B256, U256, -}; -use reth_provider::{BlockExecutor, BundleStateWithReceipts}; -use revm::DatabaseCommit; -use std::time::Instant; -use tracing::{debug, trace}; - -/// Verify the calculated receipts root against the expected receipts root. -pub fn verify_receipt_optimism<'a>( - expected_receipts_root: B256, - expected_logs_bloom: Bloom, - receipts: impl Iterator + Clone, - chain_spec: &ChainSpec, - timestamp: u64, -) -> Result<(), BlockExecutionError> { - // Calculate receipts root. - let receipts_with_bloom = receipts.map(|r| r.clone().into()).collect::>(); - let receipts_root = - calculate_receipt_root_optimism(&receipts_with_bloom, chain_spec, timestamp); - - // Create header log bloom. - let logs_bloom = receipts_with_bloom.iter().fold(Bloom::ZERO, |bloom, r| bloom | r.bloom); - - compare_receipts_root_and_logs_bloom( - receipts_root, - logs_bloom, - expected_receipts_root, - expected_logs_bloom, - )?; - - Ok(()) -} - -impl<'a, EvmConfig> BlockExecutor for EVMProcessor<'a, EvmConfig> -where - EvmConfig: ConfigureEvm, -{ - type Error = BlockExecutionError; - - fn execute_and_verify_receipt( - &mut self, - block: &BlockWithSenders, - total_difficulty: U256, - ) -> Result<(), BlockExecutionError> { - // execute block - let receipts = self.execute_inner(block, total_difficulty)?; - - // TODO Before Byzantium, receipts contained state root that would mean that expensive - // operation as hashing that is needed for state root got calculated in every - // transaction This was replaced with is_success flag. 
- // See more about EIP here: https://eips.ethereum.org/EIPS/eip-658 - if self.chain_spec.fork(Hardfork::Byzantium).active_at_block(block.header.number) { - let time = Instant::now(); - if let Err(error) = verify_receipt_optimism( - block.header.receipts_root, - block.header.logs_bloom, - receipts.iter(), - self.chain_spec.as_ref(), - block.timestamp, - ) { - debug!(target: "evm", %error, ?receipts, "receipts verification failed"); - return Err(error) - }; - self.stats.receipt_root_duration += time.elapsed(); - } - - self.batch_record.save_receipts(receipts) - } - - fn execute_transactions( - &mut self, - block: &BlockWithSenders, - total_difficulty: U256, - ) -> Result<(Vec, u64), BlockExecutionError> { - self.init_env(&block.header, total_difficulty); - - // perf: do not execute empty blocks - if block.body.is_empty() { - return Ok((Vec::new(), 0)) - } - - let is_regolith = - self.chain_spec.fork(Hardfork::Regolith).active_at_timestamp(block.timestamp); - - // Ensure that the create2deployer is force-deployed at the canyon transition. Optimism - // blocks will always have at least a single transaction in them (the L1 info transaction), - // so we can safely assume that this will always be triggered upon the transition and that - // the above check for empty blocks will never be hit on OP chains. - super::ensure_create2_deployer(self.chain_spec().clone(), block.timestamp, self.db_mut()) - .map_err(|_| { - BlockExecutionError::OptimismBlockExecution( - OptimismBlockExecutionError::ForceCreate2DeployerFail, - ) - })?; - - let mut cumulative_gas_used = 0; - let mut receipts = Vec::with_capacity(block.body.len()); - for (sender, transaction) in block.transactions_with_sender() { - let time = Instant::now(); - // The sum of the transaction’s gas limit, Tg, and the gas utilized in this block prior, - // must be no greater than the block’s gasLimit. - let block_available_gas = block.header.gas_limit - cumulative_gas_used; - if transaction.gas_limit() > block_available_gas && - (is_regolith || !transaction.is_system_transaction()) - { - return Err(BlockValidationError::TransactionGasLimitMoreThanAvailableBlockGas { - transaction_gas_limit: transaction.gas_limit(), - block_available_gas, - } - .into()) - } - - // An optimism block should never contain blob transactions. - if matches!(transaction.tx_type(), TxType::Eip4844) { - return Err(BlockExecutionError::OptimismBlockExecution( - OptimismBlockExecutionError::BlobTransactionRejected, - )) - } - - // Cache the depositor account prior to the state transition for the deposit nonce. - // - // Note that this *only* needs to be done post-regolith hardfork, as deposit nonces - // were not introduced in Bedrock. In addition, regular transactions don't have deposit - // nonces, so we don't need to touch the DB for those. - let depositor = (is_regolith && transaction.is_deposit()) - .then(|| { - self.db_mut() - .load_cache_account(*sender) - .map(|acc| acc.account_info().unwrap_or_default()) - }) - .transpose() - .map_err(|_| { - BlockExecutionError::OptimismBlockExecution( - OptimismBlockExecutionError::AccountLoadFailed(*sender), - ) - })?; - - // Execute transaction. 
- let ResultAndState { result, state } = self.transact(transaction, *sender)?; - trace!( - target: "evm", - ?transaction, ?result, ?state, - "Executed transaction" - ); - self.stats.execution_duration += time.elapsed(); - let time = Instant::now(); - - self.db_mut().commit(state); - - self.stats.apply_state_duration += time.elapsed(); - - // append gas used - cumulative_gas_used += result.gas_used(); - - // Push transaction changeset and calculate header bloom filter for receipt. - receipts.push(Receipt { - tx_type: transaction.tx_type(), - // Success flag was added in `EIP-658: Embedding transaction status code in - // receipts`. - success: result.is_success(), - cumulative_gas_used, - // convert to reth log - logs: result.into_logs().into_iter().map(Into::into).collect(), - #[cfg(feature = "optimism")] - deposit_nonce: depositor.map(|account| account.nonce), - // The deposit receipt version was introduced in Canyon to indicate an update to how - // receipt hashes should be computed when set. The state transition process ensures - // this is only set for post-Canyon deposit transactions. - #[cfg(feature = "optimism")] - deposit_receipt_version: (transaction.is_deposit() && - self.chain_spec() - .is_fork_active_at_timestamp(Hardfork::Canyon, block.timestamp)) - .then_some(1), - }); - } - - Ok((receipts, cumulative_gas_used)) - } - - fn take_output_state(&mut self) -> BundleStateWithReceipts { - BundleStateWithReceipts::new( - self.evm.context.evm.db.take_bundle(), - self.batch_record.take_receipts(), - self.batch_record.first_block().unwrap_or_default(), - ) - } - - fn size_hint(&self) -> Option { - Some(self.evm.context.evm.db.bundle_size_hint()) - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::{ - database::StateProviderDatabase, - test_utils::{StateProviderTest, TestEvmConfig}, - }; - use reth_primitives::{ - b256, Account, Address, Block, ChainSpecBuilder, Header, Signature, StorageKey, - StorageValue, Transaction, TransactionKind, TransactionSigned, TxEip1559, BASE_MAINNET, - }; - use revm::L1_BLOCK_CONTRACT; - use std::{collections::HashMap, str::FromStr, sync::Arc}; - - fn create_op_state_provider() -> StateProviderTest { - let mut db = StateProviderTest::default(); - - let l1_block_contract_account = - Account { balance: U256::ZERO, bytecode_hash: None, nonce: 1 }; - - let mut l1_block_storage = HashMap::new(); - // base fee - l1_block_storage.insert(StorageKey::with_last_byte(1), StorageValue::from(1000000000)); - // l1 fee overhead - l1_block_storage.insert(StorageKey::with_last_byte(5), StorageValue::from(188)); - // l1 fee scalar - l1_block_storage.insert(StorageKey::with_last_byte(6), StorageValue::from(684000)); - // l1 free scalars post ecotone - l1_block_storage.insert( - StorageKey::with_last_byte(3), - StorageValue::from_str( - "0x0000000000000000000000000000000000001db0000d27300000000000000005", - ) - .unwrap(), - ); - - db.insert_account(L1_BLOCK_CONTRACT, l1_block_contract_account, None, l1_block_storage); - - db - } - - fn create_op_evm_processor<'a>( - chain_spec: Arc, - db: StateProviderTest, - ) -> EVMProcessor<'a, TestEvmConfig> { - let mut executor = EVMProcessor::new_with_db( - chain_spec, - StateProviderDatabase::new(db), - TestEvmConfig::default(), - ); - executor.evm.context.evm.db.load_cache_account(L1_BLOCK_CONTRACT).unwrap(); - executor - } - - #[test] - fn op_deposit_fields_pre_canyon() { - let header = Header { - timestamp: 1, - number: 1, - gas_limit: 1_000_000, - gas_used: 42_000, - receipts_root: b256!( - 
"83465d1e7d01578c0d609be33570f91242f013e9e295b0879905346abbd63731" - ), - ..Default::default() - }; - - let mut db = create_op_state_provider(); - - let addr = Address::ZERO; - let account = Account { balance: U256::MAX, ..Account::default() }; - db.insert_account(addr, account, None, HashMap::new()); - - let chain_spec = - Arc::new(ChainSpecBuilder::from(&*BASE_MAINNET).regolith_activated().build()); - - let tx = TransactionSigned::from_transaction_and_signature( - Transaction::Eip1559(TxEip1559 { - chain_id: chain_spec.chain.id(), - nonce: 0, - gas_limit: 21_000, - to: TransactionKind::Call(addr), - ..Default::default() - }), - Signature::default(), - ); - - let tx_deposit = TransactionSigned::from_transaction_and_signature( - Transaction::Deposit(reth_primitives::TxDeposit { - from: addr, - to: TransactionKind::Call(addr), - gas_limit: 21_000, - ..Default::default() - }), - Signature::default(), - ); - - let mut executor = create_op_evm_processor(chain_spec, db); - - // Attempt to execute a block with one deposit and one non-deposit transaction - executor - .execute_and_verify_receipt( - &BlockWithSenders { - block: Block { - header, - body: vec![tx, tx_deposit], - ommers: vec![], - withdrawals: None, - }, - senders: vec![addr, addr], - }, - U256::ZERO, - ) - .unwrap(); - - let tx_receipt = executor.receipts()[0][0].as_ref().unwrap(); - let deposit_receipt = executor.receipts()[0][1].as_ref().unwrap(); - - // deposit_receipt_version is not present in pre canyon transactions - assert!(deposit_receipt.deposit_receipt_version.is_none()); - assert!(tx_receipt.deposit_receipt_version.is_none()); - - // deposit_nonce is present only in deposit transactions - assert!(deposit_receipt.deposit_nonce.is_some()); - assert!(tx_receipt.deposit_nonce.is_none()); - } - - #[test] - fn op_deposit_fields_post_canyon() { - // ensure_create2_deployer will fail if timestamp is set to less then 2 - let header = Header { - timestamp: 2, - number: 1, - gas_limit: 1_000_000, - gas_used: 42_000, - receipts_root: b256!( - "fffc85c4004fd03c7bfbe5491fae98a7473126c099ac11e8286fd0013f15f908" - ), - ..Default::default() - }; - - let mut db = create_op_state_provider(); - let addr = Address::ZERO; - let account = Account { balance: U256::MAX, ..Account::default() }; - - db.insert_account(addr, account, None, HashMap::new()); - - let chain_spec = - Arc::new(ChainSpecBuilder::from(&*BASE_MAINNET).canyon_activated().build()); - - let tx = TransactionSigned::from_transaction_and_signature( - Transaction::Eip1559(TxEip1559 { - chain_id: chain_spec.chain.id(), - nonce: 0, - gas_limit: 21_000, - to: TransactionKind::Call(addr), - ..Default::default() - }), - Signature::default(), - ); - - let tx_deposit = TransactionSigned::from_transaction_and_signature( - Transaction::Deposit(reth_primitives::TxDeposit { - from: addr, - to: TransactionKind::Call(addr), - gas_limit: 21_000, - ..Default::default() - }), - Signature::optimism_deposit_tx_signature(), - ); - - let mut executor = create_op_evm_processor(chain_spec, db); - - // attempt to execute an empty block with parent beacon block root, this should not fail - executor - .execute_and_verify_receipt( - &BlockWithSenders { - block: Block { - header, - body: vec![tx, tx_deposit], - ommers: vec![], - withdrawals: None, - }, - senders: vec![addr, addr], - }, - U256::ZERO, - ) - .expect("Executing a block while canyon is active should not fail"); - - let tx_receipt = executor.receipts()[0][0].as_ref().unwrap(); - let deposit_receipt = executor.receipts()[0][1].as_ref().unwrap(); - - 
// deposit_receipt_version is set to 1 for post canyon deposit transactions - assert_eq!(deposit_receipt.deposit_receipt_version, Some(1)); - assert!(tx_receipt.deposit_receipt_version.is_none()); - - // deposit_nonce is present only in deposit transactions - assert!(deposit_receipt.deposit_nonce.is_some()); - assert!(tx_receipt.deposit_nonce.is_none()); - } -} diff --git a/crates/revm/src/processor.rs b/crates/revm/src/processor.rs deleted file mode 100644 index f467b22a0..000000000 --- a/crates/revm/src/processor.rs +++ /dev/null @@ -1,877 +0,0 @@ -#[cfg(not(feature = "optimism"))] -use revm::DatabaseCommit; -use revm::{ - db::StateDBBox, - inspector_handle_register, - interpreter::Host, - primitives::{CfgEnvWithHandlerCfg, ResultAndState}, - Evm, State, -}; -use std::{sync::Arc, time::Instant}; -#[cfg(not(feature = "optimism"))] -use tracing::{debug, trace}; - -use reth_evm::ConfigureEvm; -use reth_interfaces::executor::{BlockExecutionError, BlockValidationError}; -#[cfg(feature = "optimism")] -use reth_primitives::revm::env::fill_op_tx_env; -#[cfg(not(feature = "optimism"))] -use reth_primitives::revm::env::fill_tx_env; -use reth_primitives::{ - Address, Block, BlockNumber, BlockWithSenders, Bloom, ChainSpec, GotExpected, Hardfork, Header, - PruneModes, Receipt, ReceiptWithBloom, Receipts, TransactionSigned, Withdrawals, B256, U256, -}; -#[cfg(not(feature = "optimism"))] -use reth_provider::BundleStateWithReceipts; -use reth_provider::{BlockExecutor, ProviderError, PrunableBlockExecutor, StateProvider}; - -use crate::{ - batch::{BlockBatchRecord, BlockExecutorStats}, - database::StateProviderDatabase, - eth_dao_fork::{DAO_HARDFORK_BENEFICIARY, DAO_HARDKFORK_ACCOUNTS}, - stack::{InspectorStack, InspectorStackConfig}, - state_change::{apply_beacon_root_contract_call, post_block_balance_increments}, -}; - -/// EVMProcessor is a block executor that uses revm to execute blocks or multiple blocks. -/// -/// Output is obtained by calling `take_output_state` function. -/// -/// It is capable of pruning the data that will be written to the database -/// and implemented [PrunableBlockExecutor] traits. -/// -/// It implemented the [BlockExecutor] that give it the ability to take block -/// apply pre state (Cancun system contract call), execute transaction and apply -/// state change and then apply post execution changes (block reward, withdrawals, irregular DAO -/// hardfork state change). And if `execute_and_verify_receipt` is called it will verify the -/// receipt. -/// -/// InspectorStack are used for optional inspecting execution. And it contains -/// various duration of parts of execution. -#[allow(missing_debug_implementations)] -pub struct EVMProcessor<'a, EvmConfig> { - /// The configured chain-spec - pub(crate) chain_spec: Arc, - /// revm instance that contains database and env environment. - pub(crate) evm: Evm<'a, InspectorStack, StateDBBox<'a, ProviderError>>, - /// Keeps track of the recorded receipts and pruning configuration. - pub(crate) batch_record: BlockBatchRecord, - /// Execution stats - pub(crate) stats: BlockExecutorStats, - /// The type that is able to configure the EVM environment. - _evm_config: EvmConfig, -} - -impl<'a, EvmConfig> EVMProcessor<'a, EvmConfig> -where - EvmConfig: ConfigureEvm, -{ - /// Return chain spec. - pub fn chain_spec(&self) -> &Arc { - &self.chain_spec - } - - /// Creates a new executor from the given chain spec and database. 
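The `new_with_db` constructor removed just below built the revm `State` in three steps: box the database, enable bundle-update tracking so reverts can be extracted later, and leave the EIP-161 state-clear flag unset until `init_env` knows the active hardfork. A compact sketch of that builder chain, using revm's `EmptyDB` as a placeholder database:

```rust
use revm::{db::EmptyDB, State};

/// Build a revm `State` the way the removed `new_with_db` did.
fn build_state() -> State<EmptyDB> {
    State::builder()
        .with_database(EmptyDB::default())
        .with_bundle_update()  // record reverts for later bundle extraction
        .without_state_clear() // set per block once the hardfork is known
        .build()
}
```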
- pub fn new_with_db( - chain_spec: Arc<ChainSpec>, - db: StateProviderDatabase, - evm_config: EvmConfig, - ) -> Self { - let state = State::builder() - .with_database_boxed(Box::new(db)) - .with_bundle_update() - .without_state_clear() - .build(); - EVMProcessor::new_with_state(chain_spec, state, evm_config) - } - - /// Create a new EVM processor with the given revm state. - pub fn new_with_state( - chain_spec: Arc<ChainSpec>, - revm_state: StateDBBox<'a, ProviderError>, - evm_config: EvmConfig, - ) -> Self { - let stack = InspectorStack::new(InspectorStackConfig::default()); - let evm = evm_config.evm_with_inspector(revm_state, stack); - EVMProcessor { - chain_spec, - evm, - batch_record: BlockBatchRecord::default(), - stats: BlockExecutorStats::default(), - _evm_config: evm_config, - } - } - - /// Configures the executor with the given inspectors. - pub fn set_stack(&mut self, stack: InspectorStack) { - self.evm.context.external = stack; - } - - /// Sets the first block number of the batch. - pub fn set_first_block(&mut self, num: BlockNumber) { - self.batch_record.set_first_block(num); - } - - /// Saves the receipts to the batch record. - pub fn save_receipts(&mut self, receipts: Vec<Receipt>) -> Result<(), BlockExecutionError> { - self.batch_record.save_receipts(receipts) - } - - /// Returns the recorded receipts. - pub fn receipts(&self) -> &Receipts { - self.batch_record.receipts() - } - - /// Returns a mutable reference to the database - pub fn db_mut(&mut self) -> &mut StateDBBox<'a, ProviderError> { - &mut self.evm.context.evm.db - } - - /// Initializes the config and block env. - pub(crate) fn init_env(&mut self, header: &Header, total_difficulty: U256) { - // Set state clear flag. - let state_clear_flag = - self.chain_spec.fork(Hardfork::SpuriousDragon).active_at_block(header.number); - - self.db_mut().set_state_clear_flag(state_clear_flag); - - let mut cfg = - CfgEnvWithHandlerCfg::new_with_spec_id(self.evm.cfg().clone(), self.evm.spec_id()); - EvmConfig::fill_cfg_and_block_env( - &mut cfg, - self.evm.block_mut(), - &self.chain_spec, - header, - total_difficulty, - ); - *self.evm.cfg_mut() = cfg.cfg_env; - - // This will update the spec in case it changed - self.evm.modify_spec_id(cfg.handler_cfg.spec_id); - } - - /// Applies the pre-block call to the EIP-4788 beacon block root contract. - /// - /// If cancun is not activated or the block is the genesis block, then this is a no-op, and no - /// state changes are made. - fn apply_beacon_root_contract_call( - &mut self, - block: &Block, - ) -> Result<(), BlockExecutionError> { - apply_beacon_root_contract_call( - &self.chain_spec, - block.timestamp, - block.number, - block.parent_beacon_block_root, - &mut self.evm, - )?; - Ok(()) - } - - /// Apply post execution state changes, including block rewards, withdrawals, and irregular DAO - /// hardfork state change. - pub fn apply_post_execution_state_change( - &mut self, - block: &Block, - total_difficulty: U256, - ) -> Result<(), BlockExecutionError> { - let mut balance_increments = post_block_balance_increments( - &self.chain_spec, - block.number, - block.difficulty, - block.beneficiary, - block.timestamp, - total_difficulty, - &block.ommers, - block.withdrawals.as_ref().map(Withdrawals::as_ref), - ); - - // Irregular state change at Ethereum DAO hardfork - if self.chain_spec.fork(Hardfork::Dao).transitions_at_block(block.number) { - // drain balances from hardcoded addresses.
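- // The drained total is summed and credited to the DAO beneficiary through the balance-increments map below.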
- let drained_balance: u128 = self - .db_mut() - .drain_balances(DAO_HARDKFORK_ACCOUNTS) - .map_err(|_| BlockValidationError::IncrementBalanceFailed)? - .into_iter() - .sum(); - - // return balance to DAO beneficiary. - *balance_increments.entry(DAO_HARDFORK_BENEFICIARY).or_default() += drained_balance; - } - // increment balances - self.db_mut() - .increment_balances(balance_increments) - .map_err(|_| BlockValidationError::IncrementBalanceFailed)?; - - Ok(()) - } - - /// Runs a single transaction in the configured environment and proceeds - /// to return the result and state diff (without applying it). - /// - /// Assumes the rest of the block environment has been filled via `init_block_env`. - pub fn transact( - &mut self, - transaction: &TransactionSigned, - sender: Address, - ) -> Result { - // Fill revm structure. - #[cfg(not(feature = "optimism"))] - fill_tx_env(self.evm.tx_mut(), transaction, sender); - - #[cfg(feature = "optimism")] - { - let mut envelope_buf = Vec::with_capacity(transaction.length_without_header()); - transaction.encode_enveloped(&mut envelope_buf); - fill_op_tx_env(self.evm.tx_mut(), transaction, sender, envelope_buf.into()); - } - - let hash = transaction.hash_ref(); - let should_inspect = self.evm.context.external.should_inspect(self.evm.env(), hash); - let out = if should_inspect { - // push inspector handle register. - self.evm.handler.append_handler_register_plain(inspector_handle_register); - let output = self.evm.transact(); - tracing::trace!( - target: "evm", - %hash, ?output, ?transaction, env = ?self.evm.context.evm.env, - "Executed transaction" - ); - // pop last handle register - self.evm.handler.pop_handle_register(); - output - } else { - // Main execution without needing the hash - self.evm.transact() - }; - - out.map_err(move |e| { - // Ensure hash is calculated for error log, if not already done - BlockValidationError::EVM { hash: transaction.recalculate_hash(), error: e.into() } - .into() - }) - } - - /// Execute the block, verify gas usage and apply post-block state changes. - pub(crate) fn execute_inner( - &mut self, - block: &BlockWithSenders, - total_difficulty: U256, - ) -> Result, BlockExecutionError> { - self.init_env(&block.header, total_difficulty); - self.apply_beacon_root_contract_call(block)?; - let (receipts, cumulative_gas_used) = self.execute_transactions(block, total_difficulty)?; - - // Check if gas used matches the value set in header. - if block.gas_used != cumulative_gas_used { - let receipts = Receipts::from_block_receipt(receipts); - return Err(BlockValidationError::BlockGasUsed { - gas: GotExpected { got: cumulative_gas_used, expected: block.gas_used }, - gas_spent_by_tx: receipts.gas_spent_by_tx()?, - } - .into()) - } - let time = Instant::now(); - self.apply_post_execution_state_change(block, total_difficulty)?; - self.stats.apply_post_execution_state_changes_duration += time.elapsed(); - - let time = Instant::now(); - let retention = self.batch_record.bundle_retention(block.number); - self.db_mut().merge_transitions(retention); - self.stats.merge_transitions_duration += time.elapsed(); - - if self.batch_record.first_block().is_none() { - self.batch_record.set_first_block(block.number); - } - - Ok(receipts) - } -} - -/// Default Ethereum implementation of the [BlockExecutor] trait for the [EVMProcessor]. 
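-/// -/// This implementation is compiled only when the `optimism` feature is disabled; the OP build supplies its own `BlockExecutor` implementation with deposit-receipt handling.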
-#[cfg(not(feature = "optimism"))] -impl<'a, EvmConfig> BlockExecutor for EVMProcessor<'a, EvmConfig> -where - EvmConfig: ConfigureEvm, -{ - type Error = BlockExecutionError; - - fn execute_and_verify_receipt( - &mut self, - block: &BlockWithSenders, - total_difficulty: U256, - ) -> Result<(), BlockExecutionError> { - // execute block - let receipts = self.execute_inner(block, total_difficulty)?; - - // TODO Before Byzantium, receipts contained state root that would mean that expensive - // operation as hashing that is needed for state root got calculated in every - // transaction This was replaced with is_success flag. - // See more about EIP here: https://eips.ethereum.org/EIPS/eip-658 - if self.chain_spec.fork(Hardfork::Byzantium).active_at_block(block.header.number) { - let time = Instant::now(); - if let Err(error) = - verify_receipt(block.header.receipts_root, block.header.logs_bloom, receipts.iter()) - { - debug!(target: "evm", %error, ?receipts, "receipts verification failed"); - return Err(error) - }; - self.stats.receipt_root_duration += time.elapsed(); - } - - self.batch_record.save_receipts(receipts)?; - Ok(()) - } - - fn execute_transactions( - &mut self, - block: &BlockWithSenders, - total_difficulty: U256, - ) -> Result<(Vec, u64), BlockExecutionError> { - self.init_env(&block.header, total_difficulty); - - // perf: do not execute empty blocks - if block.body.is_empty() { - return Ok((Vec::new(), 0)) - } - - let mut cumulative_gas_used = 0; - let mut receipts = Vec::with_capacity(block.body.len()); - for (sender, transaction) in block.transactions_with_sender() { - let time = Instant::now(); - // The sum of the transaction’s gas limit, Tg, and the gas utilized in this block prior, - // must be no greater than the block’s gasLimit. - let block_available_gas = block.header.gas_limit - cumulative_gas_used; - if transaction.gas_limit() > block_available_gas { - return Err(BlockValidationError::TransactionGasLimitMoreThanAvailableBlockGas { - transaction_gas_limit: transaction.gas_limit(), - block_available_gas, - } - .into()) - } - // Execute transaction. - let ResultAndState { result, state } = self.transact(transaction, *sender)?; - trace!( - target: "evm", - ?transaction, ?result, ?state, - "Executed transaction" - ); - self.stats.execution_duration += time.elapsed(); - let time = Instant::now(); - - self.db_mut().commit(state); - - self.stats.apply_state_duration += time.elapsed(); - - // append gas used - cumulative_gas_used += result.gas_used(); - - // Push transaction changeset and calculate header bloom filter for receipt. - receipts.push(Receipt { - tx_type: transaction.tx_type(), - // Success flag was added in `EIP-658: Embedding transaction status code in - // receipts`. 
- success: result.is_success(), - cumulative_gas_used, - // convert to reth log - logs: result.into_logs().into_iter().map(Into::into).collect(), - }); - } - - Ok((receipts, cumulative_gas_used)) - } - - fn take_output_state(&mut self) -> BundleStateWithReceipts { - self.stats.log_debug(); - BundleStateWithReceipts::new( - self.evm.context.evm.db.take_bundle(), - self.batch_record.take_receipts(), - self.batch_record.first_block().unwrap_or_default(), - ) - } - - fn size_hint(&self) -> Option<usize> { - Some(self.evm.context.evm.db.bundle_size_hint()) - } -} - -impl<'a, EvmConfig> PrunableBlockExecutor for EVMProcessor<'a, EvmConfig> -where - EvmConfig: ConfigureEvm, -{ - fn set_tip(&mut self, tip: BlockNumber) { - self.batch_record.set_tip(tip); - } - - fn set_prune_modes(&mut self, prune_modes: PruneModes) { - self.batch_record.set_prune_modes(prune_modes); - } -} - -/// Calculate the receipts root and compare it against the expected receipts root and logs -/// bloom. -pub fn verify_receipt<'a>( - expected_receipts_root: B256, - expected_logs_bloom: Bloom, - receipts: impl Iterator<Item = &'a Receipt> + Clone, -) -> Result<(), BlockExecutionError> { - // Calculate receipts root. - let receipts_with_bloom = receipts.map(|r| r.clone().into()).collect::<Vec<ReceiptWithBloom>>(); - let receipts_root = reth_primitives::proofs::calculate_receipt_root(&receipts_with_bloom); - - // Create header log bloom. - let logs_bloom = receipts_with_bloom.iter().fold(Bloom::ZERO, |bloom, r| bloom | r.bloom); - - compare_receipts_root_and_logs_bloom( - receipts_root, - logs_bloom, - expected_receipts_root, - expected_logs_bloom, - )?; - - Ok(()) -} - -/// Compare the calculated receipts root with the expected receipts root, and also compare -/// the calculated logs bloom with the expected logs bloom. -pub fn compare_receipts_root_and_logs_bloom( - calculated_receipts_root: B256, - calculated_logs_bloom: Bloom, - expected_receipts_root: B256, - expected_logs_bloom: Bloom, -) -> Result<(), BlockExecutionError> { - if calculated_receipts_root != expected_receipts_root { - return Err(BlockValidationError::ReceiptRootDiff( - GotExpected { got: calculated_receipts_root, expected: expected_receipts_root }.into(), - ) - .into()) - } - - if calculated_logs_bloom != expected_logs_bloom { - return Err(BlockValidationError::BloomLogDiff( - GotExpected { got: calculated_logs_bloom, expected: expected_logs_bloom }.into(), - ) - .into()) - } - - Ok(()) -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::test_utils::{StateProviderTest, TestEvmConfig}; - use reth_primitives::{ - bytes, - constants::{BEACON_ROOTS_ADDRESS, EIP1559_INITIAL_BASE_FEE, SYSTEM_ADDRESS}, - keccak256, Account, Bytes, ChainSpecBuilder, ForkCondition, Signature, Transaction, - TransactionKind, TxEip1559, MAINNET, - }; - use revm::{Database, TransitionState}; - use std::collections::HashMap; - - static BEACON_ROOT_CONTRACT_CODE: Bytes = bytes!("3373fffffffffffffffffffffffffffffffffffffffe14604d57602036146024575f5ffd5b5f35801560495762001fff810690815414603c575f5ffd5b62001fff01545f5260205ff35b5f5ffd5b62001fff42064281555f359062001fff015500"); - - fn create_state_provider_with_beacon_root_contract() -> StateProviderTest { - let mut db = StateProviderTest::default(); - - let beacon_root_contract_account = Account { - balance: U256::ZERO, - bytecode_hash: Some(keccak256(BEACON_ROOT_CONTRACT_CODE.clone())), - nonce: 1, - }; - - db.insert_account( - BEACON_ROOTS_ADDRESS, - beacon_root_contract_account, - Some(BEACON_ROOT_CONTRACT_CODE.clone()), - HashMap::new(), - ); - - db - } - - #[test] - fn
eip_4788_non_genesis_call() { - let mut header = - Header { timestamp: 1, number: 1, excess_blob_gas: Some(0), ..Header::default() }; - - let db = create_state_provider_with_beacon_root_contract(); - - let chain_spec = Arc::new( - ChainSpecBuilder::from(&*MAINNET) - .shanghai_activated() - .with_fork(Hardfork::Cancun, ForkCondition::Timestamp(1)) - .build(), - ); - - // execute invalid header (no parent beacon block root) - let mut executor = EVMProcessor::new_with_db( - chain_spec, - StateProviderDatabase::new(db), - TestEvmConfig::default(), - ); - - // attempt to execute a block without parent beacon block root, expect err - let err = executor - .execute_and_verify_receipt( - &BlockWithSenders { - block: Block { - header: header.clone(), - body: vec![], - ommers: vec![], - withdrawals: None, - }, - senders: vec![], - }, - U256::ZERO, - ) - .expect_err( - "Executing cancun block without parent beacon block root field should fail", - ); - assert_eq!( - err, - BlockExecutionError::Validation(BlockValidationError::MissingParentBeaconBlockRoot) - ); - - // fix the header: set the parent beacon block root - header.parent_beacon_block_root = Some(B256::with_last_byte(0x69)); - - // Now execute a block with the fixed header, ensure that it does not fail - executor - .execute_and_verify_receipt( - &BlockWithSenders { - block: Block { - header: header.clone(), - body: vec![], - ommers: vec![], - withdrawals: None, - }, - senders: vec![], - }, - U256::ZERO, - ) - .unwrap(); - - // check the actual storage of the contract - it should be: - // * The storage value at header.timestamp % HISTORY_BUFFER_LENGTH should be - // header.timestamp - // * The storage value at header.timestamp % HISTORY_BUFFER_LENGTH + HISTORY_BUFFER_LENGTH - // should be parent_beacon_block_root - let history_buffer_length = 8191u64; - let timestamp_index = header.timestamp % history_buffer_length; - let parent_beacon_block_root_index = - timestamp_index % history_buffer_length + history_buffer_length; - - // get timestamp storage and compare - let timestamp_storage = - executor.db_mut().storage(BEACON_ROOTS_ADDRESS, U256::from(timestamp_index)).unwrap(); - assert_eq!(timestamp_storage, U256::from(header.timestamp)); - - // get parent beacon block root storage and compare - let parent_beacon_block_root_storage = executor - .db_mut() - .storage(BEACON_ROOTS_ADDRESS, U256::from(parent_beacon_block_root_index)) - .expect("storage value should exist"); - assert_eq!(parent_beacon_block_root_storage, U256::from(0x69)); - } - - #[test] - fn eip_4788_no_code_cancun() { - // This test ensures that we "silently fail" when cancun is active and there is no code at - // BEACON_ROOTS_ADDRESS - let header = Header { - timestamp: 1, - number: 1, - parent_beacon_block_root: Some(B256::with_last_byte(0x69)), - excess_blob_gas: Some(0), - ..Header::default() - }; - - let db = StateProviderTest::default(); - - // DON'T deploy the contract at genesis - let chain_spec = Arc::new( - ChainSpecBuilder::from(&*MAINNET) - .shanghai_activated() - .with_fork(Hardfork::Cancun, ForkCondition::Timestamp(1)) - .build(), - ); - - let mut executor = EVMProcessor::new_with_db( - chain_spec, - StateProviderDatabase::new(db), - TestEvmConfig::default(), - ); - executor.init_env(&header, U256::ZERO); - - // get the env - let previous_env = executor.evm.context.evm.env.clone(); - - // attempt to execute an empty block with parent beacon block root, this should not fail - executor - .execute_and_verify_receipt( - &BlockWithSenders { - block: Block { - header: header.clone(), - body:
vec![], - ommers: vec![], - withdrawals: None, - }, - senders: vec![], - }, - U256::ZERO, - ) - .expect( - "Executing a block with no transactions while cancun is active should not fail", - ); - - // ensure that the env has not changed - assert_eq!(executor.evm.context.evm.env, previous_env); - } - - #[test] - fn eip_4788_empty_account_call() { - // This test ensures that we do not increment the nonce of an empty SYSTEM_ADDRESS account - // during the pre-block call - - let mut db = create_state_provider_with_beacon_root_contract(); - - // insert an empty SYSTEM_ADDRESS - db.insert_account(SYSTEM_ADDRESS, Account::default(), None, HashMap::new()); - - let chain_spec = Arc::new( - ChainSpecBuilder::from(&*MAINNET) - .shanghai_activated() - .with_fork(Hardfork::Cancun, ForkCondition::Timestamp(1)) - .build(), - ); - - let mut executor = EVMProcessor::new_with_db( - chain_spec, - StateProviderDatabase::new(db), - TestEvmConfig::default(), - ); - - // construct the header for block one - let header = Header { - timestamp: 1, - number: 1, - parent_beacon_block_root: Some(B256::with_last_byte(0x69)), - excess_blob_gas: Some(0), - ..Header::default() - }; - - executor.init_env(&header, U256::ZERO); - - // attempt to execute an empty block with parent beacon block root, this should not fail - executor - .execute_and_verify_receipt( - &BlockWithSenders { - block: Block { - header: header.clone(), - body: vec![], - ommers: vec![], - withdrawals: None, - }, - senders: vec![], - }, - U256::ZERO, - ) - .expect( - "Executing a block with no transactions while cancun is active should not fail", - ); - - // ensure that the nonce of the system address account has not changed - let nonce = executor.db_mut().basic(SYSTEM_ADDRESS).unwrap().unwrap().nonce; - assert_eq!(nonce, 0); - } - - #[test] - fn eip_4788_genesis_call() { - let db = create_state_provider_with_beacon_root_contract(); - - // activate cancun at genesis - let chain_spec = Arc::new( - ChainSpecBuilder::from(&*MAINNET) - .shanghai_activated() - .with_fork(Hardfork::Cancun, ForkCondition::Timestamp(0)) - .build(), - ); - - let mut header = chain_spec.genesis_header(); - - let mut executor = EVMProcessor::new_with_db( - chain_spec, - StateProviderDatabase::new(db), - TestEvmConfig::default(), - ); - executor.init_env(&header, U256::ZERO); - - // attempt to execute the genesis block with non-zero parent beacon block root, expect err - header.parent_beacon_block_root = Some(B256::with_last_byte(0x69)); - let _err = executor - .execute_and_verify_receipt( - &BlockWithSenders { - block: Block { - header: header.clone(), - body: vec![], - ommers: vec![], - withdrawals: None, - }, - senders: vec![], - }, - U256::ZERO, - ) - .expect_err( - "Executing genesis cancun block with non-zero parent beacon block root field should fail", - ); - - // fix header - header.parent_beacon_block_root = Some(B256::ZERO); - - // now try to process the genesis block again, this time ensuring that a system contract - // call does not occur - executor - .execute_and_verify_receipt( - &BlockWithSenders { - block: Block { - header: header.clone(), - body: vec![], - ommers: vec![], - withdrawals: None, - }, - senders: vec![], - }, - U256::ZERO, - ) - .unwrap(); - - // there is no system contract call so there should be NO STORAGE CHANGES - // this means we'll check the transition state - let state = executor.evm.context.evm.inner.db; - let transition_state = - state.transition_state.expect("the evm should be initialized with bundle updates"); - - // assert that it is the 
default (empty) transition state - assert_eq!(transition_state, TransitionState::default()); - } - - #[test] - fn eip_4788_high_base_fee() { - // This test ensures that if we have a base fee, then we don't return an error when the - // system contract is called, due to the gas price being less than the base fee. - let header = Header { - timestamp: 1, - number: 1, - parent_beacon_block_root: Some(B256::with_last_byte(0x69)), - base_fee_per_gas: Some(u64::MAX), - excess_blob_gas: Some(0), - ..Header::default() - }; - - let db = create_state_provider_with_beacon_root_contract(); - - let chain_spec = Arc::new( - ChainSpecBuilder::from(&*MAINNET) - .shanghai_activated() - .with_fork(Hardfork::Cancun, ForkCondition::Timestamp(1)) - .build(), - ); - - // execute header - let mut executor = EVMProcessor::new_with_db( - chain_spec, - StateProviderDatabase::new(db), - TestEvmConfig::default(), - ); - executor.init_env(&header, U256::ZERO); - - // ensure that the env is configured with a base fee - assert_eq!(executor.evm.block().basefee, U256::from(u64::MAX)); - - // Now execute a block with the fixed header, ensure that it does not fail - executor - .execute_and_verify_receipt( - &BlockWithSenders { - block: Block { - header: header.clone(), - body: vec![], - ommers: vec![], - withdrawals: None, - }, - senders: vec![], - }, - U256::ZERO, - ) - .unwrap(); - - // check the actual storage of the contract - it should be: - // * The storage value at header.timestamp % HISTORY_BUFFER_LENGTH should be - // header.timestamp - // * The storage value at header.timestamp % HISTORY_BUFFER_LENGTH + HISTORY_BUFFER_LENGTH - // should be parent_beacon_block_root - let history_buffer_length = 8191u64; - let timestamp_index = header.timestamp % history_buffer_length; - let parent_beacon_block_root_index = - timestamp_index % history_buffer_length + history_buffer_length; - - // get timestamp storage and compare - let timestamp_storage = - executor.db_mut().storage(BEACON_ROOTS_ADDRESS, U256::from(timestamp_index)).unwrap(); - assert_eq!(timestamp_storage, U256::from(header.timestamp)); - - // get parent beacon block root storage and compare - let parent_beacon_block_root_storage = executor - .db_mut() - .storage(BEACON_ROOTS_ADDRESS, U256::from(parent_beacon_block_root_index)) - .unwrap(); - assert_eq!(parent_beacon_block_root_storage, U256::from(0x69)); - } - - #[test] - fn test_transact_error_includes_correct_hash() { - let chain_spec = Arc::new( - ChainSpecBuilder::from(&*MAINNET) - .shanghai_activated() - .with_fork(Hardfork::Cancun, ForkCondition::Timestamp(1)) - .build(), - ); - - let db = StateProviderTest::default(); - let chain_id = chain_spec.chain.id(); - - // set up the executor - let mut executor = EVMProcessor::new_with_db( - chain_spec, - StateProviderDatabase::new(db), - TestEvmConfig::default(), - ); - - // Create a test transaction that is going to fail - let transaction = TransactionSigned::from_transaction_and_signature( - Transaction::Eip1559(TxEip1559 { - chain_id, - nonce: 1, - gas_limit: 21_000, - to: TransactionKind::Call(Address::ZERO), - max_fee_per_gas: EIP1559_INITIAL_BASE_FEE as u128, - ..Default::default() - }), - Signature::default(), - ); - - let result = executor.transact(&transaction, Address::random()); - - let expected_hash = transaction.recalculate_hash(); - - // Check the error - match result { - Err(BlockExecutionError::Validation(BlockValidationError::EVM { hash, error: _ })) => { - assert_eq!(hash, expected_hash, "The EVM error does not include the correct transaction hash."); - },
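- // Any other outcome means the transaction did not fail inside the EVM as expected. -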
_ => panic!("Expected a BlockExecutionError::Validation error, but transaction did not fail as expected."), - } - } -} diff --git a/crates/revm/src/stack.rs b/crates/revm/src/stack.rs deleted file mode 100644 index 8f8bfa5ce..000000000 --- a/crates/revm/src/stack.rs +++ /dev/null @@ -1,202 +0,0 @@ -use revm::{ - inspectors::CustomPrintTracer, - interpreter::{CallInputs, CallOutcome, CreateInputs, CreateOutcome, Interpreter}, - primitives::{Address, Env, Log, B256, U256}, - Database, EvmContext, Inspector, -}; -use std::fmt::Debug; - -/// A hook to inspect the execution of the EVM. -#[derive(Clone, Copy, Debug, Default, PartialEq, Eq)] -pub enum Hook { - /// No hook. - #[default] - None, - /// Hook on a specific block. - Block(u64), - /// Hook on a specific transaction hash. - Transaction(B256), - /// Hooks on every transaction in a block. - All, -} - -impl Hook { - /// Returns `true` if this hook should be used. - #[inline] - pub fn is_enabled(&self, block_number: u64, tx_hash: &B256) -> bool { - match self { - Hook::None => false, - Hook::Block(block) => block_number == *block, - Hook::Transaction(hash) => hash == tx_hash, - Hook::All => true, - } - } -} - -/// An inspector that calls multiple inspectors in sequence. -#[derive(Clone, Default)] -pub struct InspectorStack { - /// An inspector that prints the opcode traces to the console. - pub custom_print_tracer: Option, - /// The provided hook - pub hook: Hook, -} - -impl Debug for InspectorStack { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - f.debug_struct("InspectorStack") - .field("custom_print_tracer", &self.custom_print_tracer.is_some()) - .field("hook", &self.hook) - .finish() - } -} - -impl InspectorStack { - /// Creates a new inspector stack with the given configuration. - #[inline] - pub fn new(config: InspectorStackConfig) -> Self { - Self { - hook: config.hook, - custom_print_tracer: config.use_printer_tracer.then(Default::default), - } - } - - /// Returns `true` if this inspector should be used. - #[inline] - pub fn should_inspect(&self, env: &Env, tx_hash: &B256) -> bool { - self.custom_print_tracer.is_some() && - self.hook.is_enabled(env.block.number.saturating_to(), tx_hash) - } -} - -/// Configuration for the inspectors. -#[derive(Clone, Copy, Debug, Default)] -pub struct InspectorStackConfig { - /// Enable revm inspector printer. - /// In execution this will print opcode level traces directly to console. - pub use_printer_tracer: bool, - - /// Hook on a specific block or transaction. - pub hook: Hook, -} - -/// Helper macro to call the same method on multiple inspectors without resorting to dynamic -/// dispatch. -#[macro_export] -macro_rules! call_inspectors { - ([$($inspector:expr),+ $(,)?], |$id:ident $(,)?| $call:expr $(,)?) 
=> {{$( - if let Some($id) = $inspector { - $call - } - )+}} -} - -impl Inspector for InspectorStack -where - DB: Database, -{ - #[inline] - fn initialize_interp(&mut self, interp: &mut Interpreter, context: &mut EvmContext) { - call_inspectors!([&mut self.custom_print_tracer], |inspector| { - inspector.initialize_interp(interp, context); - }); - } - - #[inline] - fn step(&mut self, interp: &mut Interpreter, context: &mut EvmContext) { - call_inspectors!([&mut self.custom_print_tracer], |inspector| { - inspector.step(interp, context); - }); - } - - #[inline] - fn step_end(&mut self, interp: &mut Interpreter, context: &mut EvmContext) { - call_inspectors!([&mut self.custom_print_tracer], |inspector| { - inspector.step_end(interp, context); - }); - } - - #[inline] - fn log(&mut self, context: &mut EvmContext, log: &Log) { - call_inspectors!([&mut self.custom_print_tracer], |inspector| { - inspector.log(context, log); - }); - } - - #[inline] - fn call( - &mut self, - context: &mut EvmContext, - inputs: &mut CallInputs, - ) -> Option { - call_inspectors!([&mut self.custom_print_tracer], |inspector| { - if let Some(outcome) = inspector.call(context, inputs) { - return Some(outcome) - } - }); - - None - } - - #[inline] - fn call_end( - &mut self, - context: &mut EvmContext, - inputs: &CallInputs, - outcome: CallOutcome, - ) -> CallOutcome { - call_inspectors!([&mut self.custom_print_tracer], |inspector| { - let new_ret = inspector.call_end(context, inputs, outcome.clone()); - - // If the inspector returns a different ret or a revert with a non-empty message, - // we assume it wants to tell us something - if new_ret != outcome { - return new_ret - } - }); - - outcome - } - - #[inline] - fn create( - &mut self, - context: &mut EvmContext, - inputs: &mut CreateInputs, - ) -> Option { - call_inspectors!([&mut self.custom_print_tracer], |inspector| { - if let Some(out) = inspector.create(context, inputs) { - return Some(out) - } - }); - - None - } - - #[inline] - fn create_end( - &mut self, - context: &mut EvmContext, - inputs: &CreateInputs, - outcome: CreateOutcome, - ) -> CreateOutcome { - call_inspectors!([&mut self.custom_print_tracer], |inspector| { - let new_ret = inspector.create_end(context, inputs, outcome.clone()); - - // If the inspector returns a different ret or a revert with a non-empty message, - // we assume it wants to tell us something - if new_ret != outcome { - return new_ret - } - }); - - outcome - } - - #[inline] - fn selfdestruct(&mut self, contract: Address, target: Address, value: U256) { - call_inspectors!([&mut self.custom_print_tracer], |inspector| { - Inspector::::selfdestruct(inspector, contract, target, value); - }); - } -} diff --git a/crates/revm/src/state_change.rs b/crates/revm/src/state_change.rs index 5d38c656e..d2b0a6b5b 100644 --- a/crates/revm/src/state_change.rs +++ b/crates/revm/src/state_change.rs @@ -152,7 +152,7 @@ pub fn insert_post_block_withdrawals_balance_increments( for withdrawal in withdrawals.iter() { if withdrawal.amount > 0 { *balance_increments.entry(withdrawal.address).or_default() += - withdrawal.amount_wei(); + withdrawal.amount_wei().to::(); } } } diff --git a/crates/revm/src/test_utils.rs b/crates/revm/src/test_utils.rs index 193736987..8c4d1894c 100644 --- a/crates/revm/src/test_utils.rs +++ b/crates/revm/src/test_utils.rs @@ -1,30 +1,13 @@ -use reth_evm::{ConfigureEvm, ConfigureEvmEnv}; use reth_interfaces::provider::ProviderResult; use reth_primitives::{ - keccak256, revm::config::revm_spec, trie::AccountProof, Account, Address, 
BlockNumber, - Bytecode, Bytes, ChainSpec, Head, Header, StorageKey, Transaction, B256, U256, + keccak256, trie::AccountProof, Account, Address, BlockNumber, Bytecode, Bytes, StorageKey, + B256, U256, }; - -#[cfg(not(feature = "optimism"))] -use reth_primitives::revm::env::fill_tx_env; use reth_provider::{AccountReader, BlockHashReader, StateProvider, StateRootProvider}; use reth_trie::updates::TrieUpdates; -use revm::{ - db::BundleState, - primitives::{AnalysisKind, CfgEnvWithHandlerCfg, TxEnv}, -}; +use revm::db::BundleState; use std::collections::HashMap; -#[cfg(feature = "optimism")] -use { - reth_primitives::revm::env::fill_op_tx_env, - revm::{ - inspector_handle_register, - primitives::{HandlerCfg, SpecId}, - Database, Evm, EvmBuilder, GetInspector, - }, -}; - /// Mock state for testing #[derive(Debug, Default, Clone, Eq, PartialEq)] pub struct StateProviderTest { @@ -106,76 +89,3 @@ impl StateProvider for StateProviderTest { unimplemented!("proof generation is not supported") } } - -/// Test EVM configuration. -#[derive(Debug, Default, Clone, Copy)] -#[non_exhaustive] -pub struct TestEvmConfig; - -impl ConfigureEvmEnv for TestEvmConfig { - #[cfg(not(feature = "optimism"))] - type TxMeta = (); - #[cfg(feature = "optimism")] - type TxMeta = Bytes; - - #[allow(unused_variables)] - fn fill_tx_env(tx_env: &mut TxEnv, transaction: T, sender: Address, meta: Self::TxMeta) - where - T: AsRef, - { - #[cfg(not(feature = "optimism"))] - fill_tx_env(tx_env, transaction, sender); - #[cfg(feature = "optimism")] - fill_op_tx_env(tx_env, transaction, sender, meta); - } - - fn fill_cfg_env( - cfg_env: &mut CfgEnvWithHandlerCfg, - chain_spec: &ChainSpec, - header: &Header, - total_difficulty: U256, - ) { - let spec_id = revm_spec( - chain_spec, - Head { - number: header.number, - timestamp: header.timestamp, - difficulty: header.difficulty, - total_difficulty, - hash: Default::default(), - }, - ); - - cfg_env.chain_id = chain_spec.chain().id(); - cfg_env.perf_analyse_created_bytecodes = AnalysisKind::Analyse; - - cfg_env.handler_cfg.spec_id = spec_id; - #[cfg(feature = "optimism")] - { - cfg_env.handler_cfg.is_optimism = chain_spec.is_optimism(); - } - } -} - -impl ConfigureEvm for TestEvmConfig { - #[cfg(feature = "optimism")] - fn evm<'a, DB: Database + 'a>(&self, db: DB) -> Evm<'a, (), DB> { - let handler_cfg = HandlerCfg { spec_id: SpecId::LATEST, is_optimism: true }; - EvmBuilder::default().with_db(db).with_handler_cfg(handler_cfg).build() - } - - #[cfg(feature = "optimism")] - fn evm_with_inspector<'a, DB, I>(&self, db: DB, inspector: I) -> Evm<'a, I, DB> - where - DB: Database + 'a, - I: GetInspector, - { - let handler_cfg = HandlerCfg { spec_id: SpecId::LATEST, is_optimism: true }; - EvmBuilder::default() - .with_db(db) - .with_external_context(inspector) - .with_handler_cfg(handler_cfg) - .append_handler_register(inspector_handle_register) - .build() - } -} diff --git a/crates/rpc/ipc/Cargo.toml b/crates/rpc/ipc/Cargo.toml index 8d93a275c..af6e64db1 100644 --- a/crates/rpc/ipc/Cargo.toml +++ b/crates/rpc/ipc/Cargo.toml @@ -15,7 +15,6 @@ workspace = true # async/net futures.workspace = true -parity-tokio-ipc = "0.9.0" tokio = { workspace = true, features = ["net", "time", "rt-multi-thread"] } tokio-util = { workspace = true, features = ["codec"] } tokio-stream.workspace = true @@ -29,6 +28,10 @@ serde_json.workspace = true tracing.workspace = true bytes.workspace = true thiserror.workspace = true +futures-util = "0.3.30" +interprocess = { version = "1.2.1", features = ["tokio_support"] } 
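+# `interprocess` (with `tokio_support`) replaces `parity-tokio-ipc` here, so the same +# transport code can back Unix domain sockets and Windows named pipes.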
[dev-dependencies] tokio-stream = { workspace = true, features = ["sync"] } +reth-tracing.workspace = true +rand.workspace = true diff --git a/crates/rpc/ipc/src/client.rs b/crates/rpc/ipc/src/client/mod.rs similarity index 72% rename from crates/rpc/ipc/src/client.rs rename to crates/rpc/ipc/src/client/mod.rs index f4454958f..05ea7ed58 100644 --- a/crates/rpc/ipc/src/client.rs +++ b/crates/rpc/ipc/src/client/mod.rs @@ -2,43 +2,22 @@ use crate::stream_codec::StreamCodec; use futures::StreamExt; +use interprocess::local_socket::tokio::{LocalSocketStream, OwnedReadHalf, OwnedWriteHalf}; use jsonrpsee::{ async_client::{Client, ClientBuilder}, core::client::{ReceivedMessage, TransportReceiverT, TransportSenderT}, }; -use std::{ - io, - path::{Path, PathBuf}, +use std::io; +use tokio::io::AsyncWriteExt; +use tokio_util::{ + codec::FramedRead, + compat::{Compat, FuturesAsyncReadCompatExt, FuturesAsyncWriteCompatExt}, }; -use tokio::{io::AsyncWriteExt, net::UnixStream}; -use tokio_util::codec::FramedRead; - -/// Builder type for [`Client`] -#[derive(Clone, Default, Debug)] -#[non_exhaustive] -pub struct IpcClientBuilder; - -impl IpcClientBuilder { - /// Connects to an IPC socket - pub async fn build(self, path: impl AsRef<Path>) -> Result<Client, IpcError> { - let (tx, rx) = IpcTransportClientBuilder::default().build(path).await?; - Ok(self.build_with_tokio(tx, rx)) - } - - /// Uses the sender and receiver channels to connect to the socket. - pub fn build_with_tokio<S, R>(self, sender: S, receiver: R) -> Client - where - S: TransportSenderT + Send, - R: TransportReceiverT + Send, - { - ClientBuilder::default().build_with_tokio(sender, receiver) - } -} /// Sending end of IPC transport. #[derive(Debug)] -pub struct Sender { - inner: tokio::net::unix::OwnedWriteHalf, +pub(crate) struct Sender { + inner: Compat<OwnedWriteHalf>, } #[async_trait::async_trait] @@ -64,8 +43,8 @@ impl TransportSenderT for Sender { /// Receiving end of IPC transport. #[derive(Debug)] -pub struct Receiver { - inner: FramedRead<tokio::net::unix::OwnedReadHalf, StreamCodec>, +pub(crate) struct Receiver { + pub(crate) inner: FramedRead<Compat<OwnedReadHalf>, StreamCodec>, } #[async_trait::async_trait] @@ -81,10 +60,34 @@ impl TransportReceiverT for Receiver { /// Builder for IPC transport [`Sender`] and [`Receiver`] pair. #[derive(Debug, Clone, Default)] #[non_exhaustive] -pub struct IpcTransportClientBuilder; +pub(crate) struct IpcTransportClientBuilder; impl IpcTransportClientBuilder { - /// Try to establish the connection.
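+ /// Connects to the endpoint and returns the `(Sender, Receiver)` transport halves. + /// + /// A sketch of the intended wiring (the endpoint value is illustrative): + /// + /// ```ignore + /// let (tx, rx) = IpcTransportClientBuilder::default().build("/tmp/reth.ipc").await?; + /// let client = IpcClientBuilder::default().build_with_tokio(tx, rx); + /// ```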
+ pub(crate) async fn build( + self, + endpoint: impl AsRef<str>, + ) -> Result<(Sender, Receiver), IpcError> { + let endpoint = endpoint.as_ref().to_string(); + let conn = LocalSocketStream::connect(endpoint.clone()) + .await + .map_err(|err| IpcError::FailedToConnect { path: endpoint, err })?; + + let (rhlf, whlf) = conn.into_split(); + + Ok(( + Sender { inner: whlf.compat_write() }, + Receiver { inner: FramedRead::new(rhlf.compat(), StreamCodec::stream_incoming()) }, + )) + } +} + +/// Builder type for [`Client`] +#[derive(Clone, Default, Debug)] +#[non_exhaustive] +pub struct IpcClientBuilder; + +impl IpcClientBuilder { + /// Connects to an IPC socket /// /// ``` /// use jsonrpsee::{core::client::ClientT, rpc_params}; @@ -95,19 +98,18 @@ impl IpcTransportClientBuilder { /// # Ok(()) /// # } /// ``` - pub async fn build(self, path: impl AsRef<Path>) -> Result<(Sender, Receiver), IpcError> { - let path = path.as_ref(); - - let stream = UnixStream::connect(path) - .await - .map_err(|err| IpcError::FailedToConnect { path: path.to_path_buf(), err })?; - - let (rhlf, whlf) = stream.into_split(); + pub async fn build(self, path: impl AsRef<str>) -> Result<Client, IpcError> { + let (tx, rx) = IpcTransportClientBuilder::default().build(path).await?; + Ok(self.build_with_tokio(tx, rx)) + } - Ok(( - Sender { inner: whlf }, - Receiver { inner: FramedRead::new(rhlf, StreamCodec::stream_incoming()) }, - )) + /// Uses the sender and receiver channels to connect to the socket. + pub fn build_with_tokio<S, R>(self, sender: S, receiver: R) -> Client + where + S: TransportSenderT + Send, + R: TransportReceiverT + Send, + { + ClientBuilder::default().build_with_tokio(sender, receiver) + } } @@ -125,7 +127,7 @@ pub enum IpcError { FailedToConnect { /// The path of the socket. #[doc(hidden)] - path: PathBuf, + path: String, /// The error that occurred while connecting. #[doc(hidden)] err: io::Error, @@ -137,13 +139,18 @@ pub enum IpcError { #[cfg(test)] mod tests { + use crate::server::dummy_endpoint; + use interprocess::local_socket::tokio::LocalSocketListener; + use super::*; - use parity_tokio_ipc::{dummy_endpoint, Endpoint}; #[tokio::test] async fn test_connect() { let endpoint = dummy_endpoint(); - let _incoming = Endpoint::new(endpoint.clone()).incoming().unwrap(); + let binding = LocalSocketListener::bind(endpoint.clone()).unwrap(); + tokio::spawn(async move { + let _x = binding.accept().await; + }); let (tx, rx) = IpcTransportClientBuilder::default().build(endpoint).await.unwrap(); let _ = IpcClientBuilder::default().build_with_tokio(tx, rx); diff --git a/crates/rpc/ipc/src/lib.rs b/crates/rpc/ipc/src/lib.rs index 2d0193ed6..ae7a8b221 100644 --- a/crates/rpc/ipc/src/lib.rs +++ b/crates/rpc/ipc/src/lib.rs @@ -12,7 +12,6 @@ #![cfg_attr(not(test), warn(unused_crate_dependencies))] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] -#[cfg(unix)] pub mod client; pub mod server; diff --git a/crates/rpc/ipc/src/server/connection.rs b/crates/rpc/ipc/src/server/connection.rs index abeba7bbf..2aadc6e2b 100644 --- a/crates/rpc/ipc/src/server/connection.rs +++ b/crates/rpc/ipc/src/server/connection.rs @@ -1,84 +1,22 @@ //! An IPC connection.
use crate::stream_codec::StreamCodec; -use futures::{ready, stream::FuturesUnordered, FutureExt, Sink, Stream, StreamExt}; +use futures::{stream::FuturesUnordered, FutureExt, Sink, Stream}; use std::{ collections::VecDeque, future::Future, io, - marker::PhantomData, pin::Pin, task::{Context, Poll}, }; -use tokio::io::{AsyncRead, AsyncWrite, AsyncWriteExt}; +use tokio::io::{AsyncRead, AsyncWrite}; use tokio_util::codec::Framed; use tower::Service; pub(crate) type JsonRpcStream = Framed; -/// Wraps a stream of incoming connections. #[pin_project::pin_project] -pub(crate) struct Incoming { - #[pin] - inner: T, - _marker: PhantomData, -} -impl Incoming -where - T: Stream> + Unpin + 'static, - Item: AsyncRead + AsyncWrite, -{ - /// Create a new instance. - pub(crate) fn new(inner: T) -> Self { - Self { inner, _marker: Default::default() } - } - - /// Polls to accept a new incoming connection to the endpoint. - pub(crate) fn poll_accept(&mut self, cx: &mut Context<'_>) -> Poll<::Item> { - Poll::Ready(ready!(self.poll_next_unpin(cx)).map_or( - Err(io::Error::new(io::ErrorKind::ConnectionAborted, "ipc connection closed")), - |conn| conn, - )) - } -} - -impl Stream for Incoming -where - T: Stream> + 'static, - Item: AsyncRead + AsyncWrite, -{ - type Item = io::Result>>; - - fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let this = self.project(); - let res = match ready!(this.inner.poll_next(cx)) { - Some(Ok(item)) => { - let framed = IpcConn(tokio_util::codec::Decoder::framed( - StreamCodec::stream_incoming(), - item, - )); - Ok(framed) - } - Some(Err(err)) => Err(err), - None => return Poll::Ready(None), - }; - Poll::Ready(Some(res)) - } -} - -#[pin_project::pin_project] -pub(crate) struct IpcConn(#[pin] T); - -impl IpcConn> -where - T: AsyncRead + AsyncWrite + Unpin, -{ - /// Create a response for when the server is busy and can't accept more requests. - pub(crate) async fn reject_connection(self) { - let mut parts = self.0.into_parts(); - let _ = parts.io.write_all(b"Too many connections. Please try again later.").await; - } -} +pub(crate) struct IpcConn(#[pin] pub(crate) T); impl Stream for IpcConn> where diff --git a/crates/rpc/ipc/src/server/future.rs b/crates/rpc/ipc/src/server/future.rs index 84df306a5..85c69c2a6 100644 --- a/crates/rpc/ipc/src/server/future.rs +++ b/crates/rpc/ipc/src/server/future.rs @@ -26,127 +26,8 @@ //! Utilities for handling async code. -use futures::FutureExt; -use std::{ - future::Future, - pin::Pin, - sync::Arc, - task::{Context, Poll}, -}; -use tokio::{ - sync::{watch, OwnedSemaphorePermit, Semaphore, TryAcquireError}, - time::{self, Duration, Interval}, -}; - -/// Polling for server stop monitor interval in milliseconds. -const STOP_MONITOR_POLLING_INTERVAL: Duration = Duration::from_millis(1000); - -/// This is a flexible collection of futures that need to be driven to completion -/// alongside some other future, such as connection handlers that need to be -/// handled along with a listener for new connections. -/// -/// In order to `.await` on these futures and drive them to completion, call -/// `select_with` providing some other future, the result of which you need. 
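-/// -/// For example, the old accept loop used it as `connections.select_with(&mut incoming).await`, -/// driving in-flight connection futures each time the pending accept was polled.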
-pub(crate) struct FutureDriver { - futures: Vec, - stop_monitor_heartbeat: Interval, -} - -impl Default for FutureDriver { - fn default() -> Self { - let mut heartbeat = time::interval(STOP_MONITOR_POLLING_INTERVAL); - - heartbeat.set_missed_tick_behavior(time::MissedTickBehavior::Skip); - - FutureDriver { futures: Vec::new(), stop_monitor_heartbeat: heartbeat } - } -} - -impl FutureDriver { - /// Add a new future to this driver - pub(crate) fn add(&mut self, future: F) { - self.futures.push(future); - } -} - -impl FutureDriver -where - F: Future + Unpin, -{ - pub(crate) async fn select_with(&mut self, selector: S) -> S::Output { - tokio::pin!(selector); - - DriverSelect { selector, driver: self }.await - } - - fn drive(&mut self, cx: &mut Context<'_>) { - let mut i = 0; - - while i < self.futures.len() { - if self.futures[i].poll_unpin(cx).is_ready() { - // Using `swap_remove` since we don't care about ordering - // but we do care about removing being `O(1)`. - // - // We don't increment `i` in this branch, since we now - // have a shorter length, and potentially a new value at - // current index - self.futures.swap_remove(i); - } else { - i += 1; - } - } - } - - fn poll_stop_monitor_heartbeat(&mut self, cx: &mut Context<'_>) { - // We don't care about the ticks of the heartbeat, it's here only - // to periodically wake the `Waker` on `cx`. - let _ = self.stop_monitor_heartbeat.poll_tick(cx); - } -} - -impl Future for FutureDriver -where - F: Future + Unpin, -{ - type Output = (); - - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - let this = Pin::into_inner(self); - - this.drive(cx); - - if this.futures.is_empty() { - Poll::Ready(()) - } else { - Poll::Pending - } - } -} - -/// This is a glorified select `Future` that will attempt to drive all -/// connection futures `F` to completion on each `poll`, while also -/// handling incoming connections. -struct DriverSelect<'a, S, F> { - selector: S, - driver: &'a mut FutureDriver, -} - -impl<'a, R, F> Future for DriverSelect<'a, R, F> -where - R: Future + Unpin, - F: Future + Unpin, -{ - type Output = R::Output; - - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - let this = Pin::into_inner(self); - - this.driver.drive(cx); - this.driver.poll_stop_monitor_heartbeat(cx); - - this.selector.poll_unpin(cx) - } -} +use std::sync::Arc; +use tokio::sync::watch; #[derive(Debug, Clone)] pub(crate) struct StopHandle(watch::Receiver<()>); @@ -156,12 +37,7 @@ impl StopHandle { Self(rx) } - pub(crate) fn shutdown_requested(&self) -> bool { - // if a message has been seen, it means that `stop` has been called. - self.0.has_changed().unwrap_or(true) - } - - pub(crate) async fn shutdown(&mut self) { + pub(crate) async fn shutdown(mut self) { // Err(_) implies that the `sender` has been dropped. // Ok(_) implies that `stop` has been called. let _ = self.0.changed().await; @@ -182,27 +58,3 @@ impl ServerHandle { self.0.closed().await } } - -/// Limits the number of connections. 
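-/// Backed by a semaphore: `try_acquire` hands out owned permits until the configured -/// limit is exhausted.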
-pub(crate) struct ConnectionGuard(Arc); - -impl ConnectionGuard { - pub(crate) fn new(limit: usize) -> Self { - Self(Arc::new(Semaphore::new(limit))) - } - - pub(crate) fn try_acquire(&self) -> Option { - match self.0.clone().try_acquire_owned() { - Ok(guard) => Some(guard), - Err(TryAcquireError::Closed) => { - unreachable!("Semaphore::Close is never called and can't be closed") - } - Err(TryAcquireError::NoPermits) => None, - } - } - - #[allow(dead_code)] - pub(crate) fn available_connections(&self) -> usize { - self.0.available_permits() - } -} diff --git a/crates/rpc/ipc/src/server/ipc.rs b/crates/rpc/ipc/src/server/ipc.rs index 8ce4502a2..c73d9bb93 100644 --- a/crates/rpc/ipc/src/server/ipc.rs +++ b/crates/rpc/ipc/src/server/ipc.rs @@ -8,15 +8,14 @@ use jsonrpsee::{ tracing::server::{rx_log_from_json, tx_log_from_str}, JsonRawValue, }, - server::IdProvider, + server::middleware::rpc::RpcServiceT, types::{ - error::{reject_too_many_subscriptions, ErrorCode}, - ErrorObject, Id, InvalidRequest, Notification, Params, Request, + error::{reject_too_big_request, ErrorCode}, + ErrorObject, Id, InvalidRequest, Notification, Request, }, - BatchResponseBuilder, BoundedSubscriptions, CallOrSubscription, MethodCallback, MethodResponse, - MethodSink, Methods, ResponsePayload, SubscriptionState, + BatchResponseBuilder, MethodResponse, ResponsePayload, }; -use std::{sync::Arc, time::Instant}; +use std::sync::Arc; use tokio::sync::OwnedSemaphorePermit; use tokio_util::either::Either; use tracing::instrument; @@ -24,42 +23,33 @@ use tracing::instrument; type Notif<'a> = Notification<'a, Option<&'a JsonRawValue>>; #[derive(Debug, Clone)] -pub(crate) struct Batch<'a> { +pub(crate) struct Batch { data: Vec, - call: CallData<'a>, -} - -#[derive(Debug, Clone)] -pub(crate) struct CallData<'a> { - conn_id: usize, - methods: &'a Methods, - id_provider: &'a dyn IdProvider, - sink: &'a MethodSink, - max_response_body_size: u32, - max_log_length: u32, - request_start: Instant, - bounded_subscriptions: BoundedSubscriptions, + rpc_service: S, } // Batch responses must be sent back as a single message so we read the results from each // request in the batch and read the results off of a new channel, `rx_batch`, and then send the // complete batch response back to the client over `tx`. #[instrument(name = "batch", skip(b), level = "TRACE")] -pub(crate) async fn process_batch_request(b: Batch<'_>) -> Option { - let Batch { data, call } = b; +pub(crate) async fn process_batch_request( + b: Batch, + max_response_body_size: usize, +) -> Option +where + for<'a> S: RpcServiceT<'a> + Send, +{ + let Batch { data, rpc_service } = b; if let Ok(batch) = serde_json::from_slice::>(&data) { let mut got_notif = false; - let mut batch_response = - BatchResponseBuilder::new_with_limit(call.max_response_body_size as usize); + let mut batch_response = BatchResponseBuilder::new_with_limit(max_response_body_size); let mut pending_calls: FuturesOrdered<_> = batch .into_iter() .filter_map(|v| { if let Ok(req) = serde_json::from_str::>(v.get()) { - Some(Either::Right(async { - execute_call(req, call.clone()).await.into_response() - })) + Some(Either::Right(rpc_service.call(req))) } else if let Ok(_notif) = serde_json::from_str::>(v.get()) { // notifications should not be answered. 
got_notif = true; @@ -95,92 +85,32 @@ pub(crate) async fn process_batch_request(b: Batch<'_>) -> Option { } } -pub(crate) async fn process_single_request( +pub(crate) async fn process_single_request( data: Vec, - call: CallData<'_>, -) -> Option { + rpc_service: &S, +) -> Option +where + for<'a> S: RpcServiceT<'a> + Send, +{ if let Ok(req) = serde_json::from_slice::>(&data) { - Some(execute_call_with_tracing(req, call).await) + Some(execute_call_with_tracing(req, rpc_service).await) } else if serde_json::from_slice::>(&data).is_ok() { None } else { let (id, code) = prepare_error(&data); - Some(CallOrSubscription::Call(MethodResponse::error(id, ErrorObject::from(code)))) + Some(MethodResponse::error(id, ErrorObject::from(code))) } } -#[instrument(name = "method_call", fields(method = req.method.as_ref()), skip(call, req), level = "TRACE")] -pub(crate) async fn execute_call_with_tracing<'a>( +#[instrument(name = "method_call", fields(method = req.method.as_ref()), skip(req, rpc_service), level = "TRACE")] +pub(crate) async fn execute_call_with_tracing<'a, S>( req: Request<'a>, - call: CallData<'_>, -) -> CallOrSubscription { - execute_call(req, call).await -} - -pub(crate) async fn execute_call(req: Request<'_>, call: CallData<'_>) -> CallOrSubscription { - let CallData { - methods, - max_response_body_size, - max_log_length, - conn_id, - id_provider, - sink, - request_start, - bounded_subscriptions, - } = call; - - rx_log_from_json(&req, call.max_log_length); - - let params = Params::new(req.params.as_ref().map(|params| params.get())); - let name = &req.method; - let id = req.id; - - let response = match methods.method_with_name(name) { - None => { - let response = MethodResponse::error(id, ErrorObject::from(ErrorCode::MethodNotFound)); - CallOrSubscription::Call(response) - } - Some((_name, method)) => match method { - MethodCallback::Sync(callback) => { - let response = (callback)(id, params, max_response_body_size as usize); - CallOrSubscription::Call(response) - } - MethodCallback::Async(callback) => { - let id = id.into_owned(); - let params = params.into_owned(); - let response = - (callback)(id, params, conn_id, max_response_body_size as usize).await; - CallOrSubscription::Call(response) - } - MethodCallback::AsyncWithDetails(_callback) => { - unimplemented!() - } - MethodCallback::Subscription(callback) => { - if let Some(p) = bounded_subscriptions.acquire() { - let conn_state = - SubscriptionState { conn_id, id_provider, subscription_permit: p }; - let response = callback(id, params, sink.clone(), conn_state).await; - CallOrSubscription::Subscription(response) - } else { - let response = MethodResponse::error( - id, - reject_too_many_subscriptions(bounded_subscriptions.max()), - ); - CallOrSubscription::Call(response) - } - } - MethodCallback::Unsubscription(callback) => { - // Don't adhere to any resource or subscription limits; always let unsubscribing - // happen! 
- let result = callback(id, params, conn_id, max_response_body_size as usize); - CallOrSubscription::Call(result) - } - }, - }; - - tx_log_from_str(response.as_response().as_result(), max_log_length); - let _ = request_start; - response + rpc_service: &S, +) -> MethodResponse +where + for<'b> S: RpcServiceT<'b> + Send, +{ + rpc_service.call(req).await } #[instrument(name = "notification", fields(method = notif.method.as_ref()), skip(notif, max_log_length), level = "TRACE")] fn execute_notification(notif: &Notif<'_>, max_log_length: u32) -> MethodRespons @@ -192,31 +122,16 @@ response } -#[allow(dead_code)] -pub(crate) struct HandleRequest { - pub(crate) methods: Methods, - pub(crate) max_request_body_size: u32, - pub(crate) max_response_body_size: u32, - pub(crate) max_log_length: u32, - pub(crate) batch_requests_supported: bool, - pub(crate) conn: Arc<OwnedSemaphorePermit>, - pub(crate) bounded_subscriptions: BoundedSubscriptions, - pub(crate) method_sink: MethodSink, - pub(crate) id_provider: Arc<dyn IdProvider>, -} - -pub(crate) async fn handle_request(request: String, input: HandleRequest) -> Option<String> { - let HandleRequest { - methods, - max_response_body_size, - max_log_length, - conn, - bounded_subscriptions, - method_sink, - id_provider, - .. - } = input; - +pub(crate) async fn call_with_service<S>( + request: String, + rpc_service: S, + max_response_body_size: usize, + max_request_body_size: usize, + conn: Arc<OwnedSemaphorePermit>, +) -> Option<String> +where + for<'a> S: RpcServiceT<'a> + Send, +{ enum Kind { Single, Batch, } @@ -231,31 +146,27 @@ }) .unwrap_or(Kind::Single); - let call = CallData { - conn_id: 0, - methods: &methods, - id_provider: &*id_provider, - sink: &method_sink, - max_response_body_size, - max_log_length, - request_start: Instant::now(), - bounded_subscriptions, - }; + let data = request.into_bytes(); + if data.len() > max_request_body_size { + return Some(batch_response_error( + Id::Null, + reject_too_big_request(max_request_body_size as u32), + )); + } // Single request or notification let res = if matches!(request_kind, Kind::Single) { - let response = process_single_request(request.into_bytes(), call).await; + let response = process_single_request(data, &rpc_service).await; match response { - Some(CallOrSubscription::Call(response)) => Some(response.to_result()), - Some(CallOrSubscription::Subscription(_)) => { + Some(response) if response.is_method_call() => Some(response.to_result()), + _ => { // subscription responses are sent directly over the sink; returning a response here // would lead to a duplicate response for the subscription None } - None => None, } } else { - process_batch_request(Batch { data: request.into_bytes(), call }).await + process_batch_request(Batch { data, rpc_service }, max_response_body_size).await }; drop(conn); diff --git a/crates/rpc/ipc/src/server/mod.rs b/crates/rpc/ipc/src/server/mod.rs index e6d1a6051..046087454 100644 --- a/crates/rpc/ipc/src/server/mod.rs +++ b/crates/rpc/ipc/src/server/mod.rs @@ -1,56 +1,85 @@ //!
JSON-RPC IPC server implementation use crate::server::{ - connection::{Incoming, IpcConn, JsonRpcStream}, - future::{ConnectionGuard, FutureDriver, StopHandle}, + connection::{IpcConn, JsonRpcStream}, + future::StopHandle, }; -use futures::{FutureExt, Stream, StreamExt}; +use futures::StreamExt; +use futures_util::{future::Either, AsyncWriteExt}; +use interprocess::local_socket::tokio::{LocalSocketListener, LocalSocketStream}; use jsonrpsee::{ core::TEN_MB_SIZE_BYTES, - server::{AlreadyStoppedError, IdProvider, RandomIntegerIdProvider}, + server::{ + middleware::rpc::{RpcLoggerLayer, RpcServiceT}, + AlreadyStoppedError, ConnectionGuard, ConnectionPermit, IdProvider, + RandomIntegerIdProvider, + }, BoundedSubscriptions, MethodSink, Methods, }; use std::{ future::Future, io, - pin::Pin, + pin::{pin, Pin}, sync::Arc, task::{Context, Poll}, }; use tokio::{ io::{AsyncRead, AsyncWrite}, - sync::{oneshot, watch, OwnedSemaphorePermit}, + sync::{oneshot, watch}, }; -use tower::{layer::util::Identity, Service}; -use tracing::{debug, trace, warn}; - +use tower::{layer::util::Identity, Layer, Service}; +use tracing::{debug, instrument, trace, warn, Instrument}; // re-export so can be used during builder setup -use crate::server::connection::IpcConnDriver; -pub use parity_tokio_ipc::Endpoint; +use crate::{ + server::{ + connection::IpcConnDriver, + rpc_service::{RpcService, RpcServiceCfg}, + }, + stream_codec::StreamCodec, +}; use tokio::sync::mpsc; use tokio_stream::wrappers::ReceiverStream; +use tokio_util::compat::FuturesAsyncReadCompatExt; +use tower::layer::{util::Stack, LayerFn}; mod connection; mod future; mod ipc; +mod rpc_service; /// Ipc Server implementation // This is an adapted `jsonrpsee` Server, but for `Ipc` connections. -pub struct IpcServer { +pub struct IpcServer<HttpMiddleware = Identity, RpcMiddleware = Identity> { /// The endpoint we listen on for incoming connections - endpoint: Endpoint, + endpoint: String, id_provider: Arc<dyn IdProvider>, cfg: Settings, - service_builder: tower::ServiceBuilder<B>, + rpc_middleware: RpcServiceBuilder<RpcMiddleware>, + http_middleware: tower::ServiceBuilder<HttpMiddleware>, } -impl IpcServer { - /// Returns the configured [Endpoint] - pub fn endpoint(&self) -> &Endpoint { - &self.endpoint +impl<HttpMiddleware, RpcMiddleware> IpcServer<HttpMiddleware, RpcMiddleware> { + /// Returns the configured endpoint + pub fn endpoint(&self) -> String { + self.endpoint.clone() } +} +impl<HttpMiddleware, RpcMiddleware> IpcServer<HttpMiddleware, RpcMiddleware> +where + RpcMiddleware: Layer<RpcService> + Clone + Send + 'static, + for<'a> <RpcMiddleware as Layer<RpcService>>::Service: RpcServiceT<'a>, + HttpMiddleware: Layer<TowerServiceNoHttp<RpcMiddleware>> + Send + 'static, + <HttpMiddleware as Layer<TowerServiceNoHttp<RpcMiddleware>>>::Service: Send + + Service< + String, + Response = Option<String>, + Error = Box<dyn std::error::Error + Send + Sync + 'static>, + >, + <<HttpMiddleware as Layer<TowerServiceNoHttp<RpcMiddleware>>>::Service as Service<String>>::Future: + Send + Unpin, +{ /// Start responding to connection requests.
/// /// This will run on the tokio runtime until the server is stopped or the ServerHandle is @@ -99,104 +128,116 @@ impl IpcServer { stop_handle: StopHandle, on_ready: oneshot::Sender>, ) { - trace!(endpoint = ?self.endpoint.path(), "starting ipc server"); + trace!(endpoint = ?self.endpoint, "starting ipc server"); if cfg!(unix) { // ensure the file does not exist - if std::fs::remove_file(self.endpoint.path()).is_ok() { - debug!(endpoint = ?self.endpoint.path(), "removed existing IPC endpoint file"); + if std::fs::remove_file(&self.endpoint).is_ok() { + debug!(endpoint = ?self.endpoint, "removed existing IPC endpoint file"); } } - let message_buffer_capacity = self.cfg.message_buffer_capacity; - let max_request_body_size = self.cfg.max_request_body_size; - let max_response_body_size = self.cfg.max_response_body_size; - let max_log_length = self.cfg.max_log_length; - let id_provider = self.id_provider; - let max_subscriptions_per_connection = self.cfg.max_subscriptions_per_connection; - - let mut id: u32 = 0; - let connection_guard = ConnectionGuard::new(self.cfg.max_connections as usize); - - let mut connections = FutureDriver::default(); - let endpoint_path = self.endpoint.path().to_string(); - let incoming = match self.endpoint.incoming() { - Ok(connections) => { - #[cfg(windows)] - let connections = Box::pin(connections); - Incoming::new(connections) - } + let listener = match LocalSocketListener::bind(self.endpoint.clone()) { Err(err) => { on_ready - .send(Err(IpcServerStartError { endpoint: endpoint_path, source: err })) + .send(Err(IpcServerStartError { endpoint: self.endpoint.clone(), source: err })) .ok(); - return + return; } + + Ok(listener) => listener, }; + // signal that we're ready to accept connections on_ready.send(Ok(())).ok(); - let mut incoming = Monitored::new(incoming, &stop_handle); + let mut id: u32 = 0; + let connection_guard = ConnectionGuard::new(self.cfg.max_connections as usize); + + let stopped = stop_handle.clone().shutdown(); + let mut stopped = pin!(stopped); + + let (drop_on_completion, mut process_connection_awaiter) = mpsc::channel::<()>(1); trace!("accepting ipc connections"); loop { - match connections.select_with(&mut incoming).await { - Ok(ipc) => { - trace!("established new connection"); - let conn = match connection_guard.try_acquire() { - Some(conn) => conn, - None => { - warn!("Too many IPC connections. Please try again later."); - connections.add(ipc.reject_connection().boxed()); - continue - } + match try_accept_conn(&listener, stopped).await { + AcceptConnection::Established { local_socket_stream, stop } => { + let Some(conn_permit) = connection_guard.try_acquire() else { + let (mut _reader, mut writer) = local_socket_stream.into_split(); + let _ = writer.write_all(b"Too many connections. 
Please try again later.").await; + drop((_reader, writer)); + stopped = stop; + continue; }; - let (tx, rx) = mpsc::channel::<String>(message_buffer_capacity as usize); - let method_sink = MethodSink::new_with_limit(tx, max_response_body_size); - let tower_service = TowerService { - inner: ServiceData { - methods: methods.clone(), - max_request_body_size, - max_response_body_size, - max_log_length, - id_provider: id_provider.clone(), - stop_handle: stop_handle.clone(), - max_subscriptions_per_connection, - conn_id: id, - conn: Arc::new(conn), - bounded_subscriptions: BoundedSubscriptions::new( - max_subscriptions_per_connection, - ), - method_sink, - }, - }; - - let service = self.service_builder.service(tower_service); - connections.add(Box::pin(spawn_connection( - ipc, - service, - stop_handle.clone(), - rx, - ))); + let max_conns = connection_guard.max_connections(); + let curr_conns = max_conns - connection_guard.available_connections(); + trace!("Accepting new connection {}/{}", curr_conns, max_conns); + + let conn_permit = Arc::new(conn_permit); + + process_connection(ProcessConnection { + http_middleware: &self.http_middleware, + rpc_middleware: self.rpc_middleware.clone(), + conn_permit, + conn_id: id, + server_cfg: self.cfg.clone(), + stop_handle: stop_handle.clone(), + drop_on_completion: drop_on_completion.clone(), + methods: methods.clone(), + id_provider: self.id_provider.clone(), + local_socket_stream, + }); id = id.wrapping_add(1); + stopped = stop; } - Err(MonitoredError::Selector(err)) => { - tracing::error!("Error while awaiting a new IPC connection: {:?}", err); + AcceptConnection::Shutdown => { break; } + AcceptConnection::Err((e, stop)) => { + tracing::error!("Error while awaiting a new IPC connection: {:?}", e); + stopped = stop; } - Err(MonitoredError::Shutdown) => break, } } - connections.await; + // Drop the last Sender + drop(drop_on_completion); + + // Once this channel is closed it is safe to assume that all connections have been gracefully shut down + while process_connection_awaiter.recv().await.is_some() { + // Generally, messages should not be sent across this channel, + // but we'll loop here to wait for `None` just to be on the safe side + } + } +} + +enum AcceptConnection<S> { + Shutdown, + Established { local_socket_stream: LocalSocketStream, stop: S }, + Err((io::Error, S)), +} + +async fn try_accept_conn<S>(listener: &LocalSocketListener, stopped: S) -> AcceptConnection<S> +where + S: Future + Unpin, +{ + let accept = listener.accept(); + let accept = pin!(accept); + + match futures_util::future::select(accept, stopped).await { + Either::Left((res, stop)) => match res { + Ok(local_socket_stream) => AcceptConnection::Established { local_socket_stream, stop }, + Err(e) => AcceptConnection::Err((e, stop)), + }, + Either::Right(_) => AcceptConnection::Shutdown, + } } impl std::fmt::Debug for IpcServer { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("IpcServer") - .field("endpoint", &self.endpoint.path()) + .field("endpoint", &self.endpoint) .field("cfg", &self.cfg) .field("id_provider", &self.id_provider) .finish() @@ -218,42 +259,105 @@ pub struct IpcServerStartError {
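The accept loop above races each `accept()` against the shutdown future and threads the unresolved shutdown future back through the loop so it can be reused on the next iteration. A self-contained sketch of the same pattern, with `tokio::net::TcpListener` standing in for `LocalSocketListener` and a timer standing in for `stop_handle.shutdown()` (all names here are illustrative, not reth's; assumes tokio with full features and futures-util):

```rust
use std::{future::Future, io, pin::pin};

use futures_util::future::{select, Either};
use tokio::net::{TcpListener, TcpStream};

#[allow(dead_code)]
enum Accepted<S> {
    Established { stream: TcpStream, stop: S },
    Err((io::Error, S)),
    Shutdown,
}

// Race one `accept()` against the shutdown future; if a connection (or an
// error) wins, the still-pending shutdown future is handed back so the
// caller can reuse it on the next loop iteration.
async fn try_accept<S>(listener: &TcpListener, stopped: S) -> Accepted<S>
where
    S: Future + Unpin,
{
    let accept = pin!(listener.accept());
    match select(accept, stopped).await {
        Either::Left((Ok((stream, _addr)), stop)) => Accepted::Established { stream, stop },
        Either::Left((Err(e), stop)) => Accepted::Err((e, stop)),
        Either::Right(_) => Accepted::Shutdown,
    }
}

#[tokio::main]
async fn main() -> io::Result<()> {
    let listener = TcpListener::bind("127.0.0.1:0").await?;
    // Stand-in for `stop_handle.shutdown()`: fires after 50ms.
    let stop = pin!(tokio::time::sleep(std::time::Duration::from_millis(50)));
    match try_accept(&listener, stop).await {
        Accepted::Shutdown => println!("stopped before any peer connected"),
        Accepted::Established { .. } => println!("accepted a connection"),
        Accepted::Err((e, _)) => println!("accept failed: {e}"),
    }
    Ok(())
}
```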
pub(crate) struct ServiceData { /// Registered server methods. pub(crate) methods: Methods, - /// Max request body size. - pub(crate) max_request_body_size: u32, - /// Max request body size. - pub(crate) max_response_body_size: u32, - /// Max length for logging for request and response - /// - /// Logs bigger than this limit will be truncated. - pub(crate) max_log_length: u32, /// Subscription ID provider. pub(crate) id_provider: Arc<dyn IdProvider>, /// Stop handle. pub(crate) stop_handle: StopHandle, - /// Max subscriptions per connection. - pub(crate) max_subscriptions_per_connection: u32, /// Connection ID pub(crate) conn_id: u32, - /// Handle to hold a `connection permit`. - pub(crate) conn: Arc<OwnedSemaphorePermit>, + /// Connection Permit. + pub(crate) conn_permit: Arc<ConnectionPermit>, /// Limits the number of subscriptions for this connection pub(crate) bounded_subscriptions: BoundedSubscriptions, /// Sink that is used to send back responses to the connection. /// /// This is used for subscriptions. pub(crate) method_sink: MethodSink, + /// ServerConfig + pub(crate) server_cfg: Settings, +} + +/// Similar to [`tower::ServiceBuilder`] but doesn't +/// support any tower middleware implementations. +#[derive(Debug, Clone)] +pub struct RpcServiceBuilder<L>(tower::ServiceBuilder<L>); + +impl Default for RpcServiceBuilder<Identity> { + fn default() -> Self { + RpcServiceBuilder(tower::ServiceBuilder::new()) + } +} + +impl RpcServiceBuilder<Identity> { + /// Create a new [`RpcServiceBuilder`]. + pub fn new() -> Self { + Self(tower::ServiceBuilder::new()) + } +} + +impl<L> RpcServiceBuilder<L> { + /// Optionally add a new layer `T` to the [`RpcServiceBuilder`]. + /// + /// See the documentation for [`tower::ServiceBuilder::option_layer`] for more details. + pub fn option_layer<T>( + self, + layer: Option<T>, + ) -> RpcServiceBuilder<Stack<Either<T, Identity>, L>> { + let layer = if let Some(layer) = layer { + Either::Left(layer) + } else { + Either::Right(Identity::new()) + }; + self.layer(layer) + } + + /// Add a new layer `T` to the [`RpcServiceBuilder`]. + /// + /// See the documentation for [`tower::ServiceBuilder::layer`] for more details. + pub fn layer<T>(self, layer: T) -> RpcServiceBuilder<Stack<T, L>> { + RpcServiceBuilder(self.0.layer(layer)) + } + + /// Add a [`tower::Layer`] built from a function that accepts a service and returns another + /// service. + /// + /// See the documentation for [`tower::ServiceBuilder::layer_fn`] for more details. + pub fn layer_fn<F>(self, f: F) -> RpcServiceBuilder<Stack<LayerFn<F>, L>> { + RpcServiceBuilder(self.0.layer_fn(f)) + } + + /// Add a logging layer to [`RpcServiceBuilder`] + /// + /// This logs each request and response for every call. + pub fn rpc_logger(self, max_log_len: u32) -> RpcServiceBuilder<Stack<RpcLoggerLayer, L>> { + RpcServiceBuilder(self.0.layer(RpcLoggerLayer::new(max_log_len))) + } + + /// Wrap the service `S` with the middleware. + pub(crate) fn service<S>(&self, service: S) -> L::Service + where + L: tower::Layer<S>, + { + self.0.service(service) + } } /// JsonRPSee service compatible with `tower`. /// /// # Note /// This is similar to [`hyper::service::service_fn`](https://docs.rs/hyper/latest/hyper/service/fn.service_fn.html). -#[derive(Debug)] -pub struct TowerService { +#[derive(Debug, Clone)] +pub struct TowerServiceNoHttp<L> { inner: ServiceData, + rpc_middleware: RpcServiceBuilder<L>, } -impl Service<String> for TowerService { +impl<RpcMiddleware> Service<String> for TowerServiceNoHttp<RpcMiddleware> +where + RpcMiddleware: for<'a> Layer<RpcService>, + <RpcMiddleware as Layer<RpcService>>::Service: Send + Sync + 'static, + for<'a> <RpcMiddleware as Layer<RpcService>>::Service: RpcServiceT<'a>, +{ /// The response of a handled RPC call /// /// This is an `Option` because subscriptions and call responses are handled differently.
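`RpcServiceBuilder` above is a thin wrapper over `tower::ServiceBuilder`, so the usual tower layering rule applies: the layer added first is the outermost service and sees the request first. A toy demonstration of that ordering (assumes tower 0.4 with the `util` feature; `Tag` is an invented middleware, not part of reth or tower):

```rust
use std::task::{Context, Poll};
use tower::{Service, ServiceBuilder, ServiceExt};

/// Invented middleware: prefixes the request with a tag before forwarding it
/// to the wrapped service.
#[derive(Clone)]
struct Tag<S> {
    inner: S,
    tag: &'static str,
}

impl<S> Service<String> for Tag<S>
where
    S: Service<String, Response = String>,
{
    type Response = String;
    type Error = S::Error;
    type Future = S::Future;

    fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        self.inner.poll_ready(cx)
    }

    fn call(&mut self, req: String) -> Self::Future {
        self.inner.call(format!("{}:{}", self.tag, req))
    }
}

#[tokio::main]
async fn main() {
    // Innermost service: echoes its input back.
    let echo = tower::service_fn(|req: String| async move {
        Ok::<_, std::convert::Infallible>(req)
    });

    // The layer added first ("outer") is the outermost service, so it runs
    // first and prepends its tag first; "inner" then prepends in front of it.
    let svc = ServiceBuilder::new()
        .layer_fn(|inner| Tag { inner, tag: "outer" })
        .layer_fn(|inner| Tag { inner, tag: "inner" })
        .service(echo);

    let rsp = svc.oneshot("req".to_string()).await.unwrap();
    assert_eq!(rsp, "inner:outer:req");
}
```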
@@ -273,35 +377,121 @@ impl Service<String> for TowerService { fn call(&mut self, request: String) -> Self::Future { trace!("{:?}", request); - // handle the request - let data = ipc::HandleRequest { - methods: self.inner.methods.clone(), - max_request_body_size: self.inner.max_request_body_size, - max_response_body_size: self.inner.max_response_body_size, - max_log_length: self.inner.max_log_length, - batch_requests_supported: true, - conn: self.inner.conn.clone(), - bounded_subscriptions: self.inner.bounded_subscriptions.clone(), - method_sink: self.inner.method_sink.clone(), + let cfg = RpcServiceCfg::CallsAndSubscriptions { + bounded_subscriptions: BoundedSubscriptions::new( + self.inner.server_cfg.max_subscriptions_per_connection, + ), id_provider: self.inner.id_provider.clone(), + sink: self.inner.method_sink.clone(), }; + let max_response_body_size = self.inner.server_cfg.max_response_body_size as usize; + let max_request_body_size = self.inner.server_cfg.max_request_body_size as usize; + let conn = self.inner.conn_permit.clone(); + let rpc_service = self.rpc_middleware.service(RpcService::new( + self.inner.methods.clone(), + max_response_body_size, + self.inner.conn_id as usize, + cfg, + )); // an ipc connection needs to handle read+write concurrently // even if the underlying rpc handler spawns the actual work or does a lot of async work, any // additional overhead performed by `call_with_service` can result in I/O latencies; for // example, tracing calls are relatively CPU expensive on serde::serialize alone. Moving this // work to a separate task takes the pressure off the connection so all concurrent responses // are also serialized concurrently and the connection can focus on read+write - let f = tokio::task::spawn(async move { ipc::handle_request(request, data).await }); + let f = tokio::task::spawn(async move { + ipc::call_with_service( + request, + rpc_service, + max_response_body_size, + max_request_body_size, + conn, + ) + .await + }); + Box::pin(async move { f.await.map_err(|err| err.into()) }) } } +struct ProcessConnection<'a, HttpMiddleware, RpcMiddleware> { + http_middleware: &'a tower::ServiceBuilder<HttpMiddleware>, + rpc_middleware: RpcServiceBuilder<RpcMiddleware>, + conn_permit: Arc<ConnectionPermit>, + conn_id: u32, + server_cfg: Settings, + stop_handle: StopHandle, + drop_on_completion: mpsc::Sender<()>, + methods: Methods, + id_provider: Arc<dyn IdProvider>, + local_socket_stream: LocalSocketStream, +} + /// Spawns the IPC connection onto a new task +#[instrument(name = "connection", skip_all, fields(conn_id = %params.conn_id), level = "INFO")] +fn process_connection<'b, RpcMiddleware, HttpMiddleware>( + params: ProcessConnection<'_, HttpMiddleware, RpcMiddleware>, +) where + RpcMiddleware: Layer<RpcService> + Clone + Send + 'static, + for<'a> <RpcMiddleware as Layer<RpcService>>::Service: RpcServiceT<'a>, + HttpMiddleware: Layer<TowerServiceNoHttp<RpcMiddleware>> + Send + 'static, + <HttpMiddleware as Layer<TowerServiceNoHttp<RpcMiddleware>>>::Service: Send + + Service< + String, + Response = Option<String>, + Error = Box<dyn std::error::Error + Send + Sync + 'static>, + >, + <<HttpMiddleware as Layer<TowerServiceNoHttp<RpcMiddleware>>>::Service as Service<String>>::Future: + Send + Unpin, + { + let ProcessConnection { + http_middleware, + rpc_middleware, + conn_permit, + conn_id, + server_cfg, + stop_handle, + drop_on_completion, + id_provider, + methods, + local_socket_stream, + } = params; + + let ipc = IpcConn(tokio_util::codec::Decoder::framed( + StreamCodec::stream_incoming(), + local_socket_stream.compat(), + )); + + let (tx, rx) = mpsc::channel::<String>(server_cfg.message_buffer_capacity as usize); + let method_sink = MethodSink::new_with_limit(tx, server_cfg.max_response_body_size); + let tower_service = TowerServiceNoHttp { + inner: ServiceData
{ + methods, + id_provider, + stop_handle: stop_handle.clone(), + server_cfg: server_cfg.clone(), + conn_id, + conn_permit, + bounded_subscriptions: BoundedSubscriptions::new( + server_cfg.max_subscriptions_per_connection, + ), + method_sink, + }, + rpc_middleware, + }; + + let service = http_middleware.service(tower_service); + tokio::spawn(async { + to_ipc_service(ipc, service, stop_handle, rx).in_current_span().await; + drop(drop_on_completion) + }); +} + +async fn to_ipc_service<S, T>( + ipc: IpcConn<JsonRpcStream<T>>, service: S, - mut stop_handle: StopHandle, + stop_handle: StopHandle, rx: mpsc::Receiver<String>, ) where S: Service<String, Response = Option<String>> + Send + 'static, @@ -309,70 +499,34 @@ async fn spawn_connection<S, T>( S::Future: Send + Unpin, T: AsyncRead + AsyncWrite + Unpin + Send + 'static, { - let task = tokio::task::spawn(async move { - let rx_item = ReceiverStream::new(rx); - let conn = IpcConnDriver { - conn, - service, - pending_calls: Default::default(), - items: Default::default(), - }; - tokio::pin!(conn, rx_item); + let rx_item = ReceiverStream::new(rx); + let conn = IpcConnDriver { + conn: ipc, + service, + pending_calls: Default::default(), + items: Default::default(), + }; + let stopped = stop_handle.shutdown(); - loop { - tokio::select! { - _ = &mut conn => { - break - } - item = rx_item.next() => { - if let Some(item) = item { - conn.push_back(item); - } - } - _ = stop_handle.shutdown() => { - // shutdown - break + let mut conn = pin!(conn); + let mut rx_item = pin!(rx_item); + let mut stopped = pin!(stopped); + + loop { + tokio::select! { + _ = &mut conn => { + break + } + item = rx_item.next() => { + if let Some(item) = item { + conn.push_back(item); } } + _ = &mut stopped => { + // shutdown + break + } } - }); - - task.await.ok(); -} - -/// This is a glorified select listening for new messages, while also checking the `stop_receiver` -/// signal. -struct Monitored<'a, F> { - future: F, - stop_monitor: &'a StopHandle, -} - -impl<'a, F> Monitored<'a, F> { - fn new(future: F, stop_monitor: &'a StopHandle) -> Self { - Monitored { future, stop_monitor } - } -} - -enum MonitoredError<E> { - Shutdown, - Selector(E), -} - -impl<'a, T, Item> Future for Monitored<'a, Incoming<T, Item>> -where - T: Stream<Item = io::Result<Item>> + Unpin + 'static, - Item: AsyncRead + AsyncWrite, -{ - type Output = Result<IpcConn<JsonRpcStream<Item>>, MonitoredError<io::Error>>; - - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> { - let this = self.get_mut(); - - if this.stop_monitor.shutdown_requested() { - return Poll::Ready(Err(MonitoredError::Shutdown)) - } - - this.future.poll_accept(cx).map_err(MonitoredError::Selector) } } @@ -413,24 +567,26 @@ impl Default for Settings { /// Builder to configure and create a JSON-RPC server #[derive(Debug)] -pub struct Builder<B = Identity> { +pub struct Builder<HttpMiddleware = Identity, RpcMiddleware = Identity> { settings: Settings, /// Subscription ID provider. id_provider: Arc<dyn IdProvider>, - service_builder: tower::ServiceBuilder<B>, + rpc_middleware: RpcServiceBuilder<RpcMiddleware>, + http_middleware: tower::ServiceBuilder<HttpMiddleware>, } -impl Default for Builder<Identity> { +impl Default for Builder<Identity, Identity> { fn default() -> Self { Builder { settings: Settings::default(), id_provider: Arc::new(RandomIntegerIdProvider), - service_builder: tower::ServiceBuilder::new(), + rpc_middleware: RpcServiceBuilder::new(), + http_middleware: tower::ServiceBuilder::new(), } } } -impl<B> Builder<B> { +impl<HttpMiddleware, RpcMiddleware> Builder<HttpMiddleware, RpcMiddleware> {
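The `to_ipc_service` loop above boils down to one select: drive the connection, push queued subscription messages into it, and exit on shutdown. A reduced, runnable sketch of the same loop shape, with an `mpsc` channel standing in for the method sink and a `watch` channel standing in for the stop handle (toy names, not reth's):

```rust
use tokio::sync::{mpsc, watch};
use tokio_stream::{wrappers::ReceiverStream, StreamExt};

#[tokio::main]
async fn main() {
    let (tx, rx) = mpsc::channel::<String>(8);
    let (stop_tx, mut stop_rx) = watch::channel(());

    // The connection task: drain queued items until the channel closes
    // or a shutdown signal arrives, whichever happens first.
    let worker = tokio::spawn(async move {
        let mut items = ReceiverStream::new(rx);
        let mut seen = 0usize;
        loop {
            tokio::select! {
                item = items.next() => match item {
                    Some(_msg) => seen += 1, // would be written to the socket here
                    None => break,           // all senders dropped: connection is done
                },
                _ = stop_rx.changed() => break, // server-wide shutdown signal
            }
        }
        seen
    });

    tx.send("subscription notification".into()).await.unwrap();
    tx.send("another one".into()).await.unwrap();
    drop(tx); // closing the sink ends the loop gracefully
    assert_eq!(worker.await.unwrap(), 2);
    drop(stop_tx);
}
```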
/// Set the maximum size of a request body in bytes. Default is 10 MiB. pub fn max_request_body_size(mut self, size: u32) -> Self { self.settings.max_request_body_size = size; @@ -529,26 +685,105 @@ impl Builder { /// let builder = tower::ServiceBuilder::new(); /// /// let server = - /// reth_ipc::server::Builder::default().set_middleware(builder).build("/tmp/my-uds"); + /// reth_ipc::server::Builder::default().set_http_middleware(builder).build("/tmp/my-uds"); /// } /// ``` - pub fn set_middleware<T>(self, service_builder: tower::ServiceBuilder<T>) -> Builder<T> { - Builder { settings: self.settings, id_provider: self.id_provider, service_builder } + pub fn set_http_middleware<T>( + self, + service_builder: tower::ServiceBuilder<T>, + ) -> Builder<T, RpcMiddleware> { + Builder { + settings: self.settings, + id_provider: self.id_provider, + http_middleware: service_builder, + rpc_middleware: self.rpc_middleware, + } } - /// Finalize the configuration of the server. Consumes the [`Builder`]. - pub fn build(self, endpoint: impl AsRef<str>) -> IpcServer<B> { - let endpoint = Endpoint::new(endpoint.as_ref().to_string()); - self.build_with_endpoint(endpoint) + /// Enable middleware that is invoked on every JSON-RPC call. + /// + /// The middleware itself is very similar to the `tower middleware` but + /// it has a different service trait which takes &self instead of &mut self, + /// which means that you can't use built-in middleware from tower. + /// + /// Another consequence of `&self` is that you must wrap any of the middleware state in + /// a type which is Send and provides interior mutability such as `Arc`. + /// + /// The builder itself exposes a similar API as the [`tower::ServiceBuilder`] + /// where it is possible to compose layers to the middleware. + /// + /// ``` + /// use std::{ + ///     net::SocketAddr, + ///     sync::{ + ///         atomic::{AtomicUsize, Ordering}, + ///         Arc, + ///     }, + ///     time::Instant, + /// }; + /// + /// use futures_util::future::BoxFuture; + /// use jsonrpsee::{ + ///     server::{middleware::rpc::RpcServiceT, ServerBuilder}, + ///     types::Request, + ///     MethodResponse, + /// }; + /// use reth_ipc::server::{Builder, RpcServiceBuilder}; + /// + /// #[derive(Clone)] + /// struct MyMiddleware<S> { + ///     service: S, + ///     count: Arc<AtomicUsize>, + /// } + /// + /// impl<'a, S> RpcServiceT<'a> for MyMiddleware<S> + /// where + ///     S: RpcServiceT<'a> + Send + Sync + Clone + 'static, + /// { + ///     type Future = BoxFuture<'a, MethodResponse>; + /// + ///     fn call(&self, req: Request<'a>) -> Self::Future { + ///         tracing::info!("MyMiddleware processed call {}", req.method); + ///         let count = self.count.clone(); + ///         let service = self.service.clone(); + /// + ///         Box::pin(async move { + ///             let rp = service.call(req).await; + ///             // Modify the state. + ///             count.fetch_add(1, Ordering::Relaxed); + ///             rp + ///         }) + ///     } + /// } + /// + /// // Create a state per connection + /// // NOTE: The service type can be omitted once `start` is called on the server. + /// let m = RpcServiceBuilder::new().layer_fn(move |service: ()| MyMiddleware { + ///     service, + ///     count: Arc::new(AtomicUsize::new(0)), + /// }); + /// let builder = Builder::default().set_rpc_middleware(m); + /// ``` + pub fn set_rpc_middleware<T>( + self, + rpc_middleware: RpcServiceBuilder<T>, + ) -> Builder<HttpMiddleware, T> { + Builder { + settings: self.settings, + id_provider: self.id_provider, + rpc_middleware, + http_middleware: self.http_middleware, + } } /// Finalize the configuration of the server. Consumes the [`Builder`].
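Putting the two entry points together (the server is only finalized by the `build` call shown right below), a usage sketch that relies solely on methods introduced in this diff; the socket path is arbitrary:

```rust
use reth_ipc::server::{Builder, RpcServiceBuilder};

fn main() {
    // RPC middleware sees decoded JSON-RPC calls; the tower stack passed to
    // `set_http_middleware` sees the raw request string per connection.
    let _server = Builder::default()
        .set_rpc_middleware(RpcServiceBuilder::new().rpc_logger(1024))
        .set_http_middleware(tower::ServiceBuilder::new())
        .build("/tmp/reth-example.ipc");
}
```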
- pub fn build_with_endpoint(self, endpoint: Endpoint) -> IpcServer { + pub fn build(self, endpoint: impl AsRef) -> IpcServer { IpcServer { - endpoint, + endpoint: endpoint.as_ref().to_string(), cfg: self.settings, id_provider: self.id_provider, - service_builder: self.service_builder, + http_middleware: self.http_middleware, + rpc_middleware: self.rpc_middleware, } } } @@ -582,16 +817,34 @@ impl ServerHandle { } } -#[cfg(all(test, unix))] +/// For testing/examples +#[cfg(test)] +pub fn dummy_endpoint() -> String { + let num: u64 = rand::Rng::gen(&mut rand::thread_rng()); + if cfg!(windows) { + format!(r"\\.\pipe\my-pipe-{}", num) + } else { + format!(r"/tmp/my-uds-{}", num) + } +} + +#[cfg(test)] mod tests { use super::*; use crate::client::IpcClientBuilder; - use futures::future::{select, Either}; + use futures::future::select; use jsonrpsee::{ - core::client::{ClientT, Subscription, SubscriptionClientT}, - rpc_params, PendingSubscriptionSink, RpcModule, SubscriptionMessage, + core::{ + client, + client::{ClientT, Error, Subscription, SubscriptionClientT}, + params::BatchRequestBuilder, + }, + rpc_params, + types::Request, + PendingSubscriptionSink, RpcModule, SubscriptionMessage, }; - use parity_tokio_ipc::dummy_endpoint; + use reth_tracing::init_test_tracing; + use std::pin::pin; use tokio::sync::broadcast; use tokio_stream::wrappers::BroadcastStream; @@ -602,7 +855,8 @@ mod tests { let sink = pending.accept().await.unwrap(); let closed = sink.closed(); - futures::pin_mut!(closed, stream); + let mut closed = pin!(closed); + let mut stream = pin!(stream); loop { match select(closed, stream.next()).await { @@ -617,7 +871,7 @@ mod tests { // and you might want to do something smarter if it's // critical that "the most recent item" must be sent when it is produced. 
if sink.send(notif).await.is_err() { - break Ok(()) + break Ok(()); } closed = c; @@ -640,8 +894,85 @@ mod tests { } } + #[tokio::test] + async fn can_set_the_max_response_body_size() { + // init_test_tracing(); + let endpoint = dummy_endpoint(); + let server = Builder::default().max_response_body_size(100).build(&endpoint); + let mut module = RpcModule::new(()); + module.register_method("anything", |_, _| "a".repeat(101)).unwrap(); + let handle = server.start(module).await.unwrap(); + tokio::spawn(handle.stopped()); + + let client = IpcClientBuilder::default().build(endpoint).await.unwrap(); + let response: Result = client.request("anything", rpc_params![]).await; + assert!(response.unwrap_err().to_string().contains("Exceeded max limit of")); + } + + #[tokio::test] + async fn can_set_the_max_request_body_size() { + init_test_tracing(); + let endpoint = dummy_endpoint(); + let server = Builder::default().max_request_body_size(100).build(&endpoint); + let mut module = RpcModule::new(()); + module.register_method("anything", |_, _| "succeed").unwrap(); + let handle = server.start(module).await.unwrap(); + tokio::spawn(handle.stopped()); + + let client = IpcClientBuilder::default().build(endpoint).await.unwrap(); + let response: Result = + client.request("anything", rpc_params!["a".repeat(101)]).await; + assert!(response.is_err()); + let mut batch_request_builder = BatchRequestBuilder::new(); + let _ = batch_request_builder.insert("anything", rpc_params![]); + let _ = batch_request_builder.insert("anything", rpc_params![]); + let _ = batch_request_builder.insert("anything", rpc_params![]); + // the raw request string is: + // [{"jsonrpc":"2.0","id":0,"method":"anything"},{"jsonrpc":"2.0","id":1, \ + // "method":"anything"},{"jsonrpc":"2.0","id":2,"method":"anything"}]" + // which is 136 bytes, more than 100 bytes. 
+ let response: Result<client::BatchResponse<'_, String>, Error> = + client.batch_request(batch_request_builder).await; + assert!(response.is_err()); + } + + #[tokio::test] + async fn can_set_max_connections() { + init_test_tracing(); + + let endpoint = dummy_endpoint(); + let server = Builder::default().max_connections(2).build(&endpoint); + let mut module = RpcModule::new(()); + module.register_method("anything", |_, _| "succeed").unwrap(); + let handle = server.start(module).await.unwrap(); + tokio::spawn(handle.stopped()); + + let client1 = IpcClientBuilder::default().build(endpoint.clone()).await.unwrap(); + let client2 = IpcClientBuilder::default().build(endpoint.clone()).await.unwrap(); + let client3 = IpcClientBuilder::default().build(endpoint.clone()).await.unwrap(); + + let response1: Result<String, Error> = client1.request("anything", rpc_params![]).await; + let response2: Result<String, Error> = client2.request("anything", rpc_params![]).await; + let response3: Result<String, Error> = client3.request("anything", rpc_params![]).await; + + assert!(response1.is_ok()); + assert!(response2.is_ok()); + // Third connection is rejected + assert!(response3.is_err()); + + // Decrement connection count + drop(client2); + tokio::time::sleep(std::time::Duration::from_millis(100)).await; + + // Can connect again + let client4 = IpcClientBuilder::default().build(endpoint.clone()).await.unwrap(); + let response4: Result<String, Error> = client4.request("anything", rpc_params![]).await; + assert!(response4.is_ok()); + } + #[tokio::test] async fn test_rpc_request() { + init_test_tracing(); let endpoint = dummy_endpoint(); let server = Builder::default().build(&endpoint); let mut module = RpcModule::new(()); @@ -655,8 +986,33 @@ assert_eq!(response, msg); } + #[tokio::test] + async fn test_batch_request() { + let endpoint = dummy_endpoint(); + let server = Builder::default().build(&endpoint); + let mut module = RpcModule::new(()); + module.register_method("anything", |_, _| "ok").unwrap(); + let handle = server.start(module).await.unwrap(); + tokio::spawn(handle.stopped()); + + let client = IpcClientBuilder::default().build(endpoint).await.unwrap(); + let mut batch_request_builder = BatchRequestBuilder::new(); + let _ = batch_request_builder.insert("anything", rpc_params![]); + let _ = batch_request_builder.insert("anything", rpc_params![]); + let _ = batch_request_builder.insert("anything", rpc_params![]); + let result = client + .batch_request(batch_request_builder) + .await + .unwrap() + .into_ok() + .unwrap() + .collect::<Vec<String>>(); + assert_eq!(result, vec!["ok", "ok", "ok"]); + } + #[tokio::test] async fn test_ipc_modules() { + reth_tracing::init_test_tracing(); let endpoint = dummy_endpoint(); let server = Builder::default().build(&endpoint); let mut module = RpcModule::new(()); @@ -703,4 +1059,50 @@ let items = sub.take(16).collect::<Vec<_>>().await; assert_eq!(items.len(), 16); } + + #[tokio::test] + async fn test_rpc_middleware() { + #[derive(Clone)] + struct ModifyRequestIf<S>(S); + + impl<'a, S> RpcServiceT<'a> for ModifyRequestIf<S> + where + S: Send + Sync + RpcServiceT<'a>, + { + type Future = S::Future; + + fn call(&self, mut req: Request<'a>) -> Self::Future { + // Redirect `say_hello` calls to `say_goodbye`, and vice versa + if req.method == "say_hello" { + req.method = "say_goodbye".into(); + } else if req.method == "say_goodbye" { + req.method = "say_hello".into(); + } + + self.0.call(req) + } + } + + reth_tracing::init_test_tracing(); + let endpoint = dummy_endpoint(); + + let rpc_middleware = RpcServiceBuilder::new().layer_fn(ModifyRequestIf); + let server =
Builder::default().set_rpc_middleware(rpc_middleware).build(&endpoint); + + let mut module = RpcModule::new(()); + let goodbye_msg = r#"{"jsonrpc":"2.0","id":1,"result":"goodbye"}"#; + let hello_msg = r#"{"jsonrpc":"2.0","id":2,"result":"hello"}"#; + module.register_method("say_hello", move |_, _| hello_msg).unwrap(); + module.register_method("say_goodbye", move |_, _| goodbye_msg).unwrap(); + let handle = server.start(module).await.unwrap(); + tokio::spawn(handle.stopped()); + + let client = IpcClientBuilder::default().build(endpoint).await.unwrap(); + let say_hello_response: String = client.request("say_hello", rpc_params![]).await.unwrap(); + let say_goodbye_response: String = + client.request("say_goodbye", rpc_params![]).await.unwrap(); + + assert_eq!(say_hello_response, goodbye_msg); + assert_eq!(say_goodbye_response, hello_msg); + } } diff --git a/crates/rpc/ipc/src/server/rpc_service.rs b/crates/rpc/ipc/src/server/rpc_service.rs new file mode 100644 index 000000000..94e9ed2aa --- /dev/null +++ b/crates/rpc/ipc/src/server/rpc_service.rs @@ -0,0 +1,138 @@ +//! JSON-RPC service middleware. +use futures_util::future::BoxFuture; +use jsonrpsee::{ + server::{ + middleware::rpc::{ResponseFuture, RpcServiceT}, + IdProvider, + }, + types::{error::reject_too_many_subscriptions, ErrorCode, ErrorObject, Request}, + BoundedSubscriptions, ConnectionDetails, MethodCallback, MethodResponse, MethodSink, Methods, + SubscriptionState, +}; +use std::sync::Arc; + +/// JSON-RPC service middleware. +#[derive(Clone, Debug)] +pub struct RpcService { + conn_id: usize, + methods: Methods, + max_response_body_size: usize, + cfg: RpcServiceCfg, +} + +/// Configuration of the RpcService. +#[allow(dead_code)] +#[derive(Clone, Debug)] +pub(crate) enum RpcServiceCfg { + /// The server supports only calls. + OnlyCalls, + /// The server supports both method calls and subscriptions. + CallsAndSubscriptions { + bounded_subscriptions: BoundedSubscriptions, + sink: MethodSink, + id_provider: Arc<dyn IdProvider>, + }, +} + +impl RpcService { + /// Create a new service. + pub(crate) fn new( + methods: Methods, + max_response_body_size: usize, + conn_id: usize, + cfg: RpcServiceCfg, + ) -> Self { + Self { methods, max_response_body_size, conn_id, cfg } + } +} + +impl<'a> RpcServiceT<'a> for RpcService { + // The rpc module is already boxing the futures and + // it's used under the hood by the RpcService. + type Future = ResponseFuture<BoxFuture<'a, MethodResponse>>; + + fn call(&self, req: Request<'a>) -> Self::Future { + let conn_id = self.conn_id; + let max_response_body_size = self.max_response_body_size; + + let params = req.params(); + let name = req.method_name(); + let id = req.id().clone(); + + match self.methods.method_with_name(name) { + None => { + let rp = MethodResponse::error(id, ErrorObject::from(ErrorCode::MethodNotFound)); + ResponseFuture::ready(rp) + } + Some((_name, method)) => match method { + MethodCallback::Async(callback) => { + let params = params.into_owned(); + let id = id.into_owned(); + + let fut = (callback)(id, params, conn_id, max_response_body_size); + ResponseFuture::future(fut) + } + MethodCallback::AsyncWithDetails(callback) => { + let params = params.into_owned(); + let id = id.into_owned(); + + // Note: Add the `Request::extensions` to the connection details when available + // here.
+ let fut = (callback)( + id, + params, + ConnectionDetails::_new(conn_id), + max_response_body_size, + ); + ResponseFuture::future(fut) + } + MethodCallback::Sync(callback) => { + let rp = (callback)(id, params, max_response_body_size); + ResponseFuture::ready(rp) + } + MethodCallback::Subscription(callback) => { + let RpcServiceCfg::CallsAndSubscriptions { + bounded_subscriptions, + sink, + id_provider, + } = self.cfg.clone() + else { + tracing::warn!("Subscriptions not supported"); + let rp = + MethodResponse::error(id, ErrorObject::from(ErrorCode::InternalError)); + return ResponseFuture::ready(rp); + }; + + if let Some(p) = bounded_subscriptions.acquire() { + let conn_state = SubscriptionState { + conn_id, + id_provider: &*id_provider.clone(), + subscription_permit: p, + }; + + let fut = callback(id.clone(), params, sink, conn_state); + ResponseFuture::future(fut) + } else { + let max = bounded_subscriptions.max(); + let rp = MethodResponse::error(id, reject_too_many_subscriptions(max)); + ResponseFuture::ready(rp) + } + } + MethodCallback::Unsubscription(callback) => { + // Don't adhere to any resource or subscription limits; always let unsubscribing + // happen! + + let RpcServiceCfg::CallsAndSubscriptions { .. } = self.cfg else { + tracing::warn!("Subscriptions not supported"); + let rp = + MethodResponse::error(id, ErrorObject::from(ErrorCode::InternalError)); + return ResponseFuture::ready(rp); + }; + + let rp = callback(id, params, conn_id, max_response_body_size); + ResponseFuture::ready(rp) + } + }, + } + } +} diff --git a/crates/rpc/rpc-api/Cargo.toml b/crates/rpc/rpc-api/Cargo.toml index c2ada1e88..fe22eae0f 100644 --- a/crates/rpc/rpc-api/Cargo.toml +++ b/crates/rpc/rpc-api/Cargo.toml @@ -16,6 +16,7 @@ workspace = true reth-primitives.workspace = true reth-rpc-types.workspace = true reth-engine-primitives.workspace = true +reth-network-types.workspace = true # misc jsonrpsee = { workspace = true, features = ["server", "macros"] } diff --git a/crates/rpc/rpc-api/src/admin.rs b/crates/rpc/rpc-api/src/admin.rs index 7497d1205..4c31221cd 100644 --- a/crates/rpc/rpc-api/src/admin.rs +++ b/crates/rpc/rpc-api/src/admin.rs @@ -1,5 +1,6 @@ use jsonrpsee::{core::RpcResult, proc_macros::rpc}; -use reth_primitives::{AnyNode, NodeRecord}; +use reth_network_types::AnyNode; +use reth_primitives::NodeRecord; use reth_rpc_types::{admin::NodeInfo, PeerInfo}; /// Admin namespace rpc interface that gives access to several non-standard RPC methods. diff --git a/crates/rpc/rpc-api/src/engine.rs b/crates/rpc/rpc-api/src/engine.rs index 9304bbc5b..d320c7460 100644 --- a/crates/rpc/rpc-api/src/engine.rs +++ b/crates/rpc/rpc-api/src/engine.rs @@ -131,7 +131,7 @@ pub trait EngineApi { /// layer p2p specification, meaning the input should be treated as untrusted or potentially /// adversarial. /// - /// Implementors should take care when acting on the input to this method, specifically + /// Implementers should take care when acting on the input to this method, specifically /// ensuring that the range is limited properly, and that the range boundaries are computed /// correctly and without panics. 
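`RpcService::call` above is a lookup-and-dispatch: resolve the method name, then either answer immediately (`ResponseFuture::ready`) for sync handlers and errors, or hand back a future (`ResponseFuture::future`) for async handlers. A toy router showing that shape; the types here are invented stand-ins, not jsonrpsee's:

```rust
use std::{collections::HashMap, future::Future, pin::Pin, sync::Arc};

type BoxFut = Pin<Box<dyn Future<Output = String> + Send>>;

/// Toy stand-in for a registered method: either a synchronous closure
/// or an async one, mirroring `MethodCallback::Sync` / `MethodCallback::Async`.
enum Callback {
    Sync(Arc<dyn Fn(&str) -> String + Send + Sync>),
    Async(Arc<dyn Fn(String) -> BoxFut + Send + Sync>),
}

/// Either an immediately-ready response or a future, mirroring
/// `ResponseFuture::ready` / `ResponseFuture::future`.
enum Response {
    Ready(String),
    Pending(BoxFut),
}

fn dispatch(methods: &HashMap<&'static str, Callback>, name: &str, params: &str) -> Response {
    match methods.get(name) {
        None => Response::Ready(r#"{"error":"method not found"}"#.to_string()),
        Some(Callback::Sync(f)) => Response::Ready(f(params)),
        Some(Callback::Async(f)) => Response::Pending(f(params.to_string())),
    }
}

#[tokio::main]
async fn main() {
    let mut methods: HashMap<&'static str, Callback> = HashMap::new();
    methods.insert("eth_chainId", Callback::Sync(Arc::new(|_params: &str| "0x1".to_string())));
    methods.insert(
        "eth_blockNumber",
        Callback::Async(Arc::new(|_params: String| -> BoxFut {
            Box::pin(async { "0x10".to_string() })
        })),
    );

    for name in ["eth_chainId", "eth_blockNumber", "eth_nope"] {
        let rsp = match dispatch(&methods, name, "[]") {
            Response::Ready(s) => s,
            Response::Pending(fut) => fut.await,
        };
        println!("{name} -> {rsp}");
    }
}
```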
#[method(name = "getPayloadBodiesByRangeV1")] diff --git a/crates/rpc/rpc-api/src/eth.rs b/crates/rpc/rpc-api/src/eth.rs index c878a7e1c..b6c2993bb 100644 --- a/crates/rpc/rpc-api/src/eth.rs +++ b/crates/rpc/rpc-api/src/eth.rs @@ -1,12 +1,10 @@ use jsonrpsee::{core::RpcResult, proc_macros::rpc}; -use reth_primitives::{ - serde_helper::{num::U64HexOrNumber, JsonStorageKey}, - Address, BlockId, BlockNumberOrTag, Bytes, B256, B64, U256, U64, -}; +use reth_primitives::{Address, BlockId, BlockNumberOrTag, Bytes, B256, B64, U256, U64}; use reth_rpc_types::{ - state::StateOverride, AccessListWithGasUsed, AnyTransactionReceipt, BlockOverrides, Bundle, - EIP1186AccountProofResponse, EthCallResponse, FeeHistory, Header, Index, RichBlock, - StateContext, SyncStatus, Transaction, TransactionRequest, Work, + serde_helpers::JsonStorageKey, state::StateOverride, AccessListWithGasUsed, + AnyTransactionReceipt, BlockOverrides, Bundle, EIP1186AccountProofResponse, EthCallResponse, + FeeHistory, Header, Index, RichBlock, StateContext, SyncStatus, Transaction, + TransactionRequest, Work, }; /// Eth rpc interface: @@ -247,7 +245,7 @@ pub trait EthApi { #[method(name = "feeHistory")] async fn fee_history( &self, - block_count: U64HexOrNumber, + block_count: u64, newest_block: BlockNumberOrTag, reward_percentiles: Option>, ) -> RpcResult; diff --git a/crates/rpc/rpc-api/src/ganache.rs b/crates/rpc/rpc-api/src/ganache.rs index 0156f074a..338c91498 100644 --- a/crates/rpc/rpc-api/src/ganache.rs +++ b/crates/rpc/rpc-api/src/ganache.rs @@ -34,7 +34,7 @@ pub trait GanacheApi { /// is the snapshot id to revert to. This deletes the given snapshot, as well as any snapshots /// taken after (e.g.: reverting to id 0x1 will delete snapshots with ids 0x1, 0x2, etc.). /// - /// Reutnrs `true` if a snapshot was reverted, otherwise `false`. + /// Returns `true` if a snapshot was reverted, otherwise `false`. 
#[method(name = "revert")] async fn evm_revert(&self, snapshot_id: U256) -> RpcResult; diff --git a/crates/rpc/rpc-api/src/optimism.rs b/crates/rpc/rpc-api/src/optimism.rs index 80d60415d..3ff7c6ce3 100644 --- a/crates/rpc/rpc-api/src/optimism.rs +++ b/crates/rpc/rpc-api/src/optimism.rs @@ -2,18 +2,10 @@ #![allow(unreachable_pub)] use jsonrpsee::{core::RpcResult, proc_macros::rpc}; use reth_primitives::{Address, BlockNumber, ChainId, B256}; -use reth_rpc_types::BlockNumberOrTag; +use reth_rpc_types::{BlockId, BlockNumberOrTag}; use serde::{Deserialize, Serialize}; use std::{collections::HashMap, net::IpAddr}; -/// todo: move to reth_rpc_types - -#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -pub struct BlockId { - pub hash: B256, - pub number: BlockNumber, -} - // https://github.com/ethereum-optimism/optimism/blob/develop/op-service/eth/id.go#L33 #[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] @@ -329,19 +321,19 @@ mod tests { #[test] fn test_output_response() { - let output_response_json = r#"{"version":"0x0000000000000000000000000000000000000000000000000000000000000000","outputRoot":"0xf1119e7d0fef8c54ab799be80fc61f503cea4e5c0aa1cf7ac104ef3a104f3bd1","blockRef":{"hash":"0x6d39c46aabc847f5f2664a22bbc5f65a57286603095a9ebc946d1ed19ef4925c","number":118818299,"parentHash":"0x8a0876a165da864c223d30e444b1c003fb59920c88dfb12157c0f83826e0f8ed","timestamp":1713235375,"l1origin":{"hash":"0x807da416f5aaa26fa228e0cf53e76fab783b56d7996c717663335b40e0b28824","number":19665136},"sequenceNumber":4},"withdrawalStorageRoot":"0x5c9a29a8ad2ecf97fb4bdea74c715fd2c13fa87d4861414478bc4579601c3585","stateRoot":"0x16849c0a93d00bb2d7ceacda11a1478854d2bbb0a377b4d6793b67a3f05eb6fe","syncStatus":{"current_l1":{"hash":"0x2f0f186d0fece338aa563f5dfc49a73cba5607445ff87aca833fd1d6833c5e05","number":19661406,"parentHash":"0x2c7c564d2960c8035fa6962ebf071668fdcdf8ca004bca5adfd04166ce32aacc","timestamp":1713190115},"current_l1_finalized":{"hash":"0xbd916c8552f5dcd68d2cc836a4d173426e85e6625845cfb3fb60610d383670db","number":19665084,"parentHash":"0xe16fade2cddae87d0f9487600481f980619a138de735c97626239edf08c53275","timestamp":1713234647},"head_l1":{"hash":"0xf98493dcc3d82fe9af339c0a81b0f96172a56764f9abcff464c740e0cb3ccee7","number":19665175,"parentHash":"0xfbab86e5b807916c7ddfa395db794cdf4162128b9770eb8eb829679d81d74328","timestamp":1713235763},"safe_l1":{"hash":"0xfb8f07e551eb65c3282aaefe9a4954c15672e0077b2a5a1db18fcd2126cbc922","number":19665115,"parentHash":"0xfc0d62788fb9cda1cacb54a0e53ca398289436a6b68d1ba69db2942500b4ce5f","timestamp":1713235031},"finalized_l1":{"hash":"0xbd916c8552f5dcd68d2cc836a4d173426e85e6625845cfb3fb60610d383670db","number":19665084,"parentHash":"0xe16fade2cddae87d0f9487600481f980619a138de735c97626239edf08c53275","timestamp":1713234647},"unsafe_l2":{"hash":"0x3540517a260316758a4872f7626e8b9e009968b6d8cfa9c11bfd3a03e7656bd5","number":118818499,"parentHash":"0x09f30550e6d6f217691e185bf1a2b4665b83f43fc8dbcc68c0bfd513e6805590","timestamp":1713235775,"l1origin":{"hash":"0x036003c1c6561123a2f6573b7a34e9598bd023199e259d91765ee2c8677d9c07","number":19665170},"sequenceNumber":0},"safe_l2":{"hash":"0x2e8c339104e3ce0a81c636a10ea9181acbfd3c195d43f2f2dacce8f869b1cca8","number":118795493,"parentHash":"0xaac10ffe0a2cbd572a0ee8aa0b09341ad7bbec491f0bf328dd526637617b1b4a","timestamp":1713189763,"l1origin":{"hash":"0x55c6ed6a81829e9dffc9c968724af657fcf8e0b497188d05476e94801eb483ce","number":19661371},"sequenceNumber":1},"finalized_l2":{"hash":"0x2e8c339104
e3ce0a81c636a10ea9181acbfd3c195d43f2f2dacce8f869b1cca8","number":118795493,"parentHash":"0xaac10ffe0a2cbd572a0ee8aa0b09341ad7bbec491f0bf328dd526637617b1b4a","timestamp":1713189763,"l1origin":{"hash":"0x55c6ed6a81829e9dffc9c968724af657fcf8e0b497188d05476e94801eb483ce","number":19661371},"sequenceNumber":1},"pending_safe_l2":{"hash":"0x2e8c339104e3ce0a81c636a10ea9181acbfd3c195d43f2f2dacce8f869b1cca8","number":118795493,"parentHash":"0xaac10ffe0a2cbd572a0ee8aa0b09341ad7bbec491f0bf328dd526637617b1b4a","timestamp":1713189763,"l1origin":{"hash":"0x55c6ed6a81829e9dffc9c968724af657fcf8e0b497188d05476e94801eb483ce","number":19661371},"sequenceNumber":1}}}"#; + let output_response_json = r#"{"version":"0x0000000000000000000000000000000000000000000000000000000000000000","outputRoot":"0xf1119e7d0fef8c54ab799be80fc61f503cea4e5c0aa1cf7ac104ef3a104f3bd1","blockRef":{"hash":"0x6d39c46aabc847f5f2664a22bbc5f65a57286603095a9ebc946d1ed19ef4925c","number":118818299,"parentHash":"0x8a0876a165da864c223d30e444b1c003fb59920c88dfb12157c0f83826e0f8ed","timestamp":1713235375,"l1origin":{"blockHash":"0x807da416f5aaa26fa228e0cf53e76fab783b56d7996c717663335b40e0b28824"},"sequenceNumber":4},"withdrawalStorageRoot":"0x5c9a29a8ad2ecf97fb4bdea74c715fd2c13fa87d4861414478bc4579601c3585","stateRoot":"0x16849c0a93d00bb2d7ceacda11a1478854d2bbb0a377b4d6793b67a3f05eb6fe","syncStatus":{"current_l1":{"hash":"0x2f0f186d0fece338aa563f5dfc49a73cba5607445ff87aca833fd1d6833c5e05","number":19661406,"parentHash":"0x2c7c564d2960c8035fa6962ebf071668fdcdf8ca004bca5adfd04166ce32aacc","timestamp":1713190115},"current_l1_finalized":{"hash":"0xbd916c8552f5dcd68d2cc836a4d173426e85e6625845cfb3fb60610d383670db","number":19665084,"parentHash":"0xe16fade2cddae87d0f9487600481f980619a138de735c97626239edf08c53275","timestamp":1713234647},"head_l1":{"hash":"0xf98493dcc3d82fe9af339c0a81b0f96172a56764f9abcff464c740e0cb3ccee7","number":19665175,"parentHash":"0xfbab86e5b807916c7ddfa395db794cdf4162128b9770eb8eb829679d81d74328","timestamp":1713235763},"safe_l1":{"hash":"0xfb8f07e551eb65c3282aaefe9a4954c15672e0077b2a5a1db18fcd2126cbc922","number":19665115,"parentHash":"0xfc0d62788fb9cda1cacb54a0e53ca398289436a6b68d1ba69db2942500b4ce5f","timestamp":1713235031},"finalized_l1":{"hash":"0xbd916c8552f5dcd68d2cc836a4d173426e85e6625845cfb3fb60610d383670db","number":19665084,"parentHash":"0xe16fade2cddae87d0f9487600481f980619a138de735c97626239edf08c53275","timestamp":1713234647},"unsafe_l2":{"hash":"0x3540517a260316758a4872f7626e8b9e009968b6d8cfa9c11bfd3a03e7656bd5","number":118818499,"parentHash":"0x09f30550e6d6f217691e185bf1a2b4665b83f43fc8dbcc68c0bfd513e6805590","timestamp":1713235775,"l1origin":{"blockHash":"0x036003c1c6561123a2f6573b7a34e9598bd023199e259d91765ee2c8677d9c07"},"sequenceNumber":0},"safe_l2":{"hash":"0x2e8c339104e3ce0a81c636a10ea9181acbfd3c195d43f2f2dacce8f869b1cca8","number":118795493,"parentHash":"0xaac10ffe0a2cbd572a0ee8aa0b09341ad7bbec491f0bf328dd526637617b1b4a","timestamp":1713189763,"l1origin":{"blockHash":"0x55c6ed6a81829e9dffc9c968724af657fcf8e0b497188d05476e94801eb483ce"},"sequenceNumber":1},"finalized_l2":{"hash":"0x2e8c339104e3ce0a81c636a10ea9181acbfd3c195d43f2f2dacce8f869b1cca8","number":118795493,"parentHash":"0xaac10ffe0a2cbd572a0ee8aa0b09341ad7bbec491f0bf328dd526637617b1b4a","timestamp":1713189763,"l1origin":{"blockHash":"0x55c6ed6a81829e9dffc9c968724af657fcf8e0b497188d05476e94801eb483ce"},"sequenceNumber":1},"pending_safe_l2":{"hash":"0x2e8c339104e3ce0a81c636a10ea9181acbfd3c195d43f2f2dacce8f869b1cca8","number":118795493,"parentHash":"0xa
ac10ffe0a2cbd572a0ee8aa0b09341ad7bbec491f0bf328dd526637617b1b4a","timestamp":1713189763,"l1origin":{"blockHash":"0x55c6ed6a81829e9dffc9c968724af657fcf8e0b497188d05476e94801eb483ce"},"sequenceNumber":1}}}"#; test_helper::(output_response_json); } #[test] fn serialize_sync_status() { - let sync_status_json = r#"{"current_l1":{"hash":"0x2f0f186d0fece338aa563f5dfc49a73cba5607445ff87aca833fd1d6833c5e05","number":19661406,"parentHash":"0x2c7c564d2960c8035fa6962ebf071668fdcdf8ca004bca5adfd04166ce32aacc","timestamp":1713190115},"current_l1_finalized":{"hash":"0x4d769506bbfe27051715225af5ec4189f6bbd235b6d32db809dd8f5a03737b03","number":19665052,"parentHash":"0xc6324687f2baf8cc48eebd15df3a461b2b2838b5f5b16615531fc31788edb8c4","timestamp":1713234263},"head_l1":{"hash":"0xfc5ab77c6c08662a3b4d85b8c86010b7aecfc2c0369e4458f80357530db8e919","number":19665141,"parentHash":"0x099792a293002b987f3507524b28614f399b2b5ed607788520963c251844113c","timestamp":1713235355},"safe_l1":{"hash":"0xbd916c8552f5dcd68d2cc836a4d173426e85e6625845cfb3fb60610d383670db","number":19665084,"parentHash":"0xe16fade2cddae87d0f9487600481f980619a138de735c97626239edf08c53275","timestamp":1713234647},"finalized_l1":{"hash":"0x4d769506bbfe27051715225af5ec4189f6bbd235b6d32db809dd8f5a03737b03","number":19665052,"parentHash":"0xc6324687f2baf8cc48eebd15df3a461b2b2838b5f5b16615531fc31788edb8c4","timestamp":1713234263},"unsafe_l2":{"hash":"0x6d39c46aabc847f5f2664a22bbc5f65a57286603095a9ebc946d1ed19ef4925c","number":118818299,"parentHash":"0x8a0876a165da864c223d30e444b1c003fb59920c88dfb12157c0f83826e0f8ed","timestamp":1713235375,"l1origin":{"hash":"0x807da416f5aaa26fa228e0cf53e76fab783b56d7996c717663335b40e0b28824","number":19665136},"sequenceNumber":4},"safe_l2":{"hash":"0x2e8c339104e3ce0a81c636a10ea9181acbfd3c195d43f2f2dacce8f869b1cca8","number":118795493,"parentHash":"0xaac10ffe0a2cbd572a0ee8aa0b09341ad7bbec491f0bf328dd526637617b1b4a","timestamp":1713189763,"l1origin":{"hash":"0x55c6ed6a81829e9dffc9c968724af657fcf8e0b497188d05476e94801eb483ce","number":19661371},"sequenceNumber":1},"finalized_l2":{"hash":"0x2e8c339104e3ce0a81c636a10ea9181acbfd3c195d43f2f2dacce8f869b1cca8","number":118795493,"parentHash":"0xaac10ffe0a2cbd572a0ee8aa0b09341ad7bbec491f0bf328dd526637617b1b4a","timestamp":1713189763,"l1origin":{"hash":"0x55c6ed6a81829e9dffc9c968724af657fcf8e0b497188d05476e94801eb483ce","number":19661371},"sequenceNumber":1},"pending_safe_l2":{"hash":"0x2e8c339104e3ce0a81c636a10ea9181acbfd3c195d43f2f2dacce8f869b1cca8","number":118795493,"parentHash":"0xaac10ffe0a2cbd572a0ee8aa0b09341ad7bbec491f0bf328dd526637617b1b4a","timestamp":1713189763,"l1origin":{"hash":"0x55c6ed6a81829e9dffc9c968724af657fcf8e0b497188d05476e94801eb483ce","number":19661371},"sequenceNumber":1}}"#; + let sync_status_json = 
r#"{"current_l1":{"hash":"0x2f0f186d0fece338aa563f5dfc49a73cba5607445ff87aca833fd1d6833c5e05","number":19661406,"parentHash":"0x2c7c564d2960c8035fa6962ebf071668fdcdf8ca004bca5adfd04166ce32aacc","timestamp":1713190115},"current_l1_finalized":{"hash":"0x4d769506bbfe27051715225af5ec4189f6bbd235b6d32db809dd8f5a03737b03","number":19665052,"parentHash":"0xc6324687f2baf8cc48eebd15df3a461b2b2838b5f5b16615531fc31788edb8c4","timestamp":1713234263},"head_l1":{"hash":"0xfc5ab77c6c08662a3b4d85b8c86010b7aecfc2c0369e4458f80357530db8e919","number":19665141,"parentHash":"0x099792a293002b987f3507524b28614f399b2b5ed607788520963c251844113c","timestamp":1713235355},"safe_l1":{"hash":"0xbd916c8552f5dcd68d2cc836a4d173426e85e6625845cfb3fb60610d383670db","number":19665084,"parentHash":"0xe16fade2cddae87d0f9487600481f980619a138de735c97626239edf08c53275","timestamp":1713234647},"finalized_l1":{"hash":"0x4d769506bbfe27051715225af5ec4189f6bbd235b6d32db809dd8f5a03737b03","number":19665052,"parentHash":"0xc6324687f2baf8cc48eebd15df3a461b2b2838b5f5b16615531fc31788edb8c4","timestamp":1713234263},"unsafe_l2":{"hash":"0x6d39c46aabc847f5f2664a22bbc5f65a57286603095a9ebc946d1ed19ef4925c","number":118818299,"parentHash":"0x8a0876a165da864c223d30e444b1c003fb59920c88dfb12157c0f83826e0f8ed","timestamp":1713235375,"l1origin":{"blockHash":"0x807da416f5aaa26fa228e0cf53e76fab783b56d7996c717663335b40e0b28824"},"sequenceNumber":4},"safe_l2":{"hash":"0x2e8c339104e3ce0a81c636a10ea9181acbfd3c195d43f2f2dacce8f869b1cca8","number":118795493,"parentHash":"0xaac10ffe0a2cbd572a0ee8aa0b09341ad7bbec491f0bf328dd526637617b1b4a","timestamp":1713189763,"l1origin":{"blockHash":"0x55c6ed6a81829e9dffc9c968724af657fcf8e0b497188d05476e94801eb483ce"},"sequenceNumber":1},"finalized_l2":{"hash":"0x2e8c339104e3ce0a81c636a10ea9181acbfd3c195d43f2f2dacce8f869b1cca8","number":118795493,"parentHash":"0xaac10ffe0a2cbd572a0ee8aa0b09341ad7bbec491f0bf328dd526637617b1b4a","timestamp":1713189763,"l1origin":{"blockHash":"0x55c6ed6a81829e9dffc9c968724af657fcf8e0b497188d05476e94801eb483ce"},"sequenceNumber":1},"pending_safe_l2":{"hash":"0x2e8c339104e3ce0a81c636a10ea9181acbfd3c195d43f2f2dacce8f869b1cca8","number":118795493,"parentHash":"0xaac10ffe0a2cbd572a0ee8aa0b09341ad7bbec491f0bf328dd526637617b1b4a","timestamp":1713189763,"l1origin":{"blockHash":"0x55c6ed6a81829e9dffc9c968724af657fcf8e0b497188d05476e94801eb483ce"},"sequenceNumber":1}}"#; test_helper::(sync_status_json); } #[test] fn test_rollup_config() { - let rollup_config_json = 
r#"{"genesis":{"l1":{"hash":"0x438335a20d98863a4c0c97999eb2481921ccd28553eac6f913af7c12aec04108","number":17422590},"l2":{"hash":"0xdbf6a80fef073de06add9b0d14026d6e5a86c85f6d102c36d3d8e9cf89c2afd3","number":105235063},"l2_time":1686068903,"system_config":{"batcherAddr":"0x6887246668a3b87f54deb3b94ba47a6f63f32985","overhead":"0x00000000000000000000000000000000000000000000000000000000000000bc","scalar":"0x00000000000000000000000000000000000000000000000000000000000a6fe0","gasLimit":30000000}},"block_time":2,"max_sequencer_drift":600,"seq_window_size":3600,"channel_timeout":300,"l1_chain_id":1,"l2_chain_id":10,"regolith_time":0,"canyon_time":1704992401,"delta_time":1708560000,"ecotone_time":1710374401,"batch_inbox_address":"0xff00000000000000000000000000000000000010","deposit_contract_address":"0xbeb5fc579115071764c7423a4f12edde41f106ed","l1_system_config_address":"0x229047fed2591dbec1ef1118d64f7af3db9eb290","protocol_versions_address":"0x8062abc286f5e7d9428a0ccb9abd71e50d93b935","da_challenge_address":"0x0000000000000000000000000000000000000000","da_challenge_window":0,"da_resolve_window":0,"use_plasma":false}"#; + let rollup_config_json = r#"{"genesis":{"l1":{"blockHash":"0x438335a20d98863a4c0c97999eb2481921ccd28553eac6f913af7c12aec04108"},"l2":{"blockHash":"0xdbf6a80fef073de06add9b0d14026d6e5a86c85f6d102c36d3d8e9cf89c2afd3"},"l2_time":1686068903,"system_config":{"batcherAddr":"0x6887246668a3b87f54deb3b94ba47a6f63f32985","overhead":"0x00000000000000000000000000000000000000000000000000000000000000bc","scalar":"0x00000000000000000000000000000000000000000000000000000000000a6fe0","gasLimit":30000000}},"block_time":2,"max_sequencer_drift":600,"seq_window_size":3600,"channel_timeout":300,"l1_chain_id":1,"l2_chain_id":10,"regolith_time":0,"canyon_time":1704992401,"delta_time":1708560000,"ecotone_time":1710374401,"batch_inbox_address":"0xff00000000000000000000000000000000000010","deposit_contract_address":"0xbeb5fc579115071764c7423a4f12edde41f106ed","l1_system_config_address":"0x229047fed2591dbec1ef1118d64f7af3db9eb290","protocol_versions_address":"0x8062abc286f5e7d9428a0ccb9abd71e50d93b935","da_challenge_address":"0x0000000000000000000000000000000000000000","da_challenge_window":0,"da_resolve_window":0,"use_plasma":false}"#; test_helper::(rollup_config_json); } diff --git a/crates/rpc/rpc-builder/Cargo.toml b/crates/rpc/rpc-builder/Cargo.toml index ef79a7ed3..7e198c998 100644 --- a/crates/rpc/rpc-builder/Cargo.toml +++ b/crates/rpc/rpc-builder/Cargo.toml @@ -44,7 +44,8 @@ tracing.workspace = true reth-beacon-consensus.workspace = true reth-interfaces = { workspace = true, features = ["test-utils"] } reth-network-api.workspace = true -reth-node-ethereum.workspace = true +reth-evm-ethereum.workspace = true +reth-ethereum-engine-primitives.workspace = true reth-payload-builder = { workspace = true, features = ["test-utils"] } reth-primitives.workspace = true reth-provider = { workspace = true, features = ["test-utils"] } diff --git a/crates/rpc/rpc-builder/src/auth.rs b/crates/rpc/rpc-builder/src/auth.rs index 2349c6e85..186d61332 100644 --- a/crates/rpc/rpc-builder/src/auth.rs +++ b/crates/rpc/rpc-builder/src/auth.rs @@ -13,8 +13,9 @@ use jsonrpsee::{ server::{AlreadyStoppedError, RpcModule}, Methods, }; -pub use reth_ipc::server::{Builder as IpcServerBuilder, Endpoint}; +pub use reth_ipc::server::Builder as IpcServerBuilder; +use jsonrpsee::http_client::transport::HttpBackend; use reth_engine_primitives::EngineTypes; use reth_evm::ConfigureEvm; use reth_network_api::{NetworkInfo, Peers}; @@ -27,17 
+28,13 @@ use reth_rpc::{ cache::EthStateCache, gas_oracle::GasPriceOracle, EthFilterConfig, FeeHistoryCache, FeeHistoryCacheConfig, }, - AuthLayer, Claims, EngineEthApi, EthApi, EthFilter, EthSubscriptionIdProvider, - JwtAuthValidator, JwtSecret, + secret_to_bearer_header, AuthClientLayer, AuthClientService, AuthLayer, EngineEthApi, EthApi, + EthFilter, EthSubscriptionIdProvider, JwtAuthValidator, JwtSecret, }; use reth_rpc_api::servers::*; use reth_tasks::{pool::BlockingTaskPool, TaskSpawner}; use reth_transaction_pool::TransactionPool; -use std::{ - fmt, - net::{IpAddr, Ipv4Addr, SocketAddr}, - time::{Duration, SystemTime, UNIX_EPOCH}, -}; +use std::net::{IpAddr, Ipv4Addr, SocketAddr}; use tower::layer::util::Identity; /// Configure and launch a _standalone_ auth server with `engine` and a _new_ `eth` namespace. @@ -161,7 +158,7 @@ pub struct AuthServerConfig { /// Configs for JSON-RPC Http. pub(crate) server_config: ServerBuilder, /// Configs for IPC server - pub(crate) ipc_server_config: Option, + pub(crate) ipc_server_config: Option>, /// IPC endpoint pub(crate) ipc_endpoint: Option, } @@ -205,8 +202,7 @@ impl AuthServerConfig { let ipc_endpoint_str = ipc_endpoint .clone() .unwrap_or_else(|| constants::DEFAULT_ENGINE_API_IPC_ENDPOINT.to_string()); - let ipc_path = Endpoint::new(ipc_endpoint_str); - let ipc_server = ipc_server_config.build(ipc_path.path()); + let ipc_server = ipc_server_config.build(ipc_endpoint_str); let res = ipc_server .start(module.inner) .await @@ -219,26 +215,15 @@ impl AuthServerConfig { } /// Builder type for configuring an `AuthServerConfig`. +#[derive(Debug)] pub struct AuthServerConfigBuilder { socket_addr: Option, secret: JwtSecret, server_config: Option>, - ipc_server_config: Option, + ipc_server_config: Option>, ipc_endpoint: Option, } -impl fmt::Debug for AuthServerConfigBuilder { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("AuthServerConfig") - .field("socket_addr", &self.socket_addr) - .field("secret", &self.secret) - .field("server_config", &self.server_config) - .field("ipc_server_config", &self.ipc_server_config) - .field("ipc_endpoint", &self.ipc_endpoint) - .finish() - } -} - // === impl AuthServerConfigBuilder === impl AuthServerConfigBuilder { @@ -289,7 +274,7 @@ impl AuthServerConfigBuilder { /// Configures the IPC server /// /// Note: this always configures an [EthSubscriptionIdProvider] - pub fn with_ipc_config(mut self, config: IpcServerBuilder) -> Self { + pub fn with_ipc_config(mut self, config: IpcServerBuilder) -> Self { self.ipc_server_config = Some(config.set_id_provider(EthSubscriptionIdProvider::default())); self } @@ -410,32 +395,27 @@ impl AuthServerHandle { format!("ws://{}", self.local_addr) } - fn bearer(&self) -> String { - format!( - "Bearer {}", - self.secret - .encode(&Claims { - iat: (SystemTime::now().duration_since(UNIX_EPOCH).unwrap() + - Duration::from_secs(60)) - .as_secs(), - exp: None, - }) - .unwrap() - ) - } - /// Returns a http client connected to the server. - pub fn http_client(&self) -> jsonrpsee::http_client::HttpClient { + pub fn http_client( + &self, + ) -> jsonrpsee::http_client::HttpClient> { + // Create a middleware that adds a new JWT token to every request. 
+ let secret_layer = AuthClientLayer::new(self.secret.clone()); + let middleware = tower::ServiceBuilder::default().layer(secret_layer); jsonrpsee::http_client::HttpClientBuilder::default() - .set_headers(HeaderMap::from_iter([(AUTHORIZATION, self.bearer().parse().unwrap())])) + .set_http_middleware(middleware) .build(self.http_url()) .expect("Failed to create http client") } - /// Returns a ws client connected to the server. + /// Returns a ws client connected to the server. Note that the connection can only + /// be established within 1 minute due to the JWT token expiration. pub async fn ws_client(&self) -> jsonrpsee::ws_client::WsClient { jsonrpsee::ws_client::WsClientBuilder::default() - .set_headers(HeaderMap::from_iter([(AUTHORIZATION, self.bearer().parse().unwrap())])) + .set_headers(HeaderMap::from_iter([( + AUTHORIZATION, + secret_to_bearer_header(&self.secret), + )])) .build(self.ws_url()) .await .expect("Failed to create ws client") @@ -449,7 +429,7 @@ impl AuthServerHandle { if let Some(ipc_endpoint) = self.ipc_endpoint.clone() { return Some( IpcClientBuilder::default() - .build(Endpoint::new(ipc_endpoint).path()) + .build(ipc_endpoint) .await .expect("Failed to create ipc client"), ) @@ -463,10 +443,7 @@ impl AuthServerHandle { } /// Return an ipc endpoint - pub fn ipc_endpoint(&self) -> Option<Endpoint> { - if let Some(ipc_endpoint) = self.ipc_endpoint.clone() { - return Some(Endpoint::new(ipc_endpoint)) - } - None + pub fn ipc_endpoint(&self) -> Option<String> { + self.ipc_endpoint.clone() } } diff --git a/crates/rpc/rpc-builder/src/cors.rs b/crates/rpc/rpc-builder/src/cors.rs index 73e755f9f..46ff722ac 100644 --- a/crates/rpc/rpc-builder/src/cors.rs +++ b/crates/rpc/rpc-builder/src/cors.rs @@ -3,7 +3,7 @@ use tower_http::cors::{AllowOrigin, Any, CorsLayer}; /// Error thrown when parsing cors domains went wrong #[derive(Debug, thiserror::Error)] -pub(crate) enum CorsDomainError { +pub enum CorsDomainError { #[error("{domain} is an invalid header value")] InvalidHeader { domain: String }, #[error("wildcard origin (`*`) cannot be passed as part of a list: {input}")] diff --git a/crates/rpc/rpc-builder/src/error.rs b/crates/rpc/rpc-builder/src/error.rs index fd59536f7..68a2183fe 100644 --- a/crates/rpc/rpc-builder/src/error.rs +++ b/crates/rpc/rpc-builder/src/error.rs @@ -1,4 +1,4 @@ -use crate::RethRpcModule; +use crate::{cors::CorsDomainError, RethRpcModule}; use reth_ipc::server::IpcServerStartError; use std::{io, io::ErrorKind, net::SocketAddr}; @@ -57,6 +57,9 @@ pub enum RpcError { /// IO error. error: io::Error, }, + /// Cors parsing error. + #[error(transparent)] + Cors(#[from] CorsDomainError),
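The auth changes above replace the hand-rolled bearer header with `AuthClientLayer` / `secret_to_bearer_header`, but the claim shape is unchanged from the deleted `bearer()` helper: an `iat` set one minute ahead and no `exp`, which is why the ws client doc now warns about the 1-minute window. A sketch of that shape, with a dummy encoder standing in for the real JWT signing done by `JwtSecret::encode`:

```rust
use std::time::{Duration, SystemTime, UNIX_EPOCH};

struct Claims {
    iat: u64,
    exp: Option<u64>,
}

fn bearer_header(encode: impl Fn(&Claims) -> String) -> String {
    // Issue the claim one minute ahead, matching the removed `bearer()` helper.
    let iat = (SystemTime::now().duration_since(UNIX_EPOCH).unwrap() + Duration::from_secs(60))
        .as_secs();
    format!("Bearer {}", encode(&Claims { iat, exp: None }))
}

fn main() {
    // Dummy encoder: a real implementation would produce a signed JWT.
    let header = bearer_header(|claims| format!("header.iat-{}-exp-{:?}.signature", claims.iat, claims.exp));
    assert!(header.starts_with("Bearer "));
    println!("{header}");
}
```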
#[error(transparent)] WsHttpSamePortError(#[from] WsHttpSamePortError), diff --git a/crates/rpc/rpc-builder/src/lib.rs b/crates/rpc/rpc-builder/src/lib.rs index ef5b8868c..7d86a0056 100644 --- a/crates/rpc/rpc-builder/src/lib.rs +++ b/crates/rpc/rpc-builder/src/lib.rs @@ -156,8 +156,8 @@ #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] use crate::{ - auth::AuthRpcModule, error::WsHttpSamePortError, metrics::RpcRequestMetrics, - RpcModuleSelection::Selection, + auth::AuthRpcModule, cors::CorsDomainError, error::WsHttpSamePortError, + metrics::RpcRequestMetrics, RpcModuleSelection::Selection, }; use constants::*; use error::{RpcError, ServerKind}; @@ -171,7 +171,9 @@ use jsonrpsee::{ use reth_engine_primitives::EngineTypes; use reth_evm::ConfigureEvm; use reth_ipc::server::IpcServer; -pub use reth_ipc::server::{Builder as IpcServerBuilder, Endpoint}; +pub use reth_ipc::server::{ + Builder as IpcServerBuilder, RpcServiceBuilder as IpcRpcServiceBuilder, +}; use reth_network_api::{noop::NoopNetwork, NetworkInfo, Peers}; use reth_provider::{ AccountReader, BlockReader, BlockReaderIdExt, CanonStateSubscriptions, ChainSpecProvider, @@ -715,7 +717,7 @@ impl RpcModuleSelection { /// Creates a new [RpcModule] based on the configured reth modules. /// - /// Note: This will always create new instance of the module handlers and is therefor only + /// Note: This will always create a new instance of the module handlers and is therefore only /// recommended for launching standalone transports. If multiple transports need to be /// configured it's recommended to use the [RpcModuleBuilder]. #[allow(clippy::too_many_arguments)] @@ -1039,12 +1041,12 @@ where Network: NetworkInfo + Peers + Clone + 'static, { /// Instantiates AdminApi - pub fn admin_api(&mut self) -> AdminApi { + pub fn admin_api(&self) -> AdminApi { AdminApi::new(self.network.clone(), self.provider.chain_spec()) } /// Instantiates Web3Api - pub fn web3_api(&mut self) -> Web3Api { + pub fn web3_api(&self) -> Web3Api { Web3Api::new(self.network.clone()) } @@ -1441,7 +1443,7 @@ where } /// Instantiates RethApi - pub fn reth_api(&mut self) -> RethApi { + pub fn reth_api(&self) -> RethApi { RethApi::new(self.provider.clone(), Box::new(self.executor.clone())) } } @@ -1457,7 +1459,7 @@ where /// /// Once the [RpcModule] is built via [RpcModuleBuilder] the servers can be started, See also /// [ServerBuilder::build] and [Server::start](jsonrpsee::server::Server::start). -#[derive(Default)] +#[derive(Default, Debug)] pub struct RpcServerConfig { /// Configs for JSON-RPC Http.
http_server_config: Option>, @@ -1472,28 +1474,13 @@ pub struct RpcServerConfig { /// Address where to bind the ws server to ws_addr: Option, /// Configs for JSON-RPC IPC server - ipc_server_config: Option, + ipc_server_config: Option>, /// The Endpoint where to launch the ipc server - ipc_endpoint: Option, + ipc_endpoint: Option, /// JWT secret for authentication jwt_secret: Option, } -impl fmt::Debug for RpcServerConfig { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("RpcServerConfig") - .field("http_server_config", &self.http_server_config) - .field("http_cors_domains", &self.http_cors_domains) - .field("http_addr", &self.http_addr) - .field("ws_server_config", &self.ws_server_config) - .field("ws_addr", &self.ws_addr) - .field("ipc_server_config", &self.ipc_server_config) - .field("ipc_endpoint", &self.ipc_endpoint.as_ref().map(|endpoint| endpoint.path())) - .field("jwt_secret", &self.jwt_secret) - .finish() - } -} - /// === impl RpcServerConfig === impl RpcServerConfig { @@ -1508,7 +1495,7 @@ impl RpcServerConfig { } /// Creates a new config with only ipc set - pub fn ipc(config: IpcServerBuilder) -> Self { + pub fn ipc(config: IpcServerBuilder) -> Self { Self::default().with_ipc(config) } @@ -1568,7 +1555,7 @@ impl RpcServerConfig { /// /// Note: this always configures an [EthSubscriptionIdProvider] [IdProvider] for convenience. /// To set a custom [IdProvider], please use [Self::with_id_provider]. - pub fn with_ipc(mut self, config: IpcServerBuilder) -> Self { + pub fn with_ipc(mut self, config: IpcServerBuilder) -> Self { self.ipc_server_config = Some(config.set_id_provider(EthSubscriptionIdProvider::default())); self } @@ -1597,7 +1584,7 @@ impl RpcServerConfig { /// /// Default is [DEFAULT_IPC_ENDPOINT] pub fn with_ipc_endpoint(mut self, path: impl Into) -> Self { - self.ipc_endpoint = Some(Endpoint::new(path.into())); + self.ipc_endpoint = Some(path.into()); self } @@ -1626,9 +1613,9 @@ impl RpcServerConfig { self.ws_addr } - /// Returns the [Endpoint] of the ipc server - pub fn ipc_endpoint(&self) -> Option<&Endpoint> { - self.ipc_endpoint.as_ref() + /// Returns the endpoint of the ipc server + pub fn ipc_endpoint(&self) -> Option { + self.ipc_endpoint.clone() } /// Convenience function to do [RpcServerConfig::build] and [RpcServer::start] in one step @@ -1636,6 +1623,16 @@ impl RpcServerConfig { self.build(&modules).await?.start(modules).await } + /// Creates the [CorsLayer] if any + fn maybe_cors_layer(cors: Option) -> Result, CorsDomainError> { + cors.as_deref().map(cors::create_cors_layer).transpose() + } + + /// Creates the [AuthLayer] if any + fn maybe_jwt_layer(&self) -> Option> { + self.jwt_secret.clone().map(|secret| AuthLayer::new(JwtAuthValidator::new(secret))) + } + /// Builds the ws and http server(s). /// /// If both are on the same port, they are combined into one server. 
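The auth.rs hunks earlier in this diff replace the one-shot `set_headers(... bearer ...)` call with an `AuthClientLayer` installed via `set_http_middleware`, so a fresh JWT is minted for every request instead of once at client construction. A minimal sketch of that client-side pattern, assuming `tower` and `http` as dependencies; `BearerLayer` and its `make_token` hook are hypothetical stand-ins, not reth's actual `AuthClientLayer`:

```rust
use std::task::{Context, Poll};

use http::{header::AUTHORIZATION, HeaderValue, Request};
use tower::{Layer, Service};

/// Illustrative layer that stamps a fresh `Authorization: Bearer ...` header
/// on every outgoing request. `make_token` is a hypothetical stand-in for the
/// JWT encoding reth derives from its `JwtSecret`.
#[derive(Clone)]
struct BearerLayer {
    make_token: fn() -> String,
}

impl<S> Layer<S> for BearerLayer {
    type Service = BearerService<S>;

    fn layer(&self, inner: S) -> Self::Service {
        BearerService { inner, make_token: self.make_token }
    }
}

#[derive(Clone)]
struct BearerService<S> {
    inner: S,
    make_token: fn() -> String,
}

impl<S, B> Service<Request<B>> for BearerService<S>
where
    S: Service<Request<B>>,
{
    type Response = S::Response;
    type Error = S::Error;
    type Future = S::Future;

    fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        self.inner.poll_ready(cx)
    }

    fn call(&mut self, mut req: Request<B>) -> Self::Future {
        // Minting at call time (rather than once at client construction) is
        // what keeps a long-lived client working past the token's expiry.
        let token = (self.make_token)();
        let value: HeaderValue =
            format!("Bearer {token}").parse().expect("valid header value");
        req.headers_mut().insert(AUTHORIZATION, value);
        self.inner.call(req)
    }
}
```

This also explains why the ws client above keeps a `set_headers` call via `secret_to_bearer_header` and gains the warning about the one-minute window: a WebSocket handshake happens once, so only the initial headers can carry the token.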
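The new `maybe_cors_layer` and `maybe_jwt_layer` helpers exist so the builder can feed `Option`s into `tower::ServiceBuilder::option_layer`, which wraps a present layer in `Either` with an `Identity` fallback rather than changing the overall service type; that is what lets the four-variant `WsHttpServerKind` enum deleted later in this diff collapse into a single type. A runnable sketch of the pattern, assuming `tower` (with the `util` and `timeout` features) and `tokio`; the timeout layer is just an illustrative stand-in for the CORS/JWT layers:

```rust
use std::{convert::Infallible, time::Duration};

use tower::{service_fn, timeout::TimeoutLayer, ServiceBuilder, ServiceExt};

#[tokio::main]
async fn main() -> Result<(), tower::BoxError> {
    // Flip this to `None` and the program still compiles unchanged:
    // `option_layer` wraps the layer in an `Either`, falling back to
    // `Identity`, instead of changing the built service's type.
    let maybe_timeout = Some(TimeoutLayer::new(Duration::from_secs(1)));

    let svc = ServiceBuilder::new()
        .option_layer(maybe_timeout)
        .service(service_fn(|req: &'static str| async move {
            Ok::<_, Infallible>(format!("handled: {req}"))
        }));

    let response = svc.oneshot("ping").await?;
    println!("{response}");
    Ok(())
}
```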
@@ -1647,7 +1644,6 @@ impl RpcServerConfig { Ipv4Addr::LOCALHOST, DEFAULT_HTTP_RPC_PORT, ))); - let jwt_secret = self.jwt_secret.clone(); let ws_socket_addr = self .ws_addr @@ -1673,33 +1669,39 @@ impl RpcServerConfig { } .cloned(); - let secret = self.jwt_secret.clone(); - // we merge this into one server using the http setup self.ws_server_config.take(); modules.config.ensure_ws_http_identical()?; let builder = self.http_server_config.take().expect("http_server_config is Some"); - let (server, addr) = WsHttpServerKind::build( - builder, - http_socket_addr, - cors, - secret, - ServerKind::WsHttp(http_socket_addr), - modules - .http - .as_ref() - .or(modules.ws.as_ref()) - .map(RpcRequestMetrics::same_port) - .unwrap_or_default(), - ) - .await?; + let server = builder + .set_http_middleware( + tower::ServiceBuilder::new() + .option_layer(Self::maybe_cors_layer(cors)?) + .option_layer(self.maybe_jwt_layer()), + ) + .set_rpc_middleware( + RpcServiceBuilder::new().layer( + modules + .http + .as_ref() + .or(modules.ws.as_ref()) + .map(RpcRequestMetrics::same_port) + .unwrap_or_default(), + ), + ) + .build(http_socket_addr) + .await + .map_err(|err| RpcError::server_error(err, ServerKind::WsHttp(http_socket_addr)))?; + let addr = server + .local_addr() + .map_err(|err| RpcError::server_error(err, ServerKind::WsHttp(http_socket_addr)))?; return Ok(WsHttpServer { http_local_addr: Some(addr), ws_local_addr: Some(addr), server: WsHttpServers::SamePort(server), - jwt_secret, + jwt_secret: self.jwt_secret.clone(), }) } @@ -1709,32 +1711,48 @@ impl RpcServerConfig { let mut ws_local_addr = None; let mut ws_server = None; if let Some(builder) = self.ws_server_config.take() { - let builder = builder.ws_only(); - let (server, addr) = WsHttpServerKind::build( - builder, - ws_socket_addr, - self.ws_cors_domains.take(), - self.jwt_secret.clone(), - ServerKind::WS(ws_socket_addr), - modules.ws.as_ref().map(RpcRequestMetrics::ws).unwrap_or_default(), - ) - .await?; + let server = builder + .ws_only() + .set_http_middleware( + tower::ServiceBuilder::new() + .option_layer(Self::maybe_cors_layer(self.ws_cors_domains.clone())?) + .option_layer(self.maybe_jwt_layer()), + ) + .set_rpc_middleware( + RpcServiceBuilder::new() + .layer(modules.ws.as_ref().map(RpcRequestMetrics::ws).unwrap_or_default()), + ) + .build(ws_socket_addr) + .await + .map_err(|err| RpcError::server_error(err, ServerKind::WS(ws_socket_addr)))?; + let addr = server + .local_addr() + .map_err(|err| RpcError::server_error(err, ServerKind::WS(ws_socket_addr)))?; + ws_local_addr = Some(addr); ws_server = Some(server); } if let Some(builder) = self.http_server_config.take() { - let builder = builder.http_only(); - let (server, addr) = WsHttpServerKind::build( - builder, - http_socket_addr, - self.http_cors_domains.take(), - self.jwt_secret.clone(), - ServerKind::Http(http_socket_addr), - modules.http.as_ref().map(RpcRequestMetrics::http).unwrap_or_default(), - ) - .await?; - http_local_addr = Some(addr); + let server = builder + .http_only() + .set_http_middleware( + tower::ServiceBuilder::new() + .option_layer(Self::maybe_cors_layer(self.http_cors_domains.clone())?) 
+ .option_layer(self.maybe_jwt_layer()), + ) + .set_rpc_middleware( + RpcServiceBuilder::new().layer( + modules.http.as_ref().map(RpcRequestMetrics::http).unwrap_or_default(), + ), + ) + .build(http_socket_addr) + .await + .map_err(|err| RpcError::server_error(err, ServerKind::Http(http_socket_addr)))?; + let local_addr = server + .local_addr() + .map_err(|err| RpcError::server_error(err, ServerKind::Http(http_socket_addr)))?; + http_local_addr = Some(local_addr); http_server = Some(server); } @@ -1742,7 +1760,7 @@ impl RpcServerConfig { http_local_addr, ws_local_addr, server: WsHttpServers::DifferentPort { http: http_server, ws: ws_server }, - jwt_secret, + jwt_secret: self.jwt_secret.clone(), }) } @@ -1756,14 +1774,11 @@ impl RpcServerConfig { server.ws_http = self.build_ws_http(modules).await?; if let Some(builder) = self.ipc_server_config { - // let metrics = modules.ipc.as_ref().map(RpcRequestMetrics::new).unwrap_or_default(); - let ipc_path = self - .ipc_endpoint - .unwrap_or_else(|| Endpoint::new(DEFAULT_IPC_ENDPOINT.to_string())); + let metrics = modules.ipc.as_ref().map(RpcRequestMetrics::ipc).unwrap_or_default(); + let ipc_path = self.ipc_endpoint.unwrap_or_else(|| DEFAULT_IPC_ENDPOINT.into()); let ipc = builder - // TODO(mattsse): add metrics middleware for IPC - // .set_middleware(metrics) - .build(ipc_path.path()); + .set_rpc_middleware(IpcRpcServiceBuilder::new().layer(metrics)) + .build(ipc_path); server.ipc = Some(ipc); } @@ -1961,6 +1976,15 @@ struct WsHttpServer { jwt_secret: Option, } +// Define the type alias with detailed type complexity +type WsHttpServerKind = Server< + Stack< + tower::util::Either, Identity>, + Stack, Identity>, + >, + Stack, +>; + /// Enum for holding the http and ws servers in all possible combinations. enum WsHttpServers { /// Both servers are on the same port @@ -1982,13 +2006,13 @@ impl WsHttpServers { let mut http_handle = None; let mut ws_handle = None; match self { - WsHttpServers::SamePort(both) => { + WsHttpServers::SamePort(server) => { // Make sure http and ws modules are identical, since we currently can't run // different modules on same server config.ensure_ws_http_identical()?; if let Some(module) = http_module.or(ws_module) { - let handle = both.start(module).await; + let handle = server.start(module); http_handle = Some(handle.clone()); ws_handle = Some(handle); } @@ -1997,12 +2021,12 @@ impl WsHttpServers { if let Some((server, module)) = http.and_then(|server| http_module.map(|module| (server, module))) { - http_handle = Some(server.start(module).await); + http_handle = Some(server.start(module)); } if let Some((server, module)) = ws.and_then(|server| ws_module.map(|module| (server, module))) { - ws_handle = Some(server.start(module).await); + ws_handle = Some(server.start(module)); } } } @@ -2017,117 +2041,12 @@ impl Default for WsHttpServers { } } -/// Http Servers Enum -#[allow(clippy::type_complexity)] -enum WsHttpServerKind { - /// Http server - Plain(Server>), - /// Http server with cors - WithCors(Server, Stack>), - /// Http server with auth - WithAuth( - Server, Identity>, Stack>, - ), - /// Http server with cors and auth - WithCorsAuth( - Server< - Stack, Stack>, - Stack, - >, - ), -} - -// === impl WsHttpServerKind === - -impl WsHttpServerKind { - /// Starts the server and returns the handle - async fn start(self, module: RpcModule<()>) -> ServerHandle { - match self { - WsHttpServerKind::Plain(server) => server.start(module), - WsHttpServerKind::WithCors(server) => server.start(module), - 
WsHttpServerKind::WithAuth(server) => server.start(module), - WsHttpServerKind::WithCorsAuth(server) => server.start(module), - } - } - - /// Builds the server according to the given config parameters. - /// - /// Returns the address of the started server. - async fn build( - builder: ServerBuilder, - socket_addr: SocketAddr, - cors_domains: Option, - jwt_secret: Option, - server_kind: ServerKind, - metrics: RpcRequestMetrics, - ) -> Result<(Self, SocketAddr), RpcError> { - if let Some(cors) = cors_domains.as_deref().map(cors::create_cors_layer) { - let cors = cors.map_err(|err| RpcError::Custom(err.to_string()))?; - - if let Some(secret) = jwt_secret { - // stack cors and auth layers - let middleware = tower::ServiceBuilder::new() - .layer(cors) - .layer(AuthLayer::new(JwtAuthValidator::new(secret.clone()))); - - let server = builder - .set_http_middleware(middleware) - .set_rpc_middleware(RpcServiceBuilder::new().layer(metrics)) - .build(socket_addr) - .await - .map_err(|err| RpcError::server_error(err, server_kind))?; - let local_addr = - server.local_addr().map_err(|err| RpcError::server_error(err, server_kind))?; - let server = WsHttpServerKind::WithCorsAuth(server); - Ok((server, local_addr)) - } else { - let middleware = tower::ServiceBuilder::new().layer(cors); - let server = builder - .set_http_middleware(middleware) - .set_rpc_middleware(RpcServiceBuilder::new().layer(metrics)) - .build(socket_addr) - .await - .map_err(|err| RpcError::server_error(err, server_kind))?; - let local_addr = - server.local_addr().map_err(|err| RpcError::server_error(err, server_kind))?; - let server = WsHttpServerKind::WithCors(server); - Ok((server, local_addr)) - } - } else if let Some(secret) = jwt_secret { - // jwt auth layered service - let middleware = tower::ServiceBuilder::new() - .layer(AuthLayer::new(JwtAuthValidator::new(secret.clone()))); - let server = builder - .set_http_middleware(middleware) - .set_rpc_middleware(RpcServiceBuilder::new().layer(metrics)) - .build(socket_addr) - .await - .map_err(|err| RpcError::server_error(err, ServerKind::Auth(socket_addr)))?; - let local_addr = - server.local_addr().map_err(|err| RpcError::server_error(err, server_kind))?; - let server = WsHttpServerKind::WithAuth(server); - Ok((server, local_addr)) - } else { - // plain server without any middleware - let server = builder - .set_rpc_middleware(RpcServiceBuilder::new().layer(metrics)) - .build(socket_addr) - .await - .map_err(|err| RpcError::server_error(err, server_kind))?; - let local_addr = - server.local_addr().map_err(|err| RpcError::server_error(err, server_kind))?; - let server = WsHttpServerKind::Plain(server); - Ok((server, local_addr)) - } - } -} - /// Container type for each transport ie. http, ws, and ipc server pub struct RpcServer { /// Configured ws,http servers ws_http: WsHttpServer, /// ipc server - ipc: Option, + ipc: Option>>, } // === impl RpcServer === @@ -2151,8 +2070,8 @@ impl RpcServer { self.ws_http.ws_local_addr } - /// Returns the [`Endpoint`] of the ipc server if started. - pub fn ipc_endpoint(&self) -> Option<&Endpoint> { + /// Returns the endpoint of the ipc server if started. + pub fn ipc_endpoint(&self) -> Option { self.ipc.as_ref().map(|ipc| ipc.endpoint()) } @@ -2160,7 +2079,7 @@ impl RpcServer { /// /// This returns an [RpcServerHandle] that's connected to the server task(s) until the server is /// stopped or the [RpcServerHandle] is dropped. 
- #[instrument(name = "start", skip_all, fields(http = ?self.http_local_addr(), ws = ?self.ws_local_addr(), ipc = ?self.ipc_endpoint().map(|ipc|ipc.path())), target = "rpc", level = "TRACE")] + #[instrument(name = "start", skip_all, fields(http = ?self.http_local_addr(), ws = ?self.ws_local_addr(), ipc = ?self.ipc_endpoint()), target = "rpc", level = "TRACE")] pub async fn start(self, modules: TransportRpcModules) -> Result { trace!(target: "rpc", "staring RPC server"); let Self { ws_http, ipc: ipc_server } = self; @@ -2182,7 +2101,7 @@ impl RpcServer { if let Some((server, module)) = ipc_server.and_then(|server| ipc.map(|module| (server, module))) { - handle.ipc_endpoint = Some(server.endpoint().path().to_string()); + handle.ipc_endpoint = Some(server.endpoint()); handle.ipc = Some(server.start(module).await?); } @@ -2194,7 +2113,7 @@ impl fmt::Debug for RpcServer { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("RpcServer") .field("http", &self.ws_http.http_local_addr.is_some()) - .field("ws", &self.ws_http.http_local_addr.is_some()) + .field("ws", &self.ws_http.ws_local_addr.is_some()) .field("ipc", &self.ipc.is_some()) .finish() } @@ -2203,7 +2122,7 @@ impl fmt::Debug for RpcServer { /// A handle to the spawned servers. /// /// When this type is dropped or [RpcServerHandle::stop] has been called the server will be stopped. -#[derive(Clone)] +#[derive(Clone, Debug)] #[must_use = "Server stops if dropped"] pub struct RpcServerHandle { /// The address of the http/ws server @@ -2306,16 +2225,6 @@ impl RpcServerHandle { } } -impl fmt::Debug for RpcServerHandle { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("RpcServerHandle") - .field("http", &self.http.is_some()) - .field("ws", &self.ws.is_some()) - .field("ipc", &self.ipc.is_some()) - .finish() - } -} - #[cfg(test)] mod tests { use super::*; diff --git a/crates/rpc/rpc-builder/tests/it/auth.rs b/crates/rpc/rpc-builder/tests/it/auth.rs index 51ba2f145..b5416bf67 100644 --- a/crates/rpc/rpc-builder/tests/it/auth.rs +++ b/crates/rpc/rpc-builder/tests/it/auth.rs @@ -2,13 +2,13 @@ use crate::utils::launch_auth; use jsonrpsee::core::client::{ClientT, SubscriptionClientT}; -use reth_node_ethereum::EthEngineTypes; +use reth_ethereum_engine_primitives::EthEngineTypes; use reth_primitives::{Block, U64}; use reth_rpc::JwtSecret; use reth_rpc_api::clients::EngineApiClient; use reth_rpc_types::engine::{ForkchoiceState, PayloadId, TransitionConfiguration}; use reth_rpc_types_compat::engine::payload::{ - convert_block_to_payload_input_v2, try_block_to_payload_v1, + block_to_payload_v1, convert_block_to_payload_input_v2, }; #[allow(unused_must_use)] async fn test_basic_engine_calls(client: &C) @@ -17,7 +17,7 @@ where C: EngineApiClient, { let block = Block::default().seal_slow(); - EngineApiClient::new_payload_v1(client, try_block_to_payload_v1(block.clone())).await; + EngineApiClient::new_payload_v1(client, block_to_payload_v1(block.clone())).await; EngineApiClient::new_payload_v2(client, convert_block_to_payload_input_v2(block)).await; EngineApiClient::fork_choice_updated_v1(client, ForkchoiceState::default(), None).await; EngineApiClient::get_payload_v1(client, PayloadId::new([0, 0, 0, 0, 0, 0, 0, 0])).await; diff --git a/crates/rpc/rpc-builder/tests/it/http.rs b/crates/rpc/rpc-builder/tests/it/http.rs index 7fc714a2d..42fecb87d 100644 --- a/crates/rpc/rpc-builder/tests/it/http.rs +++ b/crates/rpc/rpc-builder/tests/it/http.rs @@ -167,7 +167,7 @@ where 
EthApiClient::block_number(client).await.unwrap(); EthApiClient::get_code(client, address, None).await.unwrap(); EthApiClient::send_raw_transaction(client, tx).await.unwrap(); - EthApiClient::fee_history(client, 0.into(), block_number, None).await.unwrap(); + EthApiClient::fee_history(client, 0, block_number, None).await.unwrap(); EthApiClient::balance(client, address, None).await.unwrap(); EthApiClient::transaction_count(client, address, None).await.unwrap(); EthApiClient::storage_at(client, address, U256::default().into(), None).await.unwrap(); diff --git a/crates/rpc/rpc-builder/tests/it/utils.rs b/crates/rpc/rpc-builder/tests/it/utils.rs index c11801442..403e12a1b 100644 --- a/crates/rpc/rpc-builder/tests/it/utils.rs +++ b/crates/rpc/rpc-builder/tests/it/utils.rs @@ -1,6 +1,7 @@ use reth_beacon_consensus::BeaconConsensusEngineHandle; +use reth_ethereum_engine_primitives::EthEngineTypes; +use reth_evm_ethereum::EthEvmConfig; use reth_network_api::noop::NoopNetwork; -use reth_node_ethereum::{EthEngineTypes, EthEvmConfig}; use reth_payload_builder::test_utils::spawn_test_payload_service; use reth_primitives::MAINNET; use reth_provider::test_utils::{NoopProvider, TestCanonStateSubscriptions}; diff --git a/crates/rpc/rpc-engine-api/Cargo.toml b/crates/rpc/rpc-engine-api/Cargo.toml index 271363963..5fe782a6e 100644 --- a/crates/rpc/rpc-engine-api/Cargo.toml +++ b/crates/rpc/rpc-engine-api/Cargo.toml @@ -14,7 +14,6 @@ workspace = true [dependencies] # reth reth-primitives.workspace = true -reth-interfaces.workspace = true reth-provider.workspace = true reth-rpc-types.workspace = true reth-rpc-api.workspace = true @@ -40,12 +39,13 @@ tracing.workspace = true serde.workspace = true [dev-dependencies] -alloy-rlp.workspace = true -reth-node-ethereum.workspace = true -reth-node-optimism.workspace = true +reth-ethereum-engine-primitives.workspace = true reth-interfaces = { workspace = true, features = ["test-utils"] } reth-provider = { workspace = true, features = ["test-utils"] } reth-payload-builder = { workspace = true, features = ["test-utils"] } + +alloy-rlp.workspace = true + assert_matches.workspace = true [features] diff --git a/crates/rpc/rpc-engine-api/src/engine_api.rs b/crates/rpc/rpc-engine-api/src/engine_api.rs index 7fc52b21c..0e4476bb7 100644 --- a/crates/rpc/rpc-engine-api/src/engine_api.rs +++ b/crates/rpc/rpc-engine-api/src/engine_api.rs @@ -6,15 +6,14 @@ use reth_engine_primitives::{ validate_payload_timestamp, EngineApiMessageVersion, EngineTypes, PayloadAttributes, PayloadBuilderAttributes, PayloadOrAttributes, }; -use reth_interfaces::consensus::ForkchoiceState; use reth_payload_builder::PayloadStore; use reth_primitives::{BlockHash, BlockHashOrNumber, BlockNumber, ChainSpec, Hardfork, B256, U64}; use reth_provider::{BlockReader, EvmEnvProvider, HeaderProvider, StateProviderFactory}; use reth_rpc_api::EngineApiServer; use reth_rpc_types::engine::{ CancunPayloadFields, ExecutionPayload, ExecutionPayloadBodiesV1, ExecutionPayloadInputV2, - ExecutionPayloadV1, ExecutionPayloadV3, ForkchoiceUpdated, PayloadId, PayloadStatus, - TransitionConfiguration, CAPABILITIES, + ExecutionPayloadV1, ExecutionPayloadV3, ExecutionPayloadV4, ForkchoiceState, ForkchoiceUpdated, + PayloadId, PayloadStatus, TransitionConfiguration, CAPABILITIES, }; use reth_rpc_types_compat::engine::payload::{ convert_payload_input_v2_to_payload, convert_to_payload_body_v1, @@ -149,6 +148,30 @@ where Ok(self.inner.beacon_consensus.new_payload(payload, Some(cancun_fields)).await?) 
} + /// See also + pub async fn new_payload_v4( + &self, + payload: ExecutionPayloadV4, + versioned_hashes: Vec, + parent_beacon_block_root: B256, + ) -> EngineApiResult { + let payload = ExecutionPayload::from(payload); + let payload_or_attrs = + PayloadOrAttributes::<'_, EngineT::PayloadAttributes>::from_execution_payload( + &payload, + Some(parent_beacon_block_root), + ); + EngineT::validate_version_specific_fields( + &self.inner.chain_spec, + EngineApiMessageVersion::V4, + payload_or_attrs, + )?; + + let cancun_fields = CancunPayloadFields { versioned_hashes, parent_beacon_block_root }; + + Ok(self.inner.beacon_consensus.new_payload(payload, Some(cancun_fields)).await?) + } + /// Sends a message to the beacon consensus engine to update the fork choice _without_ /// withdrawals. /// @@ -281,7 +304,42 @@ where .map_err(|_| EngineApiError::UnknownPayload)? .try_into() .map_err(|_| { - warn!("could not transform built payload into ExecutionPayloadV2"); + warn!("could not transform built payload into ExecutionPayloadV3"); + EngineApiError::UnknownPayload + }) + } + + /// Returns the most recent version of the payload that is available in the corresponding + /// payload build process at the time of receiving this call. + /// + /// See also + /// + /// Note: + /// > Provider software MAY stop the corresponding build process after serving this call. + pub async fn get_payload_v4( + &self, + payload_id: PayloadId, + ) -> EngineApiResult { + // First we fetch the payload attributes to check the timestamp + let attributes = self.get_payload_attributes(payload_id).await?; + + // validate timestamp according to engine rules + validate_payload_timestamp( + &self.inner.chain_spec, + EngineApiMessageVersion::V4, + attributes.timestamp(), + )?; + + // Now resolve the payload + self.inner + .payload_store + .resolve(payload_id) + .await + .ok_or(EngineApiError::UnknownPayload)? + .map_err(|_| EngineApiError::UnknownPayload)? + .try_into() + .map_err(|_| { + warn!("could not transform built payload into ExecutionPayloadV4"); EngineApiError::UnknownPayload }) } @@ -293,7 +351,7 @@ where /// layer p2p specification, meaning the input should be treated as untrusted or potentially /// adversarial. /// - /// Implementors should take care when acting on the input to this method, specifically + /// Implementers should take care when acting on the input to this method, specifically /// ensuring that the range is limited properly, and that the range boundaries are computed /// correctly and without panics. pub async fn get_payload_bodies_by_range( @@ -662,7 +720,7 @@ where /// layer p2p specification, meaning the input should be treated as untrusted or potentially /// adversarial. /// - /// Implementors should take care when acting on the input to this method, specifically + /// Implementers should take care when acting on the input to this method, specifically /// ensuring that the range is limited properly, and that the range boundaries are computed /// correctly and without panics. 
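The doc comment above stresses that `get_payload_bodies_by_range` input arrives from the untrusted CL side, so the range arithmetic must be bounds-checked and panic-free. A minimal sketch of that style of validation; the limit constant and string errors are illustrative stand-ins, not reth's actual types:

```rust
/// Illustrative cap on how many payload bodies one request may ask for.
const MAX_PAYLOAD_BODIES_LIMIT: u64 = 1024;

/// Validates an untrusted (start, count) request and computes the inclusive
/// range end without risking an overflow panic.
fn validate_payload_body_range(
    start: u64,
    count: u64,
) -> Result<std::ops::RangeInclusive<u64>, String> {
    if start == 0 || count == 0 {
        return Err("start and count must be non-zero".to_string());
    }
    if count > MAX_PAYLOAD_BODIES_LIMIT {
        return Err(format!("count {count} exceeds limit {MAX_PAYLOAD_BODIES_LIMIT}"));
    }
    // `start + count - 1` on raw u64 input could overflow; checked_add turns
    // an adversarial request into an error instead of a panic. The earlier
    // `count == 0` check guarantees `count - 1` cannot underflow.
    let end = start
        .checked_add(count - 1)
        .ok_or_else(|| "range end overflows u64".to_string())?;
    Ok(start..=end)
}
```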
/// @@ -713,8 +771,8 @@ mod tests { use super::*; use assert_matches::assert_matches; use reth_beacon_consensus::BeaconEngineMessage; + use reth_ethereum_engine_primitives::EthEngineTypes; use reth_interfaces::test_utils::generators::random_block; - use reth_node_ethereum::EthEngineTypes; use reth_payload_builder::test_utils::spawn_test_payload_service; use reth_primitives::{SealedBlock, B256, MAINNET}; use reth_provider::test_utils::MockEthProvider; diff --git a/crates/rpc/rpc-engine-api/src/error.rs b/crates/rpc/rpc-engine-api/src/error.rs index 8a7790cf0..57318d0d6 100644 --- a/crates/rpc/rpc-engine-api/src/error.rs +++ b/crates/rpc/rpc-engine-api/src/error.rs @@ -5,6 +5,7 @@ use reth_beacon_consensus::{BeaconForkChoiceUpdateError, BeaconOnNewPayloadError use reth_engine_primitives::EngineObjectValidationError; use reth_payload_builder::error::PayloadBuilderError; use reth_primitives::{B256, U256}; +use reth_rpc_types::ToRpcError; use thiserror::Error; /// The Engine API result type @@ -86,11 +87,16 @@ pub enum EngineApiError { /// The payload or attributes are known to be malformed before processing. #[error(transparent)] EngineObjectValidationError(#[from] EngineObjectValidationError), - /// If the optimism feature flag is enabled, the payload attributes must have a present - /// gas limit for the forkchoice updated method. - #[cfg(feature = "optimism")] - #[error("Missing gas limit in payload attributes")] - MissingGasLimitInPayloadAttributes, + /// Any other error + #[error("{0}")] + Other(Box), +} + +impl EngineApiError { + /// Creates a new [EngineApiError::Other] variant. + pub fn other(err: E) -> Self { + Self::Other(Box::new(err)) + } } /// Helper type to represent the `error` field in the error response: @@ -188,15 +194,6 @@ impl From for jsonrpsee_types::error::ErrorObject<'static> { ) } }, - // Optimism errors - #[cfg(feature = "optimism")] - EngineApiError::MissingGasLimitInPayloadAttributes => { - jsonrpsee_types::error::ErrorObject::owned( - INVALID_PARAMS_CODE, - INVALID_PARAMS_MSG, - Some(ErrorData::new(error)), - ) - } // Any other server error EngineApiError::TerminalTD { .. } | EngineApiError::TerminalBlockHash { ..
} | @@ -206,6 +203,7 @@ impl From for jsonrpsee_types::error::ErrorObject<'static> { SERVER_ERROR_MSG, Some(ErrorData::new(error)), ), + EngineApiError::Other(err) => err.to_rpc_error(), } } } @@ -222,7 +220,6 @@ mod tests { err: impl Into>, ) { let err = err.into(); - dbg!(&err); assert_eq!(err.code(), code); assert_eq!(err.message(), message); } diff --git a/crates/rpc/rpc-engine-api/tests/it/payload.rs b/crates/rpc/rpc-engine-api/tests/it/payload.rs index 8853b5c88..22219584c 100644 --- a/crates/rpc/rpc-engine-api/tests/it/payload.rs +++ b/crates/rpc/rpc-engine-api/tests/it/payload.rs @@ -13,8 +13,8 @@ use reth_rpc_types::engine::{ ExecutionPayload, ExecutionPayloadBodyV1, ExecutionPayloadV1, PayloadError, }; use reth_rpc_types_compat::engine::payload::{ - convert_standalone_withdraw_to_withdrawal, convert_to_payload_body_v1, try_block_to_payload, - try_block_to_payload_v1, try_into_sealed_block, try_payload_v1_to_block, + block_to_payload, block_to_payload_v1, convert_to_payload_body_v1, try_into_sealed_block, + try_payload_v1_to_block, }; fn transform_block Block>(src: SealedBlock, f: F) -> ExecutionPayload { @@ -23,7 +23,7 @@ fn transform_block Block>(src: SealedBlock, f: F) -> Executi // Recalculate roots transformed.header.transactions_root = proofs::calculate_transaction_root(&transformed.body); transformed.header.ommers_hash = proofs::calculate_ommers_root(&transformed.ommers); - try_block_to_payload(SealedBlock { + block_to_payload(SealedBlock { header: transformed.header.seal_slow(), body: transformed.body, ommers: transformed.ommers, @@ -46,11 +46,7 @@ fn payload_body_roundtrip() { .map(|x| TransactionSigned::decode(&mut &x[..])) .collect::, _>>(), ); - let withdraw = payload_body.withdrawals.map(|withdrawals| { - Withdrawals::new( - withdrawals.into_iter().map(convert_standalone_withdraw_to_withdrawal).collect(), - ) - }); + let withdraw = payload_body.withdrawals.map(Withdrawals::new); assert_eq!(block.withdrawals, withdraw); } } @@ -93,7 +89,7 @@ fn payload_validation() { ); // Invalid encoded transactions - let mut payload_with_invalid_txs: ExecutionPayloadV1 = try_block_to_payload_v1(block.clone()); + let mut payload_with_invalid_txs: ExecutionPayloadV1 = block_to_payload_v1(block.clone()); payload_with_invalid_txs.transactions.iter_mut().for_each(|tx| { *tx = Bytes::new().into(); diff --git a/crates/rpc/rpc-types-compat/src/block.rs b/crates/rpc/rpc-types-compat/src/block.rs index 8c82686f9..1c2a44ebb 100644 --- a/crates/rpc/rpc-types-compat/src/block.rs +++ b/crates/rpc/rpc-types-compat/src/block.rs @@ -3,7 +3,7 @@ use crate::transaction::from_recovered_with_block_context; use alloy_rlp::Encodable; use reth_primitives::{ - Block as PrimitiveBlock, BlockWithSenders, Header as PrimitiveHeader, B256, U256, + Block as PrimitiveBlock, BlockWithSenders, Header as PrimitiveHeader, Withdrawals, B256, U256, }; use reth_rpc_types::{Block, BlockError, BlockTransactions, BlockTransactionsKind, Header}; @@ -141,17 +141,7 @@ pub fn from_primitive_with_hash(primitive_header: reth_primitives::SealedHeader) excess_blob_gas: excess_blob_gas.map(u128::from), parent_beacon_block_root, total_difficulty: None, - } -} - -fn from_primitive_withdrawal( - withdrawal: reth_primitives::Withdrawal, -) -> reth_rpc_types::Withdrawal { - reth_rpc_types::Withdrawal { - index: withdrawal.index, - address: withdrawal.address, - validator_index: withdrawal.validator_index, - amount: withdrawal.amount, + requests_root: None, } } @@ -167,13 +157,11 @@ fn from_block_with_transactions( let mut header = 
from_primitive_with_hash(block.header.seal(block_hash)); header.total_difficulty = Some(total_difficulty); - let withdrawals = if header.withdrawals_root.is_some() { - block - .withdrawals - .map(|withdrawals| withdrawals.into_iter().map(from_primitive_withdrawal).collect()) - } else { - None - }; + let withdrawals = header + .withdrawals_root + .is_some() + .then(|| block.withdrawals.map(Withdrawals::into_inner)) + .flatten(); Block { header, diff --git a/crates/rpc/rpc-types-compat/src/engine/mod.rs b/crates/rpc/rpc-types-compat/src/engine/mod.rs index e03ba6f4c..aa7456250 100644 --- a/crates/rpc/rpc-types-compat/src/engine/mod.rs +++ b/crates/rpc/rpc-types-compat/src/engine/mod.rs @@ -1,6 +1,3 @@ //! Standalone functions for engine specific rpc type conversions pub mod payload; -pub use payload::{ - convert_standalone_withdraw_to_withdrawal, convert_withdrawal_to_standalone_withdraw, - try_block_to_payload_v1, try_into_sealed_block, try_payload_v1_to_block, -}; +pub use payload::{block_to_payload_v1, try_into_sealed_block, try_payload_v1_to_block}; diff --git a/crates/rpc/rpc-types-compat/src/engine/payload.rs b/crates/rpc/rpc-types-compat/src/engine/payload.rs index 469475301..f504c169c 100644 --- a/crates/rpc/rpc-types-compat/src/engine/payload.rs +++ b/crates/rpc/rpc-types-compat/src/engine/payload.rs @@ -4,11 +4,12 @@ use reth_primitives::{ constants::{EMPTY_OMMER_ROOT_HASH, MAXIMUM_EXTRA_DATA_SIZE, MIN_PROTOCOL_BASE_FEE_U256}, proofs::{self}, - Block, Header, SealedBlock, TransactionSigned, UintTryTo, Withdrawal, Withdrawals, B256, U256, + Block, Header, SealedBlock, TransactionSigned, UintTryTo, Withdrawals, B256, U256, }; use reth_rpc_types::engine::{ payload::{ExecutionPayloadBodyV1, ExecutionPayloadFieldV2, ExecutionPayloadInputV2}, - ExecutionPayload, ExecutionPayloadV1, ExecutionPayloadV2, ExecutionPayloadV3, PayloadError, + ExecutionPayload, ExecutionPayloadV1, ExecutionPayloadV2, ExecutionPayloadV3, + ExecutionPayloadV4, PayloadError, }; /// Converts [ExecutionPayloadV1] to [Block] @@ -65,11 +66,8 @@ pub fn try_payload_v2_to_block(payload: ExecutionPayloadV2) -> Result Result Result { + // this performs the same conversion as the underlying V3 payload. 
+ // + // the new request lists (`deposit_requests`, `withdrawal_requests`) are EL -> CL only, so we do + // not do anything special here to handle them + try_payload_v3_to_block(payload.payload_inner) +} + /// Converts [SealedBlock] to [ExecutionPayload] -pub fn try_block_to_payload(value: SealedBlock) -> ExecutionPayload { +pub fn block_to_payload(value: SealedBlock) -> ExecutionPayload { + // todo(onbjerg): check for requests_root here and return payload v4 if value.header.parent_beacon_block_root.is_some() { // block with parent beacon block root: V3 ExecutionPayload::V3(block_to_payload_v3(value)) } else if value.withdrawals.is_some() { // block with withdrawals: V2 - ExecutionPayload::V2(try_block_to_payload_v2(value)) + ExecutionPayload::V2(block_to_payload_v2(value)) } else { // otherwise V1 - ExecutionPayload::V1(try_block_to_payload_v1(value)) + ExecutionPayload::V1(block_to_payload_v1(value)) } } /// Converts [SealedBlock] to [ExecutionPayloadV1] -pub fn try_block_to_payload_v1(value: SealedBlock) -> ExecutionPayloadV1 { +pub fn block_to_payload_v1(value: SealedBlock) -> ExecutionPayloadV1 { let transactions = value.raw_transactions(); ExecutionPayloadV1 { parent_hash: value.parent_hash, @@ -122,15 +130,8 @@ pub fn try_block_to_payload_v1(value: SealedBlock) -> ExecutionPayloadV1 { } /// Converts [SealedBlock] to [ExecutionPayloadV2] -pub fn try_block_to_payload_v2(value: SealedBlock) -> ExecutionPayloadV2 { +pub fn block_to_payload_v2(value: SealedBlock) -> ExecutionPayloadV2 { let transactions = value.raw_transactions(); - let standalone_withdrawals: Vec = value - .withdrawals - .clone() - .unwrap_or_default() - .into_iter() - .map(convert_withdrawal_to_standalone_withdraw) - .collect(); ExecutionPayloadV2 { payload_inner: ExecutionPayloadV1 { @@ -149,7 +150,7 @@ pub fn try_block_to_payload_v2(value: SealedBlock) -> ExecutionPayloadV2 { block_hash: value.hash(), transactions, }, - withdrawals: standalone_withdrawals, + withdrawals: value.withdrawals.unwrap_or_default().into_inner(), } } @@ -157,15 +158,9 @@ pub fn try_block_to_payload_v2(value: SealedBlock) -> ExecutionPayloadV2 { pub fn block_to_payload_v3(value: SealedBlock) -> ExecutionPayloadV3 { let transactions = value.raw_transactions(); - let withdrawals: Vec = value - .withdrawals - .clone() - .unwrap_or_default() - .into_iter() - .map(convert_withdrawal_to_standalone_withdraw) - .collect(); - ExecutionPayloadV3 { + blob_gas_used: value.blob_gas_used.unwrap_or_default(), + excess_blob_gas: value.excess_blob_gas.unwrap_or_default(), payload_inner: ExecutionPayloadV2 { payload_inner: ExecutionPayloadV1 { parent_hash: value.parent_hash, @@ -183,11 +178,8 @@ pub fn block_to_payload_v3(value: SealedBlock) -> ExecutionPayloadV3 { block_hash: value.hash(), transactions, }, - withdrawals, + withdrawals: value.withdrawals.unwrap_or_default().into_inner(), }, - - blob_gas_used: value.blob_gas_used.unwrap_or_default(), - excess_blob_gas: value.excess_blob_gas.unwrap_or_default(), } } @@ -195,9 +187,9 @@ pub fn block_to_payload_v3(value: SealedBlock) -> ExecutionPayloadV3 { pub fn convert_block_to_payload_field_v2(value: SealedBlock) -> ExecutionPayloadFieldV2 { // if there are withdrawals, return V2 if value.withdrawals.is_some() { - ExecutionPayloadFieldV2::V2(try_block_to_payload_v2(value)) + ExecutionPayloadFieldV2::V2(block_to_payload_v2(value)) } else { - ExecutionPayloadFieldV2::V1(try_block_to_payload_v1(value)) + ExecutionPayloadFieldV2::V1(block_to_payload_v1(value)) } } @@ -222,12 +214,9 @@ pub fn 
convert_payload_input_v2_to_payload(value: ExecutionPayloadInputV2) -> Ex /// Converts [SealedBlock] to [ExecutionPayloadInputV2] pub fn convert_block_to_payload_input_v2(value: SealedBlock) -> ExecutionPayloadInputV2 { - let withdraw = value.withdrawals.clone().map(|withdrawals| { - withdrawals.into_iter().map(convert_withdrawal_to_standalone_withdraw).collect::>() - }); ExecutionPayloadInputV2 { - withdrawals: withdraw, - execution_payload: try_block_to_payload_v1(value), + withdrawals: value.withdrawals.clone().map(Withdrawals::into_inner), + execution_payload: block_to_payload_v1(value), } } @@ -246,6 +235,7 @@ pub fn try_into_block( ExecutionPayload::V1(payload) => try_payload_v1_to_block(payload)?, ExecutionPayload::V2(payload) => try_payload_v2_to_block(payload)?, ExecutionPayload::V3(payload) => try_payload_v3_to_block(payload)?, + ExecutionPayload::V4(payload) => try_payload_v4_to_block(payload)?, }; base_payload.header.parent_beacon_block_root = parent_beacon_block_root; @@ -295,30 +285,6 @@ pub fn validate_block_hash( Ok(sealed_block) } -/// Converts [Withdrawal] to [reth_rpc_types::Withdrawal] -pub fn convert_withdrawal_to_standalone_withdraw( - withdrawal: Withdrawal, -) -> reth_rpc_types::Withdrawal { - reth_rpc_types::Withdrawal { - index: withdrawal.index, - validator_index: withdrawal.validator_index, - address: withdrawal.address, - amount: withdrawal.amount, - } -} - -/// Converts [reth_rpc_types::Withdrawal] to [Withdrawal] -pub fn convert_standalone_withdraw_to_withdrawal( - standalone: reth_rpc_types::Withdrawal, -) -> Withdrawal { - Withdrawal { - index: standalone.index, - validator_index: standalone.validator_index, - address: standalone.address, - amount: standalone.amount, - } -} - /// Converts [Block] to [ExecutionPayloadBodyV1] pub fn convert_to_payload_body_v1(value: Block) -> ExecutionPayloadBodyV1 { let transactions = value.body.into_iter().map(|tx| { @@ -326,10 +292,10 @@ pub fn convert_to_payload_body_v1(value: Block) -> ExecutionPayloadBodyV1 { tx.encode_enveloped(&mut out); out.into() }); - let withdraw: Option> = value.withdrawals.map(|withdrawals| { - withdrawals.into_iter().map(convert_withdrawal_to_standalone_withdraw).collect::>() - }); - ExecutionPayloadBodyV1 { transactions: transactions.collect(), withdrawals: withdraw } + ExecutionPayloadBodyV1 { + transactions: transactions.collect(), + withdrawals: value.withdrawals.map(Withdrawals::into_inner), + } } /// Transforms a [SealedBlock] into a [ExecutionPayloadV1] diff --git a/crates/rpc/rpc-types-compat/src/lib.rs b/crates/rpc/rpc-types-compat/src/lib.rs index 7aabf4323..99eff4fa7 100644 --- a/crates/rpc/rpc-types-compat/src/lib.rs +++ b/crates/rpc/rpc-types-compat/src/lib.rs @@ -12,6 +12,5 @@ pub mod block; pub mod engine; -pub mod log; pub mod proof; pub mod transaction; diff --git a/crates/rpc/rpc-types-compat/src/log.rs b/crates/rpc/rpc-types-compat/src/log.rs deleted file mode 100644 index 2b6d33c42..000000000 --- a/crates/rpc/rpc-types-compat/src/log.rs +++ /dev/null @@ -1,16 +0,0 @@ -//! Compatibility functions for rpc `Log` type. 
- -/// Creates a new rpc Log from a primitive log type from DB -#[inline] -pub fn from_primitive_log(log: reth_primitives::Log) -> reth_rpc_types::Log { - reth_rpc_types::Log { - inner: log, - block_hash: None, - block_number: None, - block_timestamp: None, - transaction_hash: None, - transaction_index: None, - log_index: None, - removed: false, - } -} diff --git a/crates/rpc/rpc-types-compat/src/transaction/mod.rs b/crates/rpc/rpc-types-compat/src/transaction/mod.rs index 745d32e34..6a35429c5 100644 --- a/crates/rpc/rpc-types-compat/src/transaction/mod.rs +++ b/crates/rpc/rpc-types-compat/src/transaction/mod.rs @@ -1,13 +1,8 @@ //! Compatibility functions for rpc `Transaction` type. -use alloy_rpc_types::request::{TransactionInput, TransactionRequest}; -use reth_primitives::{ - BlockNumber, Transaction as PrimitiveTransaction, TransactionKind as PrimitiveTransactionKind, - TransactionSignedEcRecovered, TxType, B256, -}; -#[cfg(feature = "optimism")] -use reth_rpc_types::optimism::OptimismTransactionFields; -use reth_rpc_types::{AccessList, AccessListItem, Transaction}; +use alloy_rpc_types::request::{TransactionInput, TransactionRequest}; +use reth_primitives::{BlockNumber, TransactionSignedEcRecovered, TxKind, TxType, B256}; +use reth_rpc_types::Transaction; use signature::from_primitive_signature; pub use typed::*; @@ -45,11 +40,11 @@ fn fill( transaction_index: Option, ) -> Transaction { let signer = tx.signer(); - let mut signed_tx = tx.into_signed(); + let signed_tx = tx.into_signed(); let to = match signed_tx.kind() { - PrimitiveTransactionKind::Create => None, - PrimitiveTransactionKind::Call(to) => Some(*to), + TxKind::Create => None, + TxKind::Call(to) => Some(*to), }; #[allow(unreachable_patterns)] @@ -77,51 +72,8 @@ fn fill( // let chain_id = signed_tx.chain_id().map(U64::from); let chain_id = signed_tx.chain_id(); - let mut blob_versioned_hashes = None; - - #[allow(unreachable_patterns)] - let access_list = match &mut signed_tx.transaction { - PrimitiveTransaction::Legacy(_) => None, - PrimitiveTransaction::Eip2930(tx) => Some(AccessList( - tx.access_list - .0 - .iter() - .map(|item| AccessListItem { - address: item.address.0.into(), - storage_keys: item.storage_keys.iter().map(|key| key.0.into()).collect(), - }) - .collect(), - )), - PrimitiveTransaction::Eip1559(tx) => Some(AccessList( - tx.access_list - .0 - .iter() - .map(|item| AccessListItem { - address: item.address.0.into(), - storage_keys: item.storage_keys.iter().map(|key| key.0.into()).collect(), - }) - .collect(), - )), - PrimitiveTransaction::Eip4844(tx) => { - // extract the blob hashes from the transaction - blob_versioned_hashes = Some(std::mem::take(&mut tx.blob_versioned_hashes)); - - Some(AccessList( - tx.access_list - .0 - .iter() - .map(|item| AccessListItem { - address: item.address.0.into(), - storage_keys: item.storage_keys.iter().map(|key| key.0.into()).collect(), - }) - .collect(), - )) - } - _ => { - // OP deposit tx - None - } - }; + let blob_versioned_hashes = signed_tx.blob_versioned_hashes(); + let access_list = signed_tx.access_list().cloned(); let signature = from_primitive_signature(*signed_tx.signature(), signed_tx.tx_type(), signed_tx.chain_id()); @@ -151,7 +103,7 @@ fn fill( blob_versioned_hashes, // Optimism fields #[cfg(feature = "optimism")] - other: OptimismTransactionFields { + other: reth_rpc_types::optimism::OptimismTransactionFields { source_hash: signed_tx.source_hash(), mint: signed_tx.mint().map(reth_primitives::U128::from), is_system_tx: 
signed_tx.is_deposit().then_some(signed_tx.is_system_transaction()), @@ -162,32 +114,16 @@ fn fill( } } -/// Convert [reth_primitives::AccessList] to [reth_rpc_types::AccessList] -pub fn from_primitive_access_list( - access_list: reth_primitives::AccessList, -) -> reth_rpc_types::AccessList { - reth_rpc_types::AccessList( - access_list - .0 - .into_iter() - .map(|item| reth_rpc_types::AccessListItem { - address: item.address.0.into(), - storage_keys: item.storage_keys.into_iter().map(|key| key.0.into()).collect(), - }) - .collect(), - ) -} - /// Convert [TransactionSignedEcRecovered] to [TransactionRequest] pub fn transaction_to_call_request(tx: TransactionSignedEcRecovered) -> TransactionRequest { let from = tx.signer(); - let to = tx.transaction.to(); + let to = Some(tx.transaction.to().into()); let gas = tx.transaction.gas_limit(); let value = tx.transaction.value(); let input = tx.transaction.input().clone(); let nonce = tx.transaction.nonce(); let chain_id = tx.transaction.chain_id(); - let access_list = tx.transaction.access_list().cloned().map(from_primitive_access_list); + let access_list = tx.transaction.access_list().cloned(); let max_fee_per_blob_gas = tx.transaction.max_fee_per_blob_gas(); let blob_versioned_hashes = tx.transaction.blob_versioned_hashes(); let tx_type = tx.transaction.tx_type(); diff --git a/crates/rpc/rpc-types-compat/src/transaction/typed.rs b/crates/rpc/rpc-types-compat/src/transaction/typed.rs index cc90c626e..b119a0956 100644 --- a/crates/rpc/rpc-types-compat/src/transaction/typed.rs +++ b/crates/rpc/rpc-types-compat/src/transaction/typed.rs @@ -16,7 +16,7 @@ pub fn to_primitive_transaction( nonce: tx.nonce, gas_price: tx.gas_price.to(), gas_limit: tx.gas_limit.try_into().ok()?, - to: to_primitive_transaction_kind(tx.kind), + to: tx.kind, value: tx.value, input: tx.input, }), @@ -25,7 +25,7 @@ pub fn to_primitive_transaction( nonce: tx.nonce, gas_price: tx.gas_price.to(), gas_limit: tx.gas_limit.try_into().ok()?, - to: to_primitive_transaction_kind(tx.kind), + to: tx.kind, value: tx.value, input: tx.input, access_list: tx.access_list, @@ -35,7 +35,7 @@ pub fn to_primitive_transaction( nonce: tx.nonce, max_fee_per_gas: tx.max_fee_per_gas.to(), gas_limit: tx.gas_limit.try_into().ok()?, - to: to_primitive_transaction_kind(tx.kind), + to: tx.kind, value: tx.value, input: tx.input, access_list: tx.access_list, @@ -47,7 +47,7 @@ pub fn to_primitive_transaction( gas_limit: tx.gas_limit.to(), max_fee_per_gas: tx.max_fee_per_gas.to(), max_priority_fee_per_gas: tx.max_priority_fee_per_gas.to(), - to: to_primitive_transaction_kind(tx.kind), + to: tx.kind, value: tx.value, access_list: tx.access_list, blob_versioned_hashes: tx.blob_versioned_hashes, @@ -56,13 +56,3 @@ pub fn to_primitive_transaction( }), }) } - -/// Transforms a [reth_rpc_types::TransactionKind] into a [reth_primitives::TransactionKind] -pub fn to_primitive_transaction_kind( - kind: reth_rpc_types::TransactionKind, -) -> reth_primitives::TransactionKind { - match kind { - reth_rpc_types::TransactionKind::Call(to) => reth_primitives::TransactionKind::Call(to), - reth_rpc_types::TransactionKind::Create => reth_primitives::TransactionKind::Create, - } -} diff --git a/crates/rpc/rpc-types/Cargo.toml b/crates/rpc/rpc-types/Cargo.toml index 5f87e9482..83ad91f5c 100644 --- a/crates/rpc/rpc-types/Cargo.toml +++ b/crates/rpc/rpc-types/Cargo.toml @@ -12,17 +12,16 @@ description = "Reth RPC types" workspace = true [dependencies] + # ethereum -alloy-rlp = { workspace = true, features = ["arrayvec", "derive"] } 
alloy-primitives = { workspace = true, features = ["rand", "rlp", "serde"] } alloy-rpc-types = { workspace = true, features = ["jsonrpsee-types"] } alloy-rpc-types-anvil.workspace = true alloy-rpc-types-trace.workspace = true +alloy-rpc-types-beacon.workspace = true alloy-rpc-types-engine = { workspace = true, features = ["jsonrpsee-types"] } ethereum_ssz_derive = { version = "0.5", optional = true } ethereum_ssz = { version = "0.5", optional = true } -alloy-genesis.workspace = true -enr = { workspace = true, features = ["serde", "rust-secp256k1"] } # misc thiserror.workspace = true @@ -30,19 +29,10 @@ serde = { workspace = true, features = ["derive"] } serde_with = "3.3" serde_json.workspace = true jsonrpsee-types = { workspace = true, optional = true } -url = "2.3" -# necessary so we don't hit a "undeclared 'std'": -# https://github.com/paradigmxyz/reth/pull/177#discussion_r1021172198 -secp256k1.workspace = true - -# arbitrary -arbitrary = { workspace = true, features = ["derive"], optional = true } -proptest = { workspace = true, optional = true } -proptest-derive = { workspace = true, optional = true } [features] default = ["jsonrpsee-types"] -arbitrary = ["dep:arbitrary", "dep:proptest-derive", "dep:proptest", "alloy-primitives/arbitrary", "alloy-rpc-types/arbitrary"] +arbitrary = ["alloy-primitives/arbitrary", "alloy-rpc-types/arbitrary"] ssz = ["dep:ethereum_ssz" ,"dep:ethereum_ssz_derive", "alloy-primitives/ssz", "alloy-rpc-types/ssz", "alloy-rpc-types-engine/ssz"] diff --git a/crates/rpc/rpc-types/src/admin.rs b/crates/rpc/rpc-types/src/admin.rs deleted file mode 100644 index aeb44fab6..000000000 --- a/crates/rpc/rpc-types/src/admin.rs +++ /dev/null @@ -1,123 +0,0 @@ -use crate::{NodeRecord, PeerId}; -use alloy_genesis::ChainConfig; -use alloy_primitives::{B256, U256}; -use serde::{Deserialize, Serialize}; -use std::{ - collections::BTreeMap, - net::{IpAddr, SocketAddr}, -}; - -/// Represents the `admin_nodeInfo` response, which can be queried for all the information -/// known about the running node at the networking granularity. -/// -/// Note: this format is not standardized. Reth follows Geth's format, -/// see: -#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -pub struct NodeInfo { - /// Enode of the node in URL format. - pub enode: NodeRecord, - /// ID of the local node. - pub id: PeerId, - /// IP of the local node. - pub ip: IpAddr, - /// Address exposed for listening for the local node. - #[serde(rename = "listenAddr")] - pub listen_addr: SocketAddr, - /// Ports exposed by the node for discovery and listening. - pub ports: Ports, - /// Name of the network - pub name: String, - /// Networking protocols being run by the local node. - pub protocols: Protocols, -} - -impl NodeInfo { - /// Creates a new instance of `NodeInfo`. 
- pub fn new(enr: NodeRecord, status: NetworkStatus, config: ChainConfig) -> NodeInfo { - NodeInfo { - enode: enr, - id: enr.id, - ip: enr.address, - listen_addr: enr.tcp_addr(), - ports: Ports { discovery: enr.udp_port, listener: enr.tcp_port }, - name: status.client_version, - protocols: Protocols { - eth: EthProtocolInfo::new(status.eth_protocol_info, config), - other: Default::default(), - }, - } - } -} - -/// All supported protocols -#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -pub struct Protocols { - /// Info about `eth` sub-protocol - pub eth: EthProtocolInfo, - /// Placeholder for any other protocols - #[serde(flatten, default)] - pub other: BTreeMap, -} - -/// Ports exposed by the node for discovery and listening. -#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -pub struct Ports { - /// Port exposed for node discovery. - pub discovery: u16, - /// Port exposed for listening. - pub listener: u16, -} - -/// The status of the network being ran by the local node. -#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -pub struct NetworkStatus { - /// The local node client version. - pub client_version: String, - /// The current ethereum protocol version - pub protocol_version: u64, - /// Information about the Ethereum Wire Protocol. - pub eth_protocol_info: EthProtocolInfo, -} - -/// Information about the Ethereum Wire Protocol (ETH) -#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -pub struct EthProtocolInfo { - /// The current difficulty at the head of the chain. - pub difficulty: U256, - /// The block hash of the head of the chain. - pub head: B256, - /// Network ID in base 10. - pub network: u64, - /// Genesis block of the current chain. - pub genesis: B256, - /// Configuration of the chain. - pub config: ChainConfig, -} - -impl EthProtocolInfo { - /// Creates a new instance of `EthProtocolInfo`. 
- pub fn new(info: EthProtocolInfo, config: ChainConfig) -> EthProtocolInfo { - EthProtocolInfo { - difficulty: info.difficulty, - head: info.head, - network: info.network, - genesis: info.genesis, - config, - } - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn test_parse_node_info_roundtrip() { - let sample = r#"{"enode":"enode://44826a5d6a55f88a18298bca4773fca5749cdc3a5c9f308aa7d810e9b31123f3e7c5fba0b1d70aac5308426f47df2a128a6747040a3815cc7dd7167d03be320d@[::]:30303","id":"44826a5d6a55f88a18298bca4773fca5749cdc3a5c9f308aa7d810e9b31123f3e7c5fba0b1d70aac5308426f47df2a128a6747040a3815cc7dd7167d03be320d","ip":"::","listenAddr":"[::]:30303","name":"reth","ports":{"discovery":30303,"listener":30303},"protocols":{"eth":{"difficulty":17334254859343145000,"genesis":"0xd4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3","head":"0xb83f73fbe6220c111136aefd27b160bf4a34085c65ba89f24246b3162257c36a","network":1, "config": {"chainId": 17000,"homesteadBlock": 0,"daoForkSupport": true,"eip150Block": 0,"eip155Block": 0,"eip158Block": 0,"byzantiumBlock": 0,"constantinopleBlock": 0,"petersburgBlock": 0,"istanbulBlock": 0,"berlinBlock": 0,"londonBlock": 0,"shanghaiTime": 1696000704,"cancunTime": 1707305664,"terminalTotalDifficulty": 0,"terminalTotalDifficultyPassed": true,"ethash": {}}}}}"#; - - let info: NodeInfo = serde_json::from_str(sample).unwrap(); - let serialized = serde_json::to_string_pretty(&info).unwrap(); - let de_serialized: NodeInfo = serde_json::from_str(&serialized).unwrap(); - assert_eq!(info, de_serialized) - } -} diff --git a/crates/rpc/rpc-types/src/beacon/constants.rs b/crates/rpc/rpc-types/src/beacon/constants.rs deleted file mode 100644 index 945a4ba20..000000000 --- a/crates/rpc/rpc-types/src/beacon/constants.rs +++ /dev/null @@ -1,17 +0,0 @@ -/// The Domain Separation Tag for hash_to_point in Ethereum beacon chain BLS12-381 signatures. -/// -/// This is also the name of the ciphersuite that defines beacon chain BLS signatures. -/// -/// See: -/// -/// -pub const BLS_DST_SIG: &[u8] = b"BLS_SIG_BLS12381G2_XMD:SHA-256_SSWU_RO_POP_"; - -/// The number of bytes in a BLS12-381 public key. -pub const BLS_PUBLIC_KEY_BYTES_LEN: usize = 48; - -/// The number of bytes in a BLS12-381 secret key. -pub const BLS_SECRET_KEY_BYTES_LEN: usize = 32; - -/// The number of bytes in a BLS12-381 signature. 
-pub const BLS_SIGNATURE_BYTES_LEN: usize = 96; diff --git a/crates/rpc/rpc-types/src/beacon/events/attestation.rs b/crates/rpc/rpc-types/src/beacon/events/attestation.rs deleted file mode 100644 index c789a4671..000000000 --- a/crates/rpc/rpc-types/src/beacon/events/attestation.rs +++ /dev/null @@ -1,30 +0,0 @@ -use alloy_primitives::B256; -use serde::{Deserialize, Serialize}; -use serde_with::{serde_as, DisplayFromStr}; - -#[serde_as] -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub struct AttestationData { - #[serde_as(as = "DisplayFromStr")] - pub slot: u64, - #[serde_as(as = "DisplayFromStr")] - pub index: u64, - pub beacon_block_root: B256, - pub source: Source, - pub target: Target, -} - -#[serde_as] -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub struct Source { - #[serde_as(as = "DisplayFromStr")] - pub epoch: u64, - pub root: B256, -} -#[serde_as] -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub struct Target { - #[serde_as(as = "DisplayFromStr")] - pub epoch: u64, - pub root: B256, -} diff --git a/crates/rpc/rpc-types/src/beacon/events/light_client_finality.rs b/crates/rpc/rpc-types/src/beacon/events/light_client_finality.rs deleted file mode 100644 index 10928c7a7..000000000 --- a/crates/rpc/rpc-types/src/beacon/events/light_client_finality.rs +++ /dev/null @@ -1,54 +0,0 @@ -use alloy_primitives::{Bytes, B256}; -use serde::{Deserialize, Serialize}; -use serde_with::{serde_as, DisplayFromStr}; - -#[serde_as] -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub struct LightClientFinalityData { - pub attested_header: AttestedHeader, - pub finalized_header: FinalizedHeader, - pub finality_branch: Vec, - pub sync_aggregate: SyncAggregate, - #[serde_as(as = "DisplayFromStr")] - pub signature_slot: u64, -} - -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub struct AttestedHeader { - pub beacon: Beacon, -} - -#[serde_as] -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub struct Beacon { - #[serde_as(as = "DisplayFromStr")] - pub slot: u64, - #[serde_as(as = "DisplayFromStr")] - pub proposer_index: u64, - pub parent_root: B256, - pub state_root: B256, - pub body_root: B256, -} - -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub struct FinalizedHeader { - pub beacon: Beacon2, -} - -#[serde_as] -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub struct Beacon2 { - #[serde_as(as = "DisplayFromStr")] - pub slot: u64, - #[serde_as(as = "DisplayFromStr")] - pub proposer_index: u64, - pub parent_root: B256, - pub state_root: B256, - pub body_root: B256, -} - -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub struct SyncAggregate { - pub sync_committee_bits: Bytes, - pub sync_committee_signature: Bytes, -} diff --git a/crates/rpc/rpc-types/src/beacon/events/light_client_optimistic.rs b/crates/rpc/rpc-types/src/beacon/events/light_client_optimistic.rs deleted file mode 100644 index af310f8cc..000000000 --- a/crates/rpc/rpc-types/src/beacon/events/light_client_optimistic.rs +++ /dev/null @@ -1,24 +0,0 @@ -use crate::beacon::header::BeaconBlockHeader; -use alloy_primitives::Bytes; -use serde::{Deserialize, Serialize}; -use serde_with::{serde_as, DisplayFromStr}; - -#[serde_as] -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub struct LightClientOptimisticData { - pub attested_header: AttestedHeader, - pub sync_aggregate: SyncAggregate, - #[serde_as(as = "DisplayFromStr")] - pub signature_slot: u64, -} - 
-#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub struct AttestedHeader { - pub beacon: BeaconBlockHeader, -} - -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub struct SyncAggregate { - pub sync_committee_bits: Bytes, - pub sync_committee_signature: Bytes, -} diff --git a/crates/rpc/rpc-types/src/beacon/events/mod.rs b/crates/rpc/rpc-types/src/beacon/events/mod.rs deleted file mode 100644 index 501494a91..000000000 --- a/crates/rpc/rpc-types/src/beacon/events/mod.rs +++ /dev/null @@ -1,403 +0,0 @@ -//! Support for the Beacon API events -//! -//! See also [ethereum-beacon-API eventstream](https://ethereum.github.io/beacon-APIs/#/Events/eventstream) - -use crate::engine::PayloadAttributes; -use alloy_primitives::{Address, Bytes, B256}; -use attestation::AttestationData; -use light_client_finality::LightClientFinalityData; -use light_client_optimistic::LightClientOptimisticData; -use serde::{Deserialize, Serialize}; -use serde_with::{serde_as, DisplayFromStr}; - -pub mod attestation; -pub mod light_client_finality; -pub mod light_client_optimistic; - -/// Topic variant for the eventstream API -#[derive(Debug, Clone, Copy, PartialEq, Eq)] -pub enum BeaconNodeEventTopic { - PayloadAttributes, - Head, - Block, - Attestation, - VoluntaryExit, - BlsToExecutionChange, - FinalizedCheckpoint, - ChainReorg, - ContributionAndProof, - LightClientFinalityUpdate, - LightClientOptimisticUpdate, - BlobSidecar, -} - -impl BeaconNodeEventTopic { - /// Returns the identifier value for the eventstream query - pub fn query_value(&self) -> &'static str { - match self { - BeaconNodeEventTopic::PayloadAttributes => "payload_attributes", - BeaconNodeEventTopic::Head => "head", - BeaconNodeEventTopic::Block => "block", - BeaconNodeEventTopic::Attestation => "attestation", - BeaconNodeEventTopic::VoluntaryExit => "voluntary_exit", - BeaconNodeEventTopic::BlsToExecutionChange => "bls_to_execution_change", - BeaconNodeEventTopic::FinalizedCheckpoint => "finalized_checkpoint", - BeaconNodeEventTopic::ChainReorg => "chain_reorg", - BeaconNodeEventTopic::ContributionAndProof => "contribution_and_proof", - BeaconNodeEventTopic::LightClientFinalityUpdate => "light_client_finality_update", - BeaconNodeEventTopic::LightClientOptimisticUpdate => "light_client_optimistic_update", - BeaconNodeEventTopic::BlobSidecar => "blob_sidecar", - } - } -} - -/// Event for the `payload_attributes` topic of the beacon API node event stream. -/// -/// This event gives block builders and relays sufficient information to construct or verify a block -/// at `proposal_slot`. -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -#[serde(rename_all = "camelCase")] -pub struct PayloadAttributesEvent { - /// the identifier of the beacon hard fork at `proposal_slot`, e.g `"bellatrix"`, `"capella"`. - pub version: String, - /// Wrapped data of the event. - pub data: PayloadAttributesData, -} - -/// Event for the `Head` topic of the beacon API node event stream. -/// -/// The node has finished processing, resulting in a new head. previous_duty_dependent_root is -/// \`get_block_root_at_slot(state, compute_start_slot_at_epoch(epoch - 1) - 1)\` and -/// current_duty_dependent_root is \`get_block_root_at_slot(state, -/// compute_start_slot_at_epoch(epoch) -/// - 1)\`. Both dependent roots use the genesis block root in the case of underflow. 
-#[serde_as] -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub struct HeadEvent { - #[serde_as(as = "DisplayFromStr")] - pub slot: u64, - pub block: B256, - pub state: B256, - pub epoch_transition: bool, - pub previous_duty_dependent_root: B256, - pub current_duty_dependent_root: B256, - pub execution_optimistic: bool, -} - -/// Event for the `Block` topic of the beacon API node event stream. -/// -/// The node has received a valid block (from P2P or API) -#[serde_as] -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub struct BlockEvent { - #[serde_as(as = "DisplayFromStr")] - pub slot: u64, - pub block: B256, - pub execution_optimistic: bool, -} - -/// Event for the `Attestation` topic of the beacon API node event stream. -/// -/// The node has received a valid attestation (from P2P or API) -#[serde_as] -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub struct AttestationEvent { - pub aggregation_bits: Bytes, - pub signature: Bytes, - pub data: AttestationData, -} - -/// Event for the `VoluntaryExit` topic of the beacon API node event stream. -/// -/// The node has received a valid voluntary exit (from P2P or API) -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub struct VoluntaryExitEvent { - pub message: VoluntaryExitMessage, - pub signature: Bytes, -} - -#[serde_as] -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub struct VoluntaryExitMessage { - #[serde_as(as = "DisplayFromStr")] - pub epoch: u64, - #[serde_as(as = "DisplayFromStr")] - pub validator_index: u64, -} - -/// Event for the `BlsToExecutionChange` topic of the beacon API node event stream. -/// -/// The node has received a BLS to execution change (from P2P or API) -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub struct BlsToExecutionChangeEvent { - pub message: BlsToExecutionChangeMessage, - pub signature: Bytes, -} - -#[serde_as] -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub struct BlsToExecutionChangeMessage { - #[serde_as(as = "DisplayFromStr")] - pub validator_index: u64, - pub from_bls_pubkey: String, - pub to_execution_address: Address, -} - -/// Event for the `Deposit` topic of the beacon API node event stream. -/// -/// Finalized checkpoint has been updated -#[serde_as] -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub struct FinalizedCheckpointEvent { - pub block: B256, - pub state: B256, - #[serde_as(as = "DisplayFromStr")] - pub epoch: u64, - pub execution_optimistic: bool, -} - -/// Event for the `ChainReorg` topic of the beacon API node event stream. -/// -/// The node has reorganized its chain -#[serde_as] -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub struct ChainReorgEvent { - #[serde_as(as = "DisplayFromStr")] - pub slot: u64, - #[serde_as(as = "DisplayFromStr")] - pub depth: u64, - pub old_head_block: B256, - pub new_head_block: B256, - pub old_head_state: B256, - pub new_head_state: B256, - #[serde_as(as = "DisplayFromStr")] - pub epoch: u64, - pub execution_optimistic: bool, -} - -/// Event for the `ContributionAndProof` topic of the beacon API node event stream. 
-/// -/// The node has received a valid sync committee SignedContributionAndProof (from P2P or API) -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub struct ContributionAndProofEvent { - pub message: ContributionAndProofMessage, - pub signature: Bytes, -} - -#[serde_as] -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub struct ContributionAndProofMessage { - #[serde_as(as = "DisplayFromStr")] - pub aggregator_index: u64, - pub contribution: Contribution, - pub selection_proof: Bytes, -} - -#[serde_as] -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub struct Contribution { - #[serde_as(as = "DisplayFromStr")] - pub slot: u64, - pub beacon_block_root: B256, - #[serde_as(as = "DisplayFromStr")] - pub subcommittee_index: u64, - pub aggregation_bits: Bytes, - pub signature: Bytes, -} - -/// Event for the `LightClientFinalityUpdate` topic of the beacon API node event stream. -/// -/// The node's latest known `LightClientFinalityUpdate` has been updated -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub struct LightClientFinalityUpdateEvent { - pub version: String, - pub data: LightClientFinalityData, -} - -/// Event for the `LightClientOptimisticUpdate` topic of the beacon API node event stream. -/// -/// The node's latest known `LightClientOptimisticUpdate` has been updated -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub struct LightClientOptimisticUpdateEvent { - pub version: String, - pub data: LightClientOptimisticData, -} - -/// Event for the `BlobSidecar` topic of the beacon API node event stream. -/// -/// The node has received a BlobSidecar (from P2P or API) that passes all gossip validations on the -/// blob_sidecar_{subnet_id} topic -#[serde_as] -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub struct BlobSidecarEvent { - pub block_root: B256, - #[serde_as(as = "DisplayFromStr")] - pub index: u64, - #[serde_as(as = "DisplayFromStr")] - pub slot: u64, - pub kzg_commitment: Bytes, - pub versioned_hash: B256, -} - -impl PayloadAttributesEvent { - /// Returns the payload attributes - pub fn attributes(&self) -> &PayloadAttributes { - &self.data.payload_attributes - } -} - -/// Data of the event that contains the payload attributes -#[serde_as] -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub struct PayloadAttributesData { - /// The slot at which a block using these payload attributes may be built - #[serde_as(as = "DisplayFromStr")] - pub proposal_slot: u64, - /// the beacon block root of the parent block to be built upon. - pub parent_block_root: B256, - /// the execution block number of the parent block. - #[serde_as(as = "DisplayFromStr")] - pub parent_block_number: u64, - /// the execution block hash of the parent block. - pub parent_block_hash: B256, - /// The execution block number of the parent block. - /// the validator index of the proposer at `proposal_slot` on the chain identified by - /// `parent_block_root`. - #[serde_as(as = "DisplayFromStr")] - pub proposer_index: u64, - /// Beacon API encoding of `PayloadAttributesV` as defined by the `execution-apis` - /// specification - /// - /// Note: this uses the beacon API format which uses snake-case and quoted decimals rather than - /// big-endian hex. 
- #[serde(with = "crate::beacon::payload::beacon_api_payload_attributes")] - pub payload_attributes: PayloadAttributes, -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn serde_payload_attributes_event() { - let s = r#"{"version":"capella","data":{"proposal_slot":"173332","proposer_index":"649112","parent_block_root":"0x5a49069647f6bf8f25d76b55ce920947654ade4ba1c6ab826d16712dd62b42bf","parent_block_number":"161093","parent_block_hash":"0x608b3d140ecb5bbcd0019711ac3704ece7be8e6d100816a55db440c1bcbb0251","payload_attributes":{"timestamp":"1697982384","prev_randao":"0x3142abd98055871ebf78f0f8e758fd3a04df3b6e34d12d09114f37a737f8f01e","suggested_fee_recipient":"0x0000000000000000000000000000000000000001","withdrawals":[{"index":"2461612","validator_index":"853570","address":"0x778f5f13c4be78a3a4d7141bcb26999702f407cf","amount":"45016211"},{"index":"2461613","validator_index":"853571","address":"0x778f5f13c4be78a3a4d7141bcb26999702f407cf","amount":"5269785"},{"index":"2461614","validator_index":"853572","address":"0x778f5f13c4be78a3a4d7141bcb26999702f407cf","amount":"5275106"},{"index":"2461615","validator_index":"853573","address":"0x778f5f13c4be78a3a4d7141bcb26999702f407cf","amount":"5235962"},{"index":"2461616","validator_index":"853574","address":"0x778f5f13c4be78a3a4d7141bcb26999702f407cf","amount":"5252171"},{"index":"2461617","validator_index":"853575","address":"0x778f5f13c4be78a3a4d7141bcb26999702f407cf","amount":"5221319"},{"index":"2461618","validator_index":"853576","address":"0x778f5f13c4be78a3a4d7141bcb26999702f407cf","amount":"5260879"},{"index":"2461619","validator_index":"853577","address":"0x778f5f13c4be78a3a4d7141bcb26999702f407cf","amount":"5285244"},{"index":"2461620","validator_index":"853578","address":"0x778f5f13c4be78a3a4d7141bcb26999702f407cf","amount":"5266681"},{"index":"2461621","validator_index":"853579","address":"0x778f5f13c4be78a3a4d7141bcb26999702f407cf","amount":"5271322"},{"index":"2461622","validator_index":"853580","address":"0x778f5f13c4be78a3a4d7141bcb26999702f407cf","amount":"5231327"},{"index":"2461623","validator_index":"853581","address":"0x778f5f13c4be78a3a4d7141bcb26999702f407cf","amount":"5276761"},{"index":"2461624","validator_index":"853582","address":"0x778f5f13c4be78a3a4d7141bcb26999702f407cf","amount":"5246244"},{"index":"2461625","validator_index":"853583","address":"0x778f5f13c4be78a3a4d7141bcb26999702f407cf","amount":"5261011"},{"index":"2461626","validator_index":"853584","address":"0x778f5f13c4be78a3a4d7141bcb26999702f407cf","amount":"5276477"},{"index":"2461627","validator_index":"853585","address":"0x778f5f13c4be78a3a4d7141bcb26999702f407cf","amount":"5275319"}]}}}"#; - - let event: PayloadAttributesEvent = - serde_json::from_str::(s).unwrap(); - let input = serde_json::from_str::(s).unwrap(); - let json = serde_json::to_value(event).unwrap(); - assert_eq!(input, json); - } - #[test] - fn serde_head_event() { - let s = r#"{"slot":"10", "block":"0x9a2fefd2fdb57f74993c7780ea5b9030d2897b615b89f808011ca5aebed54eaf", "state":"0x600e852a08c1200654ddf11025f1ceacb3c2e74bdd5c630cde0838b2591b69f9", "epoch_transition":false, "previous_duty_dependent_root":"0x5e0043f107cb57913498fbf2f99ff55e730bf1e151f02f221e977c91a90a0e91", "current_duty_dependent_root":"0x5e0043f107cb57913498fbf2f99ff55e730bf1e151f02f221e977c91a90a0e91", "execution_optimistic": false}"#; - - let event: HeadEvent = serde_json::from_str::(s).unwrap(); - let input = serde_json::from_str::(s).unwrap(); - let json = serde_json::to_value(event).unwrap(); - 
assert_eq!(input, json); - } - - #[test] - fn serde_block_event() { - let s = r#"{"slot":"10", "block":"0x9a2fefd2fdb57f74993c7780ea5b9030d2897b615b89f808011ca5aebed54eaf", "execution_optimistic": false}"#; - - let event: BlockEvent = serde_json::from_str::(s).unwrap(); - let input = serde_json::from_str::(s).unwrap(); - let json = serde_json::to_value(event).unwrap(); - assert_eq!(input, json); - } - #[test] - fn serde_attestation_event() { - let s = r#"{"aggregation_bits":"0x01", "signature":"0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505", "data":{"slot":"1", "index":"1", "beacon_block_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2", "source":{"epoch":"1", "root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"}, "target":{"epoch":"1", "root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"}}}"#; - - let event: AttestationEvent = serde_json::from_str::(s).unwrap(); - let input = serde_json::from_str::(s).unwrap(); - let json = serde_json::to_value(event).unwrap(); - assert_eq!(input, json); - } - - #[test] - fn serde_voluntary_exit_event() { - let s = r#"{"message":{"epoch":"1", "validator_index":"1"}, "signature":"0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"}"#; - - let event: VoluntaryExitEvent = serde_json::from_str::(s).unwrap(); - let input = serde_json::from_str::(s).unwrap(); - let json = serde_json::to_value(event).unwrap(); - assert_eq!(input, json); - } - - #[test] - fn serde_bls_to_execution_change_event() { - let s = r#"{"message":{"validator_index":"1", "from_bls_pubkey":"0x933ad9491b62059dd065b560d256d8957a8c402cc6e8d8ee7290ae11e8f7329267a8811c397529dac52ae1342ba58c95", "to_execution_address":"0x9be8d619c56699667c1fedcd15f6b14d8b067f72"}, "signature":"0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"}"#; - - let event: BlsToExecutionChangeEvent = - serde_json::from_str::(s).unwrap(); - let input = serde_json::from_str::(s).unwrap(); - let json = serde_json::to_value(event).unwrap(); - assert_eq!(input, json); - } - - #[test] - fn serde_finalize_checkpoint_event() { - let s = r#"{"block":"0x9a2fefd2fdb57f74993c7780ea5b9030d2897b615b89f808011ca5aebed54eaf", "state":"0x600e852a08c1200654ddf11025f1ceacb3c2e74bdd5c630cde0838b2591b69f9", "epoch":"2", "execution_optimistic": false }"#; - - let event: FinalizedCheckpointEvent = - serde_json::from_str::(s).unwrap(); - let input = serde_json::from_str::(s).unwrap(); - let json = serde_json::to_value(event).unwrap(); - assert_eq!(input, json); - } - - #[test] - fn serde_chain_reorg_event() { - let s = r#"{"slot":"200", "depth":"50", "old_head_block":"0x9a2fefd2fdb57f74993c7780ea5b9030d2897b615b89f808011ca5aebed54eaf", "new_head_block":"0x76262e91970d375a19bfe8a867288d7b9cde43c8635f598d93d39d041706fc76", "old_head_state":"0x9a2fefd2fdb57f74993c7780ea5b9030d2897b615b89f808011ca5aebed54eaf", "new_head_state":"0x600e852a08c1200654ddf11025f1ceacb3c2e74bdd5c630cde0838b2591b69f9", "epoch":"2", "execution_optimistic": false}"#; - - let event: ChainReorgEvent = serde_json::from_str::(s).unwrap(); - let input = 
serde_json::from_str::(s).unwrap(); - let json = serde_json::to_value(event).unwrap(); - assert_eq!(input, json); - } - - #[test] - fn serde_contribution_and_proof_event() { - let s = r#"{"message": {"aggregator_index": "997", "contribution": {"slot": "168097", "beacon_block_root": "0x56f1fd4262c08fa81e27621c370e187e621a67fc80fe42340b07519f84b42ea1", "subcommittee_index": "0", "aggregation_bits": "0xffffffffffffffffffffffffffffffff", "signature": "0x85ab9018e14963026476fdf784cc674da144b3dbdb47516185438768774f077d882087b90ad642469902e782a8b43eed0cfc1b862aa9a473b54c98d860424a702297b4b648f3f30bdaae8a8b7627d10d04cb96a2cc8376af3e54a9aa0c8145e3"}, "selection_proof": "0x87c305f04bfe5db27c2b19fc23e00d7ac496ec7d3e759cbfdd1035cb8cf6caaa17a36a95a08ba78c282725e7b66a76820ca4eb333822bd399ceeb9807a0f2926c67ce67cfe06a0b0006838203b493505a8457eb79913ce1a3bcd1cc8e4ef30ed"}, "signature": "0xac118511474a94f857300b315c50585c32a713e4452e26a6bb98cdb619936370f126ed3b6bb64469259ee92e69791d9e12d324ce6fd90081680ce72f39d85d50b0ff977260a8667465e613362c6d6e6e745e1f9323ec1d6f16041c4e358839ac"}"#; - - let event: ContributionAndProofEvent = - serde_json::from_str::(s).unwrap(); - let input = serde_json::from_str::(s).unwrap(); - let json = serde_json::to_value(event).unwrap(); - assert_eq!(input, json); - } - - #[test] - fn serde_light_client_finality_update_event() { - let s = r#"{"version":"phase0", "data": {"attested_header": {"beacon": {"slot":"1", "proposer_index":"1", "parent_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2", "state_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2", "body_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"}}, "finalized_header": {"beacon": {"slot":"1", "proposer_index":"1", "parent_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2", "state_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2", "body_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"}}, "finality_branch": ["0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2", "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2", "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2", "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2", "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2", "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"], "sync_aggregate": {"sync_committee_bits":"0x01", "sync_committee_signature":"0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"}, "signature_slot":"1"}}"#; - - let event: LightClientFinalityUpdateEvent = - serde_json::from_str::(s).unwrap(); - let input = serde_json::from_str::(s).unwrap(); - let json = serde_json::to_value(event).unwrap(); - assert_eq!(input, json); - } - #[test] - fn serde_light_client_optimistic_update_event() { - let s = r#"{"version":"phase0", "data": {"attested_header": {"beacon": {"slot":"1", "proposer_index":"1", "parent_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2", "state_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2", "body_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"}}, "sync_aggregate": {"sync_committee_bits":"0x01", 
"sync_committee_signature":"0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"}, "signature_slot":"1"}}"#; - - let event: LightClientOptimisticUpdateEvent = - serde_json::from_str::(s).unwrap(); - let input = serde_json::from_str::(s).unwrap(); - let json = serde_json::to_value(event).unwrap(); - assert_eq!(input, json); - } - - #[test] - fn serde_blob_sidecar_event() { - let s = r#"{"block_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2", "index": "1", "slot": "1", "kzg_commitment": "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505", "versioned_hash": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"}"#; - - let event: BlobSidecarEvent = serde_json::from_str::(s).unwrap(); - let input = serde_json::from_str::(s).unwrap(); - let json = serde_json::to_value(event).unwrap(); - assert_eq!(input, json); - } -} diff --git a/crates/rpc/rpc-types/src/beacon/header.rs b/crates/rpc/rpc-types/src/beacon/header.rs deleted file mode 100644 index 9843d3351..000000000 --- a/crates/rpc/rpc-types/src/beacon/header.rs +++ /dev/null @@ -1,125 +0,0 @@ -//! Beacon block header types. -//! -//! See also - -use alloy_primitives::{Bytes, B256}; -use serde::{Deserialize, Serialize}; -use serde_with::{serde_as, DisplayFromStr}; - -/// The response to a request for beacon block headers: `getBlockHeaders` -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub struct HeadersResponse { - /// True if the response references an unverified execution payload. Optimistic information may - /// be invalidated at a later time. If the field is not present, assume the False value. - pub execution_optimistic: bool, - /// True if the response references the finalized history of the chain, as determined by fork - /// choice. If the field is not present, additional calls are necessary to compare the epoch of - /// the requested information with the finalized checkpoint. - pub finalized: bool, - /// Container for the header data. - pub data: Vec, -} - -/// The response to a request for a __single__ beacon block header: `headers/{id}` -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub struct HeaderResponse { - /// True if the response references an unverified execution payload. Optimistic information may - /// be invalidated at a later time. If the field is not present, assume the False value. - pub execution_optimistic: bool, - /// True if the response references the finalized history of the chain, as determined by fork - /// choice. If the field is not present, additional calls are necessary to compare the epoch of - /// the requested information with the finalized checkpoint. - pub finalized: bool, - /// Container for the header data. - pub data: HeaderData, -} - -/// Container type for a beacon block header. -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub struct HeaderData { - /// root hash of the block - pub root: B256, - /// Whether the block is part of the canonical chain - pub canonical: bool, - /// The `SignedBeaconBlockHeader` object envelope from the CL spec. - pub header: Header, -} - -/// [BeaconBlockHeader] with a signature. 
-#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub struct Header { - /// The `BeaconBlockHeader` object from the CL spec. - pub message: BeaconBlockHeader, - pub signature: Bytes, -} - -/// The header of a beacon block. -#[serde_as] -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub struct BeaconBlockHeader { - /// The slot to which this block corresponds. - #[serde_as(as = "DisplayFromStr")] - pub slot: u64, - /// Index of validator in validator registry. - #[serde_as(as = "DisplayFromStr")] - pub proposer_index: u64, - /// The signing merkle root of the parent BeaconBlock. - pub parent_root: B256, - /// The tree hash merkle root of the BeaconState for the BeaconBlock. - pub state_root: B256, - /// The tree hash merkle root of the BeaconBlockBody for the BeaconBlock - pub body_root: B256, -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn serde_headers_response() { - let s = r#"{ - "execution_optimistic": false, - "finalized": false, - "data": [ - { - "root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2", - "canonical": true, - "header": { - "message": { - "slot": "1", - "proposer_index": "1", - "parent_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2", - "state_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2", - "body_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2" - }, - "signature": "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505" - } - } - ] -}"#; - let _header_response: HeadersResponse = serde_json::from_str(s).unwrap(); - } - - #[test] - fn serde_header_response() { - let s = r#"{ - "execution_optimistic": false, - "finalized": false, - "data": { - "root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2", - "canonical": true, - "header": { - "message": { - "slot": "1", - "proposer_index": "1", - "parent_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2", - "state_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2", - "body_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2" - }, - "signature": "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505" - } - } -}"#; - let _header_response: HeaderResponse = serde_json::from_str(s).unwrap(); - } -} diff --git a/crates/rpc/rpc-types/src/beacon/mod.rs b/crates/rpc/rpc-types/src/beacon/mod.rs deleted file mode 100644 index 1184d2e43..000000000 --- a/crates/rpc/rpc-types/src/beacon/mod.rs +++ /dev/null @@ -1,19 +0,0 @@ -//! Types for the Ethereum 2.0 RPC protocol (beacon chain). - -#![allow(missing_docs)] - -use alloy_primitives::FixedBytes; -use constants::{BLS_PUBLIC_KEY_BYTES_LEN, BLS_SIGNATURE_BYTES_LEN}; - -pub mod constants; -/// Beacon API events support. -pub mod events; -pub mod header; -pub mod payload; -pub mod withdrawals; - -/// BLS signature type -pub type BlsSignature = FixedBytes; - -/// BLS public key type -pub type BlsPublicKey = FixedBytes; diff --git a/crates/rpc/rpc-types/src/beacon/payload.rs b/crates/rpc/rpc-types/src/beacon/payload.rs deleted file mode 100644 index a4898b723..000000000 --- a/crates/rpc/rpc-types/src/beacon/payload.rs +++ /dev/null @@ -1,565 +0,0 @@ -//! 
Payload support for the beacon API.
-//!
-//! Internal helper module to deserialize/serialize the payload attributes for the beacon API, which
-//! uses snake case and quoted decimals.
-//!
-//! This is necessary because we don't want to allow a mixture of both formats, hence `serde`
-//! aliases are not an option.
-//!
-//! See also
-
-#![allow(missing_docs)]
-
-use crate::{
-    beacon::{withdrawals::BeaconWithdrawal, BlsPublicKey},
-    engine::{ExecutionPayload, ExecutionPayloadV1, ExecutionPayloadV2, ExecutionPayloadV3},
-    Withdrawal,
-};
-use alloy_primitives::{Address, Bloom, Bytes, B256, U256};
-use serde::{Deserialize, Deserializer, Serialize, Serializer};
-use serde_with::{serde_as, DeserializeAs, DisplayFromStr, SerializeAs};
-use std::borrow::Cow;
-
-/// Response object of GET `/eth/v1/builder/header/{slot}/{parent_hash}/{pubkey}`
-///
-/// See also
-#[derive(Default, Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
-pub struct GetExecutionPayloadHeaderResponse {
-    pub version: String,
-    pub data: ExecutionPayloadHeaderData,
-}
-
-#[derive(Default, Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
-pub struct ExecutionPayloadHeaderData {
-    pub message: ExecutionPayloadHeaderMessage,
-    pub signature: Bytes,
-}
-
-#[serde_as]
-#[derive(Default, Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
-pub struct ExecutionPayloadHeaderMessage {
-    pub header: ExecutionPayloadHeader,
-    #[serde_as(as = "DisplayFromStr")]
-    pub value: U256,
-    pub pubkey: BlsPublicKey,
-}
-
-/// The header of the execution payload.
-#[serde_as]
-#[derive(Default, Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
-pub struct ExecutionPayloadHeader {
-    pub parent_hash: B256,
-    pub fee_recipient: Address,
-    pub state_root: B256,
-    pub receipts_root: B256,
-    pub logs_bloom: Bloom,
-    pub prev_randao: B256,
-    #[serde_as(as = "DisplayFromStr")]
-    pub block_number: String,
-    #[serde_as(as = "DisplayFromStr")]
-    pub gas_limit: u64,
-    #[serde_as(as = "DisplayFromStr")]
-    pub gas_used: u64,
-    #[serde_as(as = "DisplayFromStr")]
-    pub timestamp: u64,
-    pub extra_data: Bytes,
-    #[serde_as(as = "DisplayFromStr")]
-    pub base_fee_per_gas: U256,
-    pub block_hash: B256,
-    pub transactions_root: B256,
-}
-
-#[serde_as]
-#[derive(Serialize, Deserialize)]
-struct BeaconPayloadAttributes {
-    #[serde_as(as = "DisplayFromStr")]
-    timestamp: u64,
-    prev_randao: B256,
-    suggested_fee_recipient: Address,
-    #[serde(skip_serializing_if = "Option::is_none")]
-    #[serde_as(as = "Option<Vec<BeaconWithdrawal>>")]
-    withdrawals: Option<Vec<Withdrawal>>,
-    #[serde(skip_serializing_if = "Option::is_none")]
-    parent_beacon_block_root: Option<B256>,
-}
-
-/// Optimism Payload Attributes
-#[serde_as]
-#[derive(Serialize, Deserialize)]
-struct BeaconOptimismPayloadAttributes {
-    #[serde(flatten)]
-    payload_attributes: BeaconPayloadAttributes,
-    #[serde(default, skip_serializing_if = "Option::is_none")]
-    transactions: Option<Vec<Bytes>>,
-    #[serde(default, skip_serializing_if = "Option::is_none")]
-    no_tx_pool: Option<bool>,
-    #[serde(default, skip_serializing_if = "Option::is_none")]
-    #[serde_as(as = "Option<DisplayFromStr>")]
-    gas_limit: Option<u64>,
-}
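Note: BeaconOptimismPayloadAttributes composes the shared fields via #[serde(flatten)], so the OP-specific keys live at the same JSON level as the base attributes. A standalone sketch of the same shape (the Base/Extended names are hypothetical, not part of this crate):

    use serde::{Deserialize, Serialize};

    #[derive(Debug, Serialize, Deserialize)]
    struct Base {
        timestamp: u64,
    }

    #[derive(Debug, Serialize, Deserialize)]
    struct Extended {
        // Base fields are inlined into the same JSON object.
        #[serde(flatten)]
        base: Base,
        #[serde(default, skip_serializing_if = "Option::is_none")]
        no_tx_pool: Option<bool>,
    }

    fn main() {
        let v: Extended = serde_json::from_str(r#"{"timestamp":1,"no_tx_pool":true}"#).unwrap();
        assert_eq!(v.base.timestamp, 1);
        // `None` optionals are omitted entirely on the way back out.
        let out = Extended { base: Base { timestamp: 1 }, no_tx_pool: None };
        assert_eq!(serde_json::to_string(&out).unwrap(), r#"{"timestamp":1}"#);
    }
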
-
-/// A helper module for serializing and deserializing optimism payload attributes for the beacon
-/// API.
-///
-/// See docs for [beacon_api_payload_attributes].
-pub mod beacon_api_payload_attributes_optimism {
-    use super::*;
-    use crate::engine::{OptimismPayloadAttributes, PayloadAttributes};
-
-    /// Serialize the payload attributes for the beacon API.
-    pub fn serialize<S>(
-        payload_attributes: &OptimismPayloadAttributes,
-        serializer: S,
-    ) -> Result<S::Ok, S::Error>
-    where
-        S: Serializer,
-    {
-        let beacon_api_payload_attributes = BeaconPayloadAttributes {
-            timestamp: payload_attributes.payload_attributes.timestamp,
-            prev_randao: payload_attributes.payload_attributes.prev_randao,
-            suggested_fee_recipient: payload_attributes.payload_attributes.suggested_fee_recipient,
-            withdrawals: payload_attributes.payload_attributes.withdrawals.clone(),
-            parent_beacon_block_root: payload_attributes
-                .payload_attributes
-                .parent_beacon_block_root,
-        };
-
-        let op_beacon_api_payload_attributes = BeaconOptimismPayloadAttributes {
-            payload_attributes: beacon_api_payload_attributes,
-            transactions: payload_attributes.transactions.clone(),
-            no_tx_pool: payload_attributes.no_tx_pool,
-            gas_limit: payload_attributes.gas_limit,
-        };
-
-        op_beacon_api_payload_attributes.serialize(serializer)
-    }
-
-    /// Deserialize the payload attributes for the beacon API.
-    pub fn deserialize<'de, D>(deserializer: D) -> Result<OptimismPayloadAttributes, D::Error>
-    where
-        D: Deserializer<'de>,
-    {
-        let beacon_api_payload_attributes =
-            BeaconOptimismPayloadAttributes::deserialize(deserializer)?;
-        Ok(OptimismPayloadAttributes {
-            payload_attributes: PayloadAttributes {
-                timestamp: beacon_api_payload_attributes.payload_attributes.timestamp,
-                prev_randao: beacon_api_payload_attributes.payload_attributes.prev_randao,
-                suggested_fee_recipient: beacon_api_payload_attributes
-                    .payload_attributes
-                    .suggested_fee_recipient,
-                withdrawals: beacon_api_payload_attributes.payload_attributes.withdrawals,
-                parent_beacon_block_root: beacon_api_payload_attributes
-                    .payload_attributes
-                    .parent_beacon_block_root,
-            },
-            transactions: beacon_api_payload_attributes.transactions,
-            no_tx_pool: beacon_api_payload_attributes.no_tx_pool,
-            gas_limit: beacon_api_payload_attributes.gas_limit,
-        })
-    }
-}
-
-/// A helper module for serializing and deserializing the payload attributes for the beacon API.
-///
-/// The beacon API encoded object has equivalent fields to the
-/// [PayloadAttributes](crate::engine::PayloadAttributes) with two differences:
-/// 1) `snake_case` identifiers must be used rather than `camelCase`;
-/// 2) integers must be encoded as quoted decimals rather than big-endian hex.
-pub mod beacon_api_payload_attributes {
-    use super::*;
-    use crate::engine::PayloadAttributes;
-
-    /// Serialize the payload attributes for the beacon API.
-    pub fn serialize<S>(
-        payload_attributes: &PayloadAttributes,
-        serializer: S,
-    ) -> Result<S::Ok, S::Error>
-    where
-        S: Serializer,
-    {
-        let beacon_api_payload_attributes = BeaconPayloadAttributes {
-            timestamp: payload_attributes.timestamp,
-            prev_randao: payload_attributes.prev_randao,
-            suggested_fee_recipient: payload_attributes.suggested_fee_recipient,
-            withdrawals: payload_attributes.withdrawals.clone(),
-            parent_beacon_block_root: payload_attributes.parent_beacon_block_root,
-        };
-        beacon_api_payload_attributes.serialize(serializer)
-    }
-
-    /// Deserialize the payload attributes for the beacon API.
-    pub fn deserialize<'de, D>(deserializer: D) -> Result<PayloadAttributes, D::Error>
-    where
-        D: Deserializer<'de>,
-    {
-        let beacon_api_payload_attributes = BeaconPayloadAttributes::deserialize(deserializer)?;
-        Ok(PayloadAttributes {
-            timestamp: beacon_api_payload_attributes.timestamp,
-            prev_randao: beacon_api_payload_attributes.prev_randao,
-            suggested_fee_recipient: beacon_api_payload_attributes.suggested_fee_recipient,
-            withdrawals: beacon_api_payload_attributes.withdrawals,
-            parent_beacon_block_root: beacon_api_payload_attributes.parent_beacon_block_root,
-        })
-    }
-}
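Note: modules like beacon_api_payload_attributes exist to be named in a #[serde(with = "...")] attribute: the struct keeps its natural field types while the module owns the wire format. The shape in miniature (the as_string module and Attributes struct are illustrative only):

    use serde::{Deserialize, Serialize};

    // A module usable as `#[serde(with = "as_string")]`: it owns the wire
    // format (quoted decimal) while the struct keeps the natural `u64`.
    mod as_string {
        use serde::{de::Error, Deserialize, Deserializer, Serializer};

        pub fn serialize<S: Serializer>(value: &u64, serializer: S) -> Result<S::Ok, S::Error> {
            serializer.serialize_str(&value.to_string())
        }

        pub fn deserialize<'de, D: Deserializer<'de>>(deserializer: D) -> Result<u64, D::Error> {
            let s = String::deserialize(deserializer)?;
            s.parse().map_err(D::Error::custom)
        }
    }

    #[derive(Debug, Serialize, Deserialize)]
    struct Attributes {
        #[serde(with = "as_string")]
        timestamp: u64,
    }

    fn main() {
        let a: Attributes = serde_json::from_str(r#"{"timestamp":"1697982384"}"#).unwrap();
        assert_eq!(a.timestamp, 1697982384);
        assert_eq!(serde_json::to_string(&a).unwrap(), r#"{"timestamp":"1697982384"}"#);
    }
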
-
-#[serde_as]
-#[derive(Debug, Serialize, Deserialize)]
-struct BeaconExecutionPayloadV1<'a> {
-    parent_hash: Cow<'a, B256>,
-    fee_recipient: Cow<'a, Address>,
-    state_root: Cow<'a, B256>,
-    receipts_root: Cow<'a, B256>,
-    logs_bloom: Cow<'a, Bloom>,
-    prev_randao: Cow<'a, B256>,
-    #[serde_as(as = "DisplayFromStr")]
-    block_number: u64,
-    #[serde_as(as = "DisplayFromStr")]
-    gas_limit: u64,
-    #[serde_as(as = "DisplayFromStr")]
-    gas_used: u64,
-    #[serde_as(as = "DisplayFromStr")]
-    timestamp: u64,
-    extra_data: Cow<'a, Bytes>,
-    #[serde_as(as = "DisplayFromStr")]
-    base_fee_per_gas: U256,
-    block_hash: Cow<'a, B256>,
-    transactions: Cow<'a, Vec<Bytes>>,
-}
-
-impl<'a> From<BeaconExecutionPayloadV1<'a>> for ExecutionPayloadV1 {
-    fn from(payload: BeaconExecutionPayloadV1<'a>) -> Self {
-        let BeaconExecutionPayloadV1 {
-            parent_hash,
-            fee_recipient,
-            state_root,
-            receipts_root,
-            logs_bloom,
-            prev_randao,
-            block_number,
-            gas_limit,
-            gas_used,
-            timestamp,
-            extra_data,
-            base_fee_per_gas,
-            block_hash,
-            transactions,
-        } = payload;
-        ExecutionPayloadV1 {
-            parent_hash: parent_hash.into_owned(),
-            fee_recipient: fee_recipient.into_owned(),
-            state_root: state_root.into_owned(),
-            receipts_root: receipts_root.into_owned(),
-            logs_bloom: logs_bloom.into_owned(),
-            prev_randao: prev_randao.into_owned(),
-            block_number,
-            gas_limit,
-            gas_used,
-            timestamp,
-            extra_data: extra_data.into_owned(),
-            base_fee_per_gas,
-            block_hash: block_hash.into_owned(),
-            transactions: transactions.into_owned(),
-        }
-    }
-}
-
-impl<'a> From<&'a ExecutionPayloadV1> for BeaconExecutionPayloadV1<'a> {
-    fn from(value: &'a ExecutionPayloadV1) -> Self {
-        let ExecutionPayloadV1 {
-            parent_hash,
-            fee_recipient,
-            state_root,
-            receipts_root,
-            logs_bloom,
-            prev_randao,
-            block_number,
-            gas_limit,
-            gas_used,
-            timestamp,
-            extra_data,
-            base_fee_per_gas,
-            block_hash,
-            transactions,
-        } = value;
-
-        BeaconExecutionPayloadV1 {
-            parent_hash: Cow::Borrowed(parent_hash),
-            fee_recipient: Cow::Borrowed(fee_recipient),
-            state_root: Cow::Borrowed(state_root),
-            receipts_root: Cow::Borrowed(receipts_root),
-            logs_bloom: Cow::Borrowed(logs_bloom),
-            prev_randao: Cow::Borrowed(prev_randao),
-            block_number: *block_number,
-            gas_limit: *gas_limit,
-            gas_used: *gas_used,
-            timestamp: *timestamp,
-            extra_data: Cow::Borrowed(extra_data),
-            base_fee_per_gas: *base_fee_per_gas,
-            block_hash: Cow::Borrowed(block_hash),
-            transactions: Cow::Borrowed(transactions),
-        }
-    }
-}
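Note: the From<&'a ExecutionPayloadV1> impl above builds the proxy out of Cow::Borrowed references, so serializing a payload never clones its hashes or transaction bytes; only the deserialize path pays for into_owned. The idea in isolation (hypothetical Inner/Proxy types):

    use serde::Serialize;
    use std::borrow::Cow;

    struct Inner {
        data: Vec<u8>,
    }

    // Wire-format proxy: borrows on the serialize path, so nothing is cloned.
    #[derive(Serialize)]
    struct Proxy<'a> {
        data: Cow<'a, Vec<u8>>,
    }

    impl<'a> From<&'a Inner> for Proxy<'a> {
        fn from(inner: &'a Inner) -> Self {
            Proxy { data: Cow::Borrowed(&inner.data) }
        }
    }

    fn main() {
        let inner = Inner { data: vec![1, 2, 3] };
        let json = serde_json::to_string(&Proxy::from(&inner)).unwrap();
        assert_eq!(json, r#"{"data":[1,2,3]}"#);
    }
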
-
-/// A helper serde module to convert from/to the Beacon API which uses quoted decimals rather than
-/// big-endian hex.
-pub mod beacon_payload_v1 {
-    use super::*;
-
-    /// Serialize the payload attributes for the beacon API.
-    pub fn serialize<S>(
-        payload_attributes: &ExecutionPayloadV1,
-        serializer: S,
-    ) -> Result<S::Ok, S::Error>
-    where
-        S: Serializer,
-    {
-        BeaconExecutionPayloadV1::from(payload_attributes).serialize(serializer)
-    }
-
-    /// Deserialize the payload attributes for the beacon API.
-    pub fn deserialize<'de, D>(deserializer: D) -> Result<ExecutionPayloadV1, D::Error>
-    where
-        D: Deserializer<'de>,
-    {
-        BeaconExecutionPayloadV1::deserialize(deserializer).map(Into::into)
-    }
-}
-
-#[serde_as]
-#[derive(Debug, Serialize, Deserialize)]
-struct BeaconExecutionPayloadV2<'a> {
-    /// Inner V1 payload
-    #[serde(flatten)]
-    payload_inner: BeaconExecutionPayloadV1<'a>,
-    /// Array of [`Withdrawal`] enabled with V2
-    /// See
-    #[serde_as(as = "Vec<BeaconWithdrawal>")]
-    withdrawals: Vec<Withdrawal>,
-}
-
-impl<'a> From<BeaconExecutionPayloadV2<'a>> for ExecutionPayloadV2 {
-    fn from(payload: BeaconExecutionPayloadV2<'a>) -> Self {
-        let BeaconExecutionPayloadV2 { payload_inner, withdrawals } = payload;
-        ExecutionPayloadV2 { payload_inner: payload_inner.into(), withdrawals }
-    }
-}
-
-impl<'a> From<&'a ExecutionPayloadV2> for BeaconExecutionPayloadV2<'a> {
-    fn from(value: &'a ExecutionPayloadV2) -> Self {
-        let ExecutionPayloadV2 { payload_inner, withdrawals } = value;
-        BeaconExecutionPayloadV2 {
-            payload_inner: payload_inner.into(),
-            withdrawals: withdrawals.clone(),
-        }
-    }
-}
-
-/// A helper serde module to convert from/to the Beacon API which uses quoted decimals rather than
-/// big-endian hex.
-pub mod beacon_payload_v2 {
-    use super::*;
-
-    /// Serialize the payload attributes for the beacon API.
-    pub fn serialize<S>(
-        payload_attributes: &ExecutionPayloadV2,
-        serializer: S,
-    ) -> Result<S::Ok, S::Error>
-    where
-        S: Serializer,
-    {
-        BeaconExecutionPayloadV2::from(payload_attributes).serialize(serializer)
-    }
-
-    /// Deserialize the payload attributes for the beacon API.
-    pub fn deserialize<'de, D>(deserializer: D) -> Result<ExecutionPayloadV2, D::Error>
-    where
-        D: Deserializer<'de>,
-    {
-        BeaconExecutionPayloadV2::deserialize(deserializer).map(Into::into)
-    }
-}
-
-#[serde_as]
-#[derive(Debug, Serialize, Deserialize)]
-struct BeaconExecutionPayloadV3<'a> {
-    /// Inner V1 payload
-    #[serde(flatten)]
-    payload_inner: BeaconExecutionPayloadV2<'a>,
-    #[serde_as(as = "DisplayFromStr")]
-    blob_gas_used: u64,
-    #[serde_as(as = "DisplayFromStr")]
-    excess_blob_gas: u64,
-}
-
-impl<'a> From<BeaconExecutionPayloadV3<'a>> for ExecutionPayloadV3 {
-    fn from(payload: BeaconExecutionPayloadV3<'a>) -> Self {
-        let BeaconExecutionPayloadV3 { payload_inner, blob_gas_used, excess_blob_gas } = payload;
-        ExecutionPayloadV3 { payload_inner: payload_inner.into(), blob_gas_used, excess_blob_gas }
-    }
-}
-
-impl<'a> From<&'a ExecutionPayloadV3> for BeaconExecutionPayloadV3<'a> {
-    fn from(value: &'a ExecutionPayloadV3) -> Self {
-        let ExecutionPayloadV3 { payload_inner, blob_gas_used, excess_blob_gas } = value;
-        BeaconExecutionPayloadV3 {
-            payload_inner: payload_inner.into(),
-            blob_gas_used: *blob_gas_used,
-            excess_blob_gas: *excess_blob_gas,
-        }
-    }
-}
-
-/// A helper serde module to convert from/to the Beacon API which uses quoted decimals rather than
-/// big-endian hex.
-pub mod beacon_payload_v3 {
-    use super::*;
-
-    /// Serialize the payload attributes for the beacon API.
-    pub fn serialize<S>(
-        payload_attributes: &ExecutionPayloadV3,
-        serializer: S,
-    ) -> Result<S::Ok, S::Error>
-    where
-        S: Serializer,
-    {
-        BeaconExecutionPayloadV3::from(payload_attributes).serialize(serializer)
-    }
-
-    /// Deserialize the payload attributes for the beacon API.
-    pub fn deserialize<'de, D>(deserializer: D) -> Result<ExecutionPayloadV3, D::Error>
-    where
-        D: Deserializer<'de>,
-    {
-        BeaconExecutionPayloadV3::deserialize(deserializer).map(Into::into)
-    }
-}
-
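Note: the per-version helper modules above feed into one untagged enum (next hunk), whose derived deserializer tries variants in declaration order. Because a V3 payload is a superset of V2, and V2 of V1, the richest variant must be listed first or extra fields would be silently dropped. A toy demonstration of that ordering rule (illustrative V1/V2 structs):

    use serde::Deserialize;

    #[derive(Debug, Deserialize)]
    struct V1 {
        a: u64,
    }

    #[derive(Debug, Deserialize)]
    struct V2 {
        a: u64,
        b: u64,
    }

    // Untagged: serde tries variants top to bottom and keeps the first that
    // fits. Structs ignore unknown fields by default, so the superset variant
    // must come first or `{"a":1,"b":2}` would match V1 and drop `b`.
    #[derive(Debug, Deserialize)]
    #[serde(untagged)]
    enum Versioned {
        V2(V2),
        V1(V1),
    }

    fn main() {
        match serde_json::from_str::<Versioned>(r#"{"a":1,"b":2}"#).unwrap() {
            Versioned::V2(v) => assert_eq!(v.b, 2),
            Versioned::V1(_) => unreachable!("superset variant is tried first"),
        }
    }
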
-/// Represents all possible payload versions.
-#[derive(Debug, Serialize)]
-#[serde(untagged)]
-enum BeaconExecutionPayload<'a> {
-    /// V1 payload
-    V1(BeaconExecutionPayloadV1<'a>),
-    /// V2 payload
-    V2(BeaconExecutionPayloadV2<'a>),
-    /// V3 payload
-    V3(BeaconExecutionPayloadV3<'a>),
-}
-
-// Deserializes untagged ExecutionPayload by trying each variant in falling order
-impl<'de> Deserialize<'de> for BeaconExecutionPayload<'de> {
-    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
-    where
-        D: Deserializer<'de>,
-    {
-        #[derive(Deserialize)]
-        #[serde(untagged)]
-        enum BeaconExecutionPayloadDesc<'a> {
-            V3(BeaconExecutionPayloadV3<'a>),
-            V2(BeaconExecutionPayloadV2<'a>),
-            V1(BeaconExecutionPayloadV1<'a>),
-        }
-        match BeaconExecutionPayloadDesc::deserialize(deserializer)? {
-            BeaconExecutionPayloadDesc::V3(payload) => Ok(Self::V3(payload)),
-            BeaconExecutionPayloadDesc::V2(payload) => Ok(Self::V2(payload)),
-            BeaconExecutionPayloadDesc::V1(payload) => Ok(Self::V1(payload)),
-        }
-    }
-}
-
-impl<'a> From<BeaconExecutionPayload<'a>> for ExecutionPayload {
-    fn from(payload: BeaconExecutionPayload<'a>) -> Self {
-        match payload {
-            BeaconExecutionPayload::V1(payload) => {
-                ExecutionPayload::V1(ExecutionPayloadV1::from(payload))
-            }
-            BeaconExecutionPayload::V2(payload) => {
-                ExecutionPayload::V2(ExecutionPayloadV2::from(payload))
-            }
-            BeaconExecutionPayload::V3(payload) => {
-                ExecutionPayload::V3(ExecutionPayloadV3::from(payload))
-            }
-        }
-    }
-}
-
-impl<'a> From<&'a ExecutionPayload> for BeaconExecutionPayload<'a> {
-    fn from(value: &'a ExecutionPayload) -> Self {
-        match value {
-            ExecutionPayload::V1(payload) => {
-                BeaconExecutionPayload::V1(BeaconExecutionPayloadV1::from(payload))
-            }
-            ExecutionPayload::V2(payload) => {
-                BeaconExecutionPayload::V2(BeaconExecutionPayloadV2::from(payload))
-            }
-            ExecutionPayload::V3(payload) => {
-                BeaconExecutionPayload::V3(BeaconExecutionPayloadV3::from(payload))
-            }
-        }
-    }
-}
-
-impl<'a> SerializeAs<ExecutionPayload> for BeaconExecutionPayload<'a> {
-    fn serialize_as<S>(source: &ExecutionPayload, serializer: S) -> Result<S::Ok, S::Error>
-    where
-        S: Serializer,
-    {
-        beacon_payload::serialize(source, serializer)
-    }
-}
-
-impl<'de> DeserializeAs<'de, ExecutionPayload> for BeaconExecutionPayload<'de> {
-    fn deserialize_as<D>(deserializer: D) -> Result<ExecutionPayload, D::Error>
-    where
-        D: Deserializer<'de>,
-    {
-        beacon_payload::deserialize(deserializer)
-    }
-}
-
-pub mod beacon_payload {
-    use super::*;
-
-    /// Serialize the payload attributes for the beacon API.
-    pub fn serialize<S>(
-        payload_attributes: &ExecutionPayload,
-        serializer: S,
-    ) -> Result<S::Ok, S::Error>
-    where
-        S: Serializer,
-    {
-        BeaconExecutionPayload::from(payload_attributes).serialize(serializer)
-    }
-
-    /// Deserialize the payload attributes for the beacon API.
- pub fn deserialize<'de, D>(deserializer: D) -> Result - where - D: Deserializer<'de>, - { - BeaconExecutionPayload::deserialize(deserializer).map(Into::into) - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn serde_get_payload_header_response() { - let s = r#"{"version":"bellatrix","data":{"message":{"header":{"parent_hash":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","fee_recipient":"0xabcf8e0d4e9587369b2301d0790347320302cc09","state_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","receipts_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","logs_bloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","prev_randao":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","block_number":"1","gas_limit":"1","gas_used":"1","timestamp":"1","extra_data":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","base_fee_per_gas":"1","block_hash":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","transactions_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"},"value":"1","pubkey":"0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a"},"signature":"0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"}}"#; - let resp: GetExecutionPayloadHeaderResponse = serde_json::from_str(s).unwrap(); - let json: serde_json::Value = serde_json::from_str(s).unwrap(); - assert_eq!(json, serde_json::to_value(resp).unwrap()); - } - - #[test] - fn serde_payload_header() { - let s = r#"{"parent_hash":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","fee_recipient":"0xabcf8e0d4e9587369b2301d0790347320302cc09","state_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","receipts_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","logs_bloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","prev_randao":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","block_number":"1","gas_limit":"1","gas_used":"1","timestamp":"1","extra_data":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","base_fee_per_gas":"1","block_hash":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","transactions_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"}"#; - let header: ExecutionPayloadHeader = serde_json::from_str(s).unwrap(); - let 
json: serde_json::Value = serde_json::from_str(s).unwrap();
-        assert_eq!(json, serde_json::to_value(header).unwrap());
-    }
-}
diff --git a/crates/rpc/rpc-types/src/beacon/withdrawals.rs b/crates/rpc/rpc-types/src/beacon/withdrawals.rs
deleted file mode 100644
index ea2930c5f..000000000
--- a/crates/rpc/rpc-types/src/beacon/withdrawals.rs
+++ /dev/null
@@ -1,70 +0,0 @@
-use crate::Withdrawal;
-use alloy_primitives::Address;
-use serde::{Deserialize, Deserializer, Serialize, Serializer};
-use serde_with::{serde_as, DeserializeAs, DisplayFromStr, SerializeAs};
-
-/// Same as [Withdrawal] but respects the Beacon API format which uses snake-case and quoted
-/// decimals.
-#[serde_as]
-#[derive(Serialize, Deserialize, Clone)]
-pub(crate) struct BeaconWithdrawal {
-    #[serde_as(as = "DisplayFromStr")]
-    index: u64,
-    #[serde_as(as = "DisplayFromStr")]
-    validator_index: u64,
-    address: Address,
-    #[serde_as(as = "DisplayFromStr")]
-    amount: u64,
-}
-
-impl SerializeAs<Withdrawal> for BeaconWithdrawal {
-    fn serialize_as<S>(source: &Withdrawal, serializer: S) -> Result<S::Ok, S::Error>
-    where
-        S: Serializer,
-    {
-        beacon_withdrawals::serialize(source, serializer)
-    }
-}
-
-impl<'de> DeserializeAs<'de, Withdrawal> for BeaconWithdrawal {
-    fn deserialize_as<D>(deserializer: D) -> Result<Withdrawal, D::Error>
-    where
-        D: Deserializer<'de>,
-    {
-        beacon_withdrawals::deserialize(deserializer)
-    }
-}
-
-/// A helper serde module to convert from/to the Beacon API which uses quoted decimals rather than
-/// big-endian hex.
-pub mod beacon_withdrawals {
-    use super::*;
-
-    /// Serialize the payload attributes for the beacon API.
-    pub fn serialize<S>(payload_attributes: &Withdrawal, serializer: S) -> Result<S::Ok, S::Error>
-    where
-        S: Serializer,
-    {
-        let withdrawal = BeaconWithdrawal {
-            index: payload_attributes.index,
-            validator_index: payload_attributes.validator_index,
-            address: payload_attributes.address,
-            amount: payload_attributes.amount,
-        };
-        withdrawal.serialize(serializer)
-    }
-
-    /// Deserialize the payload attributes for the beacon API.
-    pub fn deserialize<'de, D>(deserializer: D) -> Result<Withdrawal, D::Error>
-    where
-        D: Deserializer<'de>,
-    {
-        let withdrawal = BeaconWithdrawal::deserialize(deserializer)?;
-        Ok(Withdrawal {
-            index: withdrawal.index,
-            validator_index: withdrawal.validator_index,
-            address: withdrawal.address,
-            amount: withdrawal.amount,
-        })
-    }
-}
diff --git a/crates/rpc/rpc-types/src/eth/error.rs b/crates/rpc/rpc-types/src/eth/error.rs
new file mode 100644
index 000000000..e8d55b087
--- /dev/null
+++ b/crates/rpc/rpc-types/src/eth/error.rs
@@ -0,0 +1,9 @@
+//! Implementation specific Errors for the `eth_` namespace.
+
+use jsonrpsee_types::ErrorObject;
+
+/// A trait to convert an error to an RPC error.
+pub trait ToRpcError: std::error::Error + Send + Sync + 'static {
+    /// Converts the error to a JSON-RPC error object.
+    fn to_rpc_error(&self) -> ErrorObject<'static>;
+}
diff --git a/crates/rpc/rpc-types/src/eth/mod.rs b/crates/rpc/rpc-types/src/eth/mod.rs
index dd36e7fd5..6313dbeed 100644
--- a/crates/rpc/rpc-types/src/eth/mod.rs
+++ b/crates/rpc/rpc-types/src/eth/mod.rs
@@ -1,5 +1,6 @@
 //! Ethereum related types
 
+pub(crate) mod error;
 pub mod transaction;
 
 // re-export
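Note: the new ToRpcError trait gives implementation-specific eth_ errors one hook into jsonrpsee. A plausible, purely illustrative implementation might look like this; ErrorObject::owned is jsonrpsee's standard constructor, the BackendUnavailable type is made up, and -32000 is just the generic server-error code:

    use jsonrpsee_types::ErrorObject;
    use std::fmt;

    // Trait as added in eth/error.rs above.
    pub trait ToRpcError: std::error::Error + Send + Sync + 'static {
        fn to_rpc_error(&self) -> ErrorObject<'static>;
    }

    // Hypothetical implementation-specific error, for illustration only.
    #[derive(Debug)]
    struct BackendUnavailable;

    impl fmt::Display for BackendUnavailable {
        fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
            f.write_str("backend unavailable")
        }
    }

    impl std::error::Error for BackendUnavailable {}

    impl ToRpcError for BackendUnavailable {
        fn to_rpc_error(&self) -> ErrorObject<'static> {
            // -32000: generic JSON-RPC server error; no extra data payload.
            ErrorObject::owned(-32000, self.to_string(), None::<()>)
        }
    }
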
diff --git a/crates/rpc/rpc-types/src/eth/transaction/typed.rs b/crates/rpc/rpc-types/src/eth/transaction/typed.rs
index bf995c353..6526bc2b6 100644
--- a/crates/rpc/rpc-types/src/eth/transaction/typed.rs
+++ b/crates/rpc/rpc-types/src/eth/transaction/typed.rs
@@ -2,10 +2,8 @@
 //! transaction deserialized from the json input of an RPC call. Depending on what fields are set,
 //! it can be converted into the container type [`TypedTransactionRequest`].
 
-use alloy_primitives::{Address, Bytes, B256, U256};
-use alloy_rlp::{Buf, BufMut, Decodable, Encodable, Error as RlpError, EMPTY_STRING_CODE};
+use alloy_primitives::{Bytes, TxKind, B256, U256};
 use alloy_rpc_types::{AccessList, BlobTransactionSidecar};
-use serde::{Deserialize, Serialize};
 
 /// Container type for various Ethereum transaction requests
 ///
@@ -36,7 +34,7 @@ pub struct LegacyTransactionRequest {
     /// The gas limit for the transaction
     pub gas_limit: U256,
     /// The kind of transaction (e.g., Call, Create)
-    pub kind: TransactionKind,
+    pub kind: TxKind,
     /// The value of the transaction
     pub value: U256,
     /// The input data for the transaction
@@ -57,7 +55,7 @@ pub struct EIP2930TransactionRequest {
     /// The gas limit for the transaction
     pub gas_limit: U256,
     /// The kind of transaction (e.g., Call, Create)
-    pub kind: TransactionKind,
+    pub kind: TxKind,
     /// The value of the transaction
     pub value: U256,
     /// The input data for the transaction
@@ -80,7 +78,7 @@ pub struct EIP1559TransactionRequest {
     /// The gas limit for the transaction
     pub gas_limit: U256,
     /// The kind of transaction (e.g., Call, Create)
-    pub kind: TransactionKind,
+    pub kind: TxKind,
     /// The value of the transaction
     pub value: U256,
     /// The input data for the transaction
@@ -103,7 +101,7 @@ pub struct EIP4844TransactionRequest {
     /// The gas limit for the transaction
     pub gas_limit: U256,
     /// The kind of transaction (e.g., Call, Create)
-    pub kind: TransactionKind,
+    pub kind: TxKind,
     /// The value of the transaction
     pub value: U256,
     /// The input data for the transaction
@@ -117,81 +115,3 @@
     /// Sidecar information for the transaction
     pub sidecar: BlobTransactionSidecar,
 }
-
-/// Represents the `to` field of a transaction request
-///
-/// This determines what kind of transaction this is
-#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, Serialize, Deserialize)]
-pub enum TransactionKind {
-    /// Transaction will call this address or transfer funds to this address
-    Call(Address),
-    /// No `to` field set, this transaction will create a contract
-    Create,
-}
-
-// == impl TransactionKind ==
-
-impl TransactionKind {
-    /// If this transaction is a call this returns the address of the callee
-    pub fn as_call(&self) -> Option<&Address> {
-        match self {
-            TransactionKind::Call(to) => Some(to),
-            TransactionKind::Create => None,
-        }
-    }
-}
-
-impl Encodable for TransactionKind {
-    /// This encodes the `to` field of a transaction request.
-    /// If the [TransactionKind] is a [TransactionKind::Call] it will encode the inner address:
-    /// `rlp(address)`
-    ///
-    /// If the [TransactionKind] is a [TransactionKind::Create] it will encode an empty list:
-    /// `rlp([])`, which is also
-    fn encode(&self, out: &mut dyn BufMut) {
-        match self {
-            TransactionKind::Call(to) => to.encode(out),
-            TransactionKind::Create => [].encode(out),
-        }
-    }
-    fn length(&self) -> usize {
-        match self {
-            TransactionKind::Call(to) => to.length(),
-            TransactionKind::Create => [].length(),
-        }
-    }
-}
-
-impl Decodable for TransactionKind {
-    fn decode(buf: &mut &[u8]) -> alloy_rlp::Result<Self> {
-        if let Some(&first) = buf.first() {
-            if first == EMPTY_STRING_CODE {
-                buf.advance(1);
-                Ok(TransactionKind::Create)
-            } else {
-                let addr = <Address as Decodable>::decode(buf)?;
-                Ok(TransactionKind::Call(addr))
-            }
-        } else {
-            Err(RlpError::InputTooShort)
-        }
-    }
-}
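Note: the deleted RLP impls are subsumed by alloy_primitives::TxKind, which, assuming its alloy-rlp integration is enabled (as the replacement code relies on), keeps the same wire form: the empty-string byte 0x80 for Create, the raw 20-byte address for Call. A quick sanity check of that equivalence:

    use alloy_primitives::{Address, TxKind};
    use alloy_rlp::{Decodable, Encodable};

    fn main() {
        // Create still encodes as the RLP empty string, the single byte 0x80.
        let mut buf = Vec::new();
        TxKind::Create.encode(&mut buf);
        assert_eq!(buf, vec![0x80]);

        // Call encodes the address (0x94 length prefix + 20 address bytes).
        let mut buf = Vec::new();
        TxKind::Call(Address::ZERO).encode(&mut buf);
        assert_eq!(buf.len(), 21);

        // Decoding 0x80 yields Create again.
        assert!(TxKind::decode(&mut &[0x80u8][..]).unwrap().is_create());
    }
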
-
-#[cfg(test)]
-mod tests {
-    use super::*;
-
-    #[test]
-    fn raw_kind_encoding_sanity() {
-        // check the 0x80 encoding for Create
-        let mut buf = Vec::new();
-        TransactionKind::Create.encode(&mut buf);
-        assert_eq!(buf, vec![0x80]);
-
-        // check decoding
-        let buf = [0x80];
-        let decoded = TransactionKind::decode(&mut &buf[..]).unwrap();
-        assert_eq!(decoded, TransactionKind::Create);
-    }
-}
diff --git a/crates/rpc/rpc-types/src/lib.rs b/crates/rpc/rpc-types/src/lib.rs
index 964144ed6..5966a9b72 100644
--- a/crates/rpc/rpc-types/src/lib.rs
+++ b/crates/rpc/rpc-types/src/lib.rs
@@ -8,8 +8,8 @@
     issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/"
 )]
 #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
+#![cfg_attr(not(test), warn(unused_crate_dependencies))]
 
-pub mod beacon;
 mod eth;
 mod mev;
 mod net;
@@ -37,7 +37,8 @@ pub use eth::{
     engine::{
         ExecutionPayload, ExecutionPayloadV1, ExecutionPayloadV2, ExecutionPayloadV3, PayloadError,
     },
-    transaction::{self, TransactionKind, TransactionRequest, TypedTransactionRequest},
+    error::ToRpcError,
+    transaction::{self, TransactionRequest, TypedTransactionRequest},
 };
 
 pub use mev::*;
diff --git a/crates/rpc/rpc-types/src/mev.rs b/crates/rpc/rpc-types/src/mev.rs
index 2137e1ecf..9126c0963 100644
--- a/crates/rpc/rpc-types/src/mev.rs
+++ b/crates/rpc/rpc-types/src/mev.rs
@@ -1,12 +1,11 @@
 //! MEV bundle type bindings
 
 use crate::{BlockId, BlockNumberOrTag, Log};
-use alloy_primitives::{Address, Bytes, TxHash, B256, U256, U64};
+use alloy_primitives::{Address, Bytes, TxHash, B256, U256};
 use serde::{
     ser::{SerializeSeq, Serializer},
     Deserialize, Deserializer, Serialize,
 };
-
 /// A bundle of transactions to send to the matchmaker.
 ///
 /// Note: this is for `mev_sendBundle` and not `eth_sendBundle`.
@@ -35,28 +34,33 @@ pub struct SendBundleRequest {
 #[serde(rename_all = "camelCase")]
 pub struct Inclusion {
     /// The first block the bundle is valid for.
-    pub block: U64,
+    #[serde(with = "alloy_rpc_types::serde_helpers::num::u64_via_ruint")]
+    pub block: u64,
     /// The last block the bundle is valid for.
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub max_block: Option<U64>,
+    #[serde(
+        default,
+        with = "alloy_rpc_types::serde_helpers::num::u64_opt_via_ruint",
+        skip_serializing_if = "Option::is_none"
+    )]
+    pub max_block: Option<u64>,
 }
 
 impl Inclusion {
     /// Creates a new inclusion with the given min block..
     pub fn at_block(block: u64) -> Self {
-        Self { block: U64::from(block), max_block: None }
+        Self { block, max_block: None }
     }
 
     /// Returns the block number of the first block the bundle is valid for.
     #[inline]
     pub fn block_number(&self) -> u64 {
-        self.block.to()
+        self.block
     }
 
     /// Returns the block number of the last block the bundle is valid for.
     #[inline]
     pub fn max_block_number(&self) -> Option<u64> {
-        self.max_block.as_ref().map(|b| b.to())
+        self.max_block.as_ref().map(|b| *b)
     }
 }
 
@@ -100,8 +104,10 @@ pub struct Validity {
 #[serde(rename_all = "camelCase")]
 pub struct Refund {
     /// The index of the transaction in the bundle.
+    #[serde(with = "alloy_rpc_types::serde_helpers::num::u64_via_ruint")]
     pub body_idx: u64,
     /// The minimum percent of the bundle's earnings to redistribute.
+    #[serde(with = "alloy_rpc_types::serde_helpers::num::u64_via_ruint")]
     pub percent: u64,
 }
 
@@ -113,6 +119,7 @@ pub struct RefundConfig {
     /// The address to refund.
     pub address: Address,
     /// The minimum percent of the bundle's earnings to redistribute.
+ #[serde(with = "alloy_rpc_types::serde_helpers::num::u64_via_ruint")] pub percent: u64, } @@ -312,26 +319,42 @@ pub struct SimBundleOverrides { /// Block used for simulation state. Defaults to latest block. /// Block header data will be derived from parent block by default. /// Specify other params to override the default values. - #[serde(skip_serializing_if = "Option::is_none")] + #[serde(default, skip_serializing_if = "Option::is_none")] pub parent_block: Option, /// Block number used for simulation, defaults to parentBlock.number + 1 - #[serde(skip_serializing_if = "Option::is_none")] - pub block_number: Option, + #[serde(default, with = "alloy_rpc_types::serde_helpers::num::u64_opt_via_ruint")] + pub block_number: Option, /// Coinbase used for simulation, defaults to parentBlock.coinbase - #[serde(skip_serializing_if = "Option::is_none")] + #[serde(default, skip_serializing_if = "Option::is_none")] pub coinbase: Option
, /// Timestamp used for simulation, defaults to parentBlock.timestamp + 12 - #[serde(skip_serializing_if = "Option::is_none")] - pub timestamp: Option<U64>, + #[serde( + default, + with = "alloy_rpc_types::serde_helpers::num::u64_opt_via_ruint", + skip_serializing_if = "Option::is_none" + )] + pub timestamp: Option<u64>, /// Gas limit used for simulation, defaults to parentBlock.gasLimit - #[serde(skip_serializing_if = "Option::is_none")] - pub gas_limit: Option<U64>, + #[serde( + default, + with = "alloy_rpc_types::serde_helpers::num::u64_opt_via_ruint", + skip_serializing_if = "Option::is_none" + )] + pub gas_limit: Option<u64>, /// Base fee used for simulation, defaults to parentBlock.baseFeePerGas - #[serde(skip_serializing_if = "Option::is_none")] - pub base_fee: Option<U64>, + #[serde( + default, + with = "alloy_rpc_types::serde_helpers::num::u64_opt_via_ruint", + skip_serializing_if = "Option::is_none" + )] + pub base_fee: Option<u64>, /// Timeout in seconds, defaults to 5 - #[serde(skip_serializing_if = "Option::is_none")] - pub timeout: Option<U64>, + #[serde( + default, + with = "alloy_rpc_types::serde_helpers::num::u64_opt_via_ruint", + skip_serializing_if = "Option::is_none" + )] + pub timeout: Option<u64>, } /// Response from the matchmaker after sending a simulation request. @@ -341,20 +364,25 @@ pub struct SimBundleResponse { /// Whether the simulation was successful. pub success: bool, /// Error message if the simulation failed. - #[serde(skip_serializing_if = "Option::is_none")] + #[serde(default, skip_serializing_if = "Option::is_none")] pub error: Option<String>, /// The block number of the simulated block. - pub state_block: U64, + #[serde(with = "alloy_rpc_types::serde_helpers::num::u64_via_ruint")] + pub state_block: u64, /// The gas price of the simulated block. - pub mev_gas_price: U64, + #[serde(with = "alloy_rpc_types::serde_helpers::num::u64_via_ruint")] + pub mev_gas_price: u64, /// The profit of the simulated block. - pub profit: U64, + #[serde(with = "alloy_rpc_types::serde_helpers::num::u64_via_ruint")] + pub profit: u64, /// The refundable value of the simulated block. - pub refundable_value: U64, + #[serde(with = "alloy_rpc_types::serde_helpers::num::u64_via_ruint")] + pub refundable_value: u64, /// The gas used by the simulated block. - pub gas_used: U64, + #[serde(with = "alloy_rpc_types::serde_helpers::num::u64_via_ruint")] + pub gas_used: u64, /// Logs returned by mev_simBundle. - #[serde(skip_serializing_if = "Option::is_none")] + #[serde(default, skip_serializing_if = "Option::is_none")] pub logs: Option<Vec<SimBundleLogs>>, } @@ -363,18 +391,18 @@ pub struct SimBundleResponse { #[serde(rename_all = "camelCase")] pub struct SimBundleLogs { /// Logs for transactions in bundle. - #[serde(skip_serializing_if = "Option::is_none")] + #[serde(default, skip_serializing_if = "Option::is_none")] pub tx_logs: Option<Vec<Log>>, /// Logs for bundles in bundle. - #[serde(skip_serializing_if = "Option::is_none")] + #[serde(default, skip_serializing_if = "Option::is_none")] pub bundle_logs: Option<Vec<SimBundleLogs>>, } impl SendBundleRequest { /// Create a new bundle request. pub fn new( - block_num: U64, - max_block: Option<U64>, + block_num: u64, + max_block: Option<u64>, protocol_version: ProtocolVersion, bundle_body: Vec<BundleItem>, ) -> Self { @@ -404,8 +432,12 @@ pub struct PrivateTransactionRequest { pub tx: Bytes, /// Hex-encoded number string, optional. Highest block number in which the transaction should /// be included.
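The attribute stack used in these hunks, and again on the field just below, is the same recipe applied throughout the file: keep the Rust type a plain `u64`, keep the JSON wire format a 0x-prefixed quantity. A small stand-in struct (not one of reth's) showing what the helpers do:

```rust
use serde::{Deserialize, Serialize};

#[derive(Serialize, Deserialize, Debug, PartialEq)]
#[serde(rename_all = "camelCase")]
struct Window {
    // Required field: always a hex quantity on the wire.
    #[serde(with = "alloy_rpc_types::serde_helpers::num::u64_via_ruint")]
    block: u64,
    // Optional field: `default` tolerates absence on input,
    // `skip_serializing_if` omits `None` on output.
    #[serde(
        default,
        with = "alloy_rpc_types::serde_helpers::num::u64_opt_via_ruint",
        skip_serializing_if = "Option::is_none"
    )]
    max_block: Option<u64>,
}

fn main() {
    let w = Window { block: 17, max_block: None };
    let json = serde_json::to_string(&w).unwrap();
    // 17 serializes as the quantity "0x11"; maxBlock is skipped entirely.
    assert_eq!(json, r#"{"block":"0x11"}"#);
    assert_eq!(serde_json::from_str::<Window>(&json).unwrap(), w);
}
```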
- #[serde(skip_serializing_if = "Option::is_none")] - pub max_block_number: Option<U64>, + #[serde( + default, + with = "alloy_rpc_types::serde_helpers::num::u64_opt_via_ruint", + skip_serializing_if = "Option::is_none" + )] + pub max_block_number: Option<u64>, /// Preferences for private transaction. #[serde(default, skip_serializing_if = "PrivateTransactionPreferences::is_empty")] pub preferences: PrivateTransactionPreferences, } @@ -415,10 +447,10 @@ pub struct PrivateTransactionRequest { #[derive(Serialize, Deserialize, Default, Debug, Clone, PartialEq, Eq)] pub struct PrivateTransactionPreferences { /// Requirements for the bundle to be included in the block. - #[serde(skip_serializing_if = "Option::is_none")] + #[serde(default, skip_serializing_if = "Option::is_none")] pub validity: Option<Validity>, /// Preferences on what data should be shared about the bundle and its transactions - #[serde(skip_serializing_if = "Option::is_none")] + #[serde(default, skip_serializing_if = "Option::is_none")] pub privacy: Option<Privacy>, } @@ -593,18 +625,27 @@ pub struct EthSendBundle { /// A list of hex-encoded signed transactions pub txs: Vec<Bytes>, /// hex-encoded block number for which this bundle is valid - pub block_number: U64, + #[serde(with = "alloy_rpc_types::serde_helpers::num::u64_via_ruint")] + pub block_number: u64, /// unix timestamp when this bundle becomes active - #[serde(skip_serializing_if = "Option::is_none")] + #[serde( + default, + with = "alloy_rpc_types::serde_helpers::num::u64_opt_via_ruint", + skip_serializing_if = "Option::is_none" + )] pub min_timestamp: Option<u64>, /// unix timestamp how long this bundle stays valid - #[serde(skip_serializing_if = "Option::is_none")] + #[serde( + default, + with = "alloy_rpc_types::serde_helpers::num::u64_opt_via_ruint", + skip_serializing_if = "Option::is_none" + )] pub max_timestamp: Option<u64>, /// list of hashes of possibly reverting txs #[serde(default, skip_serializing_if = "Vec::is_empty")] pub reverting_tx_hashes: Vec<B256>, /// UUID that can be used to cancel/replace this bundle - #[serde(rename = "replacementUuid", skip_serializing_if = "Option::is_none")] + #[serde(default, rename = "replacementUuid", skip_serializing_if = "Option::is_none")] pub replacement_uuid: Option<String>, } @@ -625,11 +666,16 @@ pub struct EthCallBundle { /// A list of hex-encoded signed transactions pub txs: Vec<Bytes>, /// hex encoded block number for which this bundle is valid on - pub block_number: U64, + #[serde(with = "alloy_rpc_types::serde_helpers::num::u64_via_ruint")] + pub block_number: u64, /// Either a hex encoded number or a block tag for which state to base this simulation on pub state_block_number: BlockNumberOrTag, /// the timestamp to use for this bundle simulation, in seconds since the unix epoch - #[serde(skip_serializing_if = "Option::is_none")] + #[serde( + default, + with = "alloy_rpc_types::serde_helpers::num::u64_opt_via_ruint", + skip_serializing_if = "Option::is_none" + )] pub timestamp: Option<u64>, } @@ -654,8 +700,10 @@ pub struct EthCallBundleResponse { /// Results of individual transactions within the bundle pub results: Vec<EthCallBundleTransactionResult>, /// The block number used as a base for this simulation + #[serde(with = "alloy_rpc_types::serde_helpers::num::u64_via_ruint")] pub state_block_number: u64, /// The total gas used by all transactions in the bundle + #[serde(with = "alloy_rpc_types::serde_helpers::num::u64_via_ruint")] pub total_gas_used: u64, } @@ -678,6 +726,7 @@ pub struct EthCallBundleTransactionResult { #[serde(with = "u256_numeric_string")] pub gas_price: U256, /// The amount of gas used by the
transaction + #[serde(with = "alloy_rpc_types::serde_helpers::num::u64_via_ruint")] pub gas_used: u64, /// The address to which the transaction is sent (optional) pub to_address: Option<Address>,
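`EthCallBundleTransactionResult` therefore mixes two numeric wire formats: `U256` values as decimal strings (via the `u256_numeric_string` module shown in the next hunk) and `u64` values as hex quantities. A hypothetical two-field mirror of that split, where `u256_decimal` is a stand-in for the real helper:

```rust
use alloy_primitives::U256;
use serde::{Deserialize, Serialize};

#[derive(Serialize, Deserialize, Debug, PartialEq)]
#[serde(rename_all = "camelCase")]
struct FeeInfo {
    #[serde(with = "u256_decimal")] // stand-in for `u256_numeric_string`
    gas_price: U256,
    #[serde(with = "alloy_rpc_types::serde_helpers::num::u64_via_ruint")]
    gas_used: u64,
}

mod u256_decimal {
    use alloy_primitives::U256;
    use serde::{de, Deserialize, Deserializer, Serializer};
    use std::str::FromStr;

    pub fn serialize<S: Serializer>(v: &U256, s: S) -> Result<S::Ok, S::Error> {
        // ruint's Display is decimal, so this yields e.g. "476190".
        s.serialize_str(&v.to_string())
    }

    pub fn deserialize<'de, D: Deserializer<'de>>(d: D) -> Result<U256, D::Error> {
        let s = String::deserialize(d)?;
        U256::from_str(&s).map_err(de::Error::custom)
    }
}

fn main() {
    let v = FeeInfo { gas_price: U256::from(476_190u64), gas_used: 21_000 };
    let json = serde_json::to_string(&v).unwrap();
    // Decimal string next to a hex quantity, on purpose.
    assert_eq!(json, r#"{"gasPrice":"476190","gasUsed":"0x5208"}"#);
    assert_eq!(serde_json::from_str::<FeeInfo>(&json).unwrap(), v);
}
```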
@@ -706,7 +755,7 @@ mod u256_numeric_string { match val { serde_json::Value::String(s) => { if let Ok(val) = s.parse::<u128>() { - return Ok(U256::from(val)) + return Ok(U256::from(val)); } U256::from_str(&s).map_err(de::Error::custom) } @@ -827,7 +876,7 @@ mod tests { let bundle = SendBundleRequest { protocol_version: ProtocolVersion::V0_1, - inclusion: Inclusion { block: U64::from(1), max_block: None }, + inclusion: Inclusion { block: 1, max_block: None }, bundle_body, validity, privacy, diff --git a/crates/rpc/rpc-types/src/net.rs b/crates/rpc/rpc-types/src/net.rs index d72d00fa5..b434bcbf8 100644 --- a/crates/rpc/rpc-types/src/net.rs +++ b/crates/rpc/rpc-types/src/net.rs @@ -1,19 +1,5 @@ -use crate::{pk_to_id, PeerId}; -use alloy_rlp::{RlpDecodable, RlpEncodable}; use alloy_rpc_types::admin::EthProtocolInfo; -use enr::Enr; -use secp256k1::{SecretKey, SECP256K1}; use serde::{Deserialize, Serialize}; -use serde_with::{DeserializeFromStr, SerializeDisplay}; -use std::{ - fmt, - fmt::Write, - net::{IpAddr, Ipv4Addr, SocketAddr}, - num::ParseIntError, - str::FromStr, -}; -use thiserror::Error; -use url::{Host, Url}; /// The status of the network being run by the local node. #[derive(Clone, Debug, Serialize, Deserialize)] @@ -25,346 +11,3 @@ pub struct NetworkStatus { /// The local node client version. pub client_version: String, /// The current ethereum protocol version pub protocol_version: u64, /// Information about the Ethereum Wire Protocol. pub eth_protocol_info: EthProtocolInfo, } - -/// Represents an ENR in discovery. -/// -/// Note: this is only an excerpt of the [`NodeRecord`] data structure. -#[derive( - Clone, - Copy, - Debug, - Eq, - PartialEq, - Hash, - SerializeDisplay, - DeserializeFromStr, - RlpEncodable, - RlpDecodable, -)] -pub struct NodeRecord { - /// The Address of a node. - pub address: IpAddr, - /// TCP port of the port that accepts connections. - pub tcp_port: u16, - /// UDP discovery port. - pub udp_port: u16, - /// Public key of the discovery service - pub id: PeerId, -} - -impl NodeRecord { - /// Derive the [`NodeRecord`] from the secret key and addr - pub fn from_secret_key(addr: SocketAddr, sk: &SecretKey) -> Self { - let pk = secp256k1::PublicKey::from_secret_key(SECP256K1, sk); - let id = PeerId::from_slice(&pk.serialize_uncompressed()[1..]); - Self::new(addr, id) - } - - /// Converts the `address` into an [`Ipv4Addr`] if the `address` is a mapped - /// [Ipv6Addr](std::net::Ipv6Addr). - /// - /// Returns `true` if the address was converted. - /// - /// See also [std::net::Ipv6Addr::to_ipv4_mapped] - pub fn convert_ipv4_mapped(&mut self) -> bool { - // convert IPv4 mapped IPv6 address - if let IpAddr::V6(v6) = self.address { - if let Some(v4) = v6.to_ipv4_mapped() { - self.address = v4.into(); - return true - } - } - false - } - - /// Same as [Self::convert_ipv4_mapped] but consumes the type - pub fn into_ipv4_mapped(mut self) -> Self { - self.convert_ipv4_mapped(); - self - } - - /// Creates a new record from a socket addr and peer id.
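The removed `convert_ipv4_mapped`/`into_ipv4_mapped` helpers are thin wrappers over std (the record constructor they support continues below). A standalone sketch of the std behaviour they rely on:

```rust
use std::net::{Ipv4Addr, Ipv6Addr};

fn main() {
    // An IPv4 address embedded as an IPv4-mapped IPv6 address (::ffff:a.b.c.d)
    // converts back losslessly; that round trip is all the helper does.
    let v4: Ipv4Addr = "10.3.58.6".parse().unwrap();
    let mapped: Ipv6Addr = v4.to_ipv6_mapped();
    assert_eq!(mapped.to_string(), "::ffff:10.3.58.6");
    assert_eq!(mapped.to_ipv4_mapped(), Some(v4));

    // A native IPv6 address is left untouched.
    let v6: Ipv6Addr = "2001:db8::1".parse().unwrap();
    assert_eq!(v6.to_ipv4_mapped(), None);
}
```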
- #[allow(dead_code)] - pub fn new(addr: SocketAddr, id: PeerId) -> Self { - Self { address: addr.ip(), tcp_port: addr.port(), udp_port: addr.port(), id } - } - - /// The TCP socket address of this node - #[must_use] - pub fn tcp_addr(&self) -> SocketAddr { - SocketAddr::new(self.address, self.tcp_port) - } - - /// The UDP socket address of this node - #[must_use] - pub fn udp_addr(&self) -> SocketAddr { - SocketAddr::new(self.address, self.udp_port) - } -} - -impl fmt::Display for NodeRecord { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.write_str("enode://")?; - alloy_primitives::hex::encode(self.id.as_slice()).fmt(f)?; - f.write_char('@')?; - match self.address { - IpAddr::V4(ip) => { - ip.fmt(f)?; - } - IpAddr::V6(ip) => { - // encapsulate with brackets - f.write_char('[')?; - ip.fmt(f)?; - f.write_char(']')?; - } - } - f.write_char(':')?; - self.tcp_port.fmt(f)?; - if self.tcp_port != self.udp_port { - f.write_str("?discport=")?; - self.udp_port.fmt(f)?; - } - - Ok(()) - } -} - -/// Possible error types when parsing a [`NodeRecord`] -#[derive(Debug, Error)] -pub enum NodeRecordParseError { - /// Invalid url - #[error("Failed to parse url: {0}")] - InvalidUrl(String), - /// Invalid id - #[error("Failed to parse id")] - InvalidId(String), - /// Invalid discport - #[error("Failed to discport query: {0}")] - Discport(ParseIntError), -} - -impl FromStr for NodeRecord { - type Err = NodeRecordParseError; - - fn from_str(s: &str) -> Result { - let url = Url::parse(s).map_err(|e| NodeRecordParseError::InvalidUrl(e.to_string()))?; - - let address = match url.host() { - Some(Host::Ipv4(ip)) => IpAddr::V4(ip), - Some(Host::Ipv6(ip)) => IpAddr::V6(ip), - Some(Host::Domain(ip)) => IpAddr::V4( - Ipv4Addr::from_str(ip) - .map_err(|e| NodeRecordParseError::InvalidUrl(e.to_string()))?, - ), - _ => return Err(NodeRecordParseError::InvalidUrl(format!("invalid host: {url:?}"))), - }; - let port = url - .port() - .ok_or_else(|| NodeRecordParseError::InvalidUrl("no port specified".to_string()))?; - - let udp_port = if let Some(discovery_port) = url - .query_pairs() - .find_map(|(maybe_disc, port)| (maybe_disc.as_ref() == "discport").then_some(port)) - { - discovery_port.parse::().map_err(NodeRecordParseError::Discport)? 
- } else { - port - }; - - let id = url - .username() - .parse::() - .map_err(|e| NodeRecordParseError::InvalidId(e.to_string()))?; - - Ok(Self { address, id, tcp_port: port, udp_port }) - } -} - -impl TryFrom<&Enr> for NodeRecord { - type Error = NodeRecordParseError; - - fn try_from(enr: &Enr) -> Result { - let Some(address) = enr.ip4().map(IpAddr::from).or_else(|| enr.ip6().map(IpAddr::from)) - else { - return Err(NodeRecordParseError::InvalidUrl("ip missing".to_string())) - }; - - let Some(udp_port) = enr.udp4().or_else(|| enr.udp6()) else { - return Err(NodeRecordParseError::InvalidUrl("udp port missing".to_string())) - }; - - let Some(tcp_port) = enr.tcp4().or_else(|| enr.tcp6()) else { - return Err(NodeRecordParseError::InvalidUrl("tcp port missing".to_string())) - }; - - let id = pk_to_id(&enr.public_key()); - - Ok(NodeRecord { address, tcp_port, udp_port, id }.into_ipv4_mapped()) - } -} - -#[cfg(test)] -mod tests { - use super::*; - use alloy_rlp::Decodable; - use rand::{thread_rng, Rng, RngCore}; - use std::net::Ipv6Addr; - - #[test] - fn test_mapped_ipv6() { - let mut rng = thread_rng(); - - let v4: Ipv4Addr = "0.0.0.0".parse().unwrap(); - let v6 = v4.to_ipv6_mapped(); - - let record = NodeRecord { - address: v6.into(), - tcp_port: rng.gen(), - udp_port: rng.gen(), - id: rng.gen(), - }; - - assert!(record.clone().convert_ipv4_mapped()); - assert_eq!(record.into_ipv4_mapped().address, IpAddr::from(v4)); - } - - #[test] - fn test_mapped_ipv4() { - let mut rng = thread_rng(); - let v4: Ipv4Addr = "0.0.0.0".parse().unwrap(); - - let record = NodeRecord { - address: v4.into(), - tcp_port: rng.gen(), - udp_port: rng.gen(), - id: rng.gen(), - }; - - assert!(!record.clone().convert_ipv4_mapped()); - assert_eq!(record.into_ipv4_mapped().address, IpAddr::from(v4)); - } - - #[test] - fn test_noderecord_codec_ipv4() { - let mut rng = thread_rng(); - for _ in 0..100 { - let mut ip = [0u8; 4]; - rng.fill_bytes(&mut ip); - let record = NodeRecord { - address: IpAddr::V4(ip.into()), - tcp_port: rng.gen(), - udp_port: rng.gen(), - id: rng.gen(), - }; - - let decoded = NodeRecord::decode(&mut alloy_rlp::encode(record).as_slice()).unwrap(); - assert_eq!(record, decoded); - } - } - - #[test] - fn test_noderecord_codec_ipv6() { - let mut rng = thread_rng(); - for _ in 0..100 { - let mut ip = [0u8; 16]; - rng.fill_bytes(&mut ip); - let record = NodeRecord { - address: IpAddr::V6(ip.into()), - tcp_port: rng.gen(), - udp_port: rng.gen(), - id: rng.gen(), - }; - - let decoded = NodeRecord::decode(&mut alloy_rlp::encode(record).as_slice()).unwrap(); - assert_eq!(record, decoded); - } - } - - #[test] - fn test_url_parse() { - let url = "enode://6f8a80d14311c39f35f516fa664deaaaa13e85b2f7493f37f6144d86991ec012937307647bd3b9a82abe2974e1407241d54947bbb39763a4cac9f77166ad92a0@10.3.58.6:30303?discport=30301"; - let node: NodeRecord = url.parse().unwrap(); - assert_eq!(node, NodeRecord { - address: IpAddr::V4([10,3,58,6].into()), - tcp_port: 30303, - udp_port: 30301, - id: "6f8a80d14311c39f35f516fa664deaaaa13e85b2f7493f37f6144d86991ec012937307647bd3b9a82abe2974e1407241d54947bbb39763a4cac9f77166ad92a0".parse().unwrap(), - }) - } - - #[test] - fn test_node_display() { - let url = "enode://6f8a80d14311c39f35f516fa664deaaaa13e85b2f7493f37f6144d86991ec012937307647bd3b9a82abe2974e1407241d54947bbb39763a4cac9f77166ad92a0@10.3.58.6:30303"; - let node: NodeRecord = url.parse().unwrap(); - assert_eq!(url, &format!("{node}")); - } - - #[test] - fn test_node_display_discport() { - let url = 
"enode://6f8a80d14311c39f35f516fa664deaaaa13e85b2f7493f37f6144d86991ec012937307647bd3b9a82abe2974e1407241d54947bbb39763a4cac9f77166ad92a0@10.3.58.6:30303?discport=30301"; - let node: NodeRecord = url.parse().unwrap(); - assert_eq!(url, &format!("{node}")); - } - - #[test] - fn test_node_serialize() { - let cases = vec![ - // IPv4 - ( - NodeRecord{ - address: IpAddr::V4([10, 3, 58, 6].into()), - tcp_port: 30303u16, - udp_port: 30301u16, - id: PeerId::from_str("6f8a80d14311c39f35f516fa664deaaaa13e85b2f7493f37f6144d86991ec012937307647bd3b9a82abe2974e1407241d54947bbb39763a4cac9f77166ad92a0").unwrap(), - }, - "\"enode://6f8a80d14311c39f35f516fa664deaaaa13e85b2f7493f37f6144d86991ec012937307647bd3b9a82abe2974e1407241d54947bbb39763a4cac9f77166ad92a0@10.3.58.6:30303?discport=30301\"" - ), - // IPv6 - ( - NodeRecord{ - address: Ipv6Addr::new(0x2001, 0xdb8, 0x3c4d, 0x15, 0x0, 0x0, 0xabcd, 0xef12).into(), - tcp_port: 52150u16, - udp_port: 52151u16, - id: PeerId::from_str("1dd9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439").unwrap(), - }, - "\"enode://1dd9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439@[2001:db8:3c4d:15::abcd:ef12]:52150?discport=52151\"", - ) - ]; - - for (node, expected) in cases { - let ser = serde_json::to_string::(&node).expect("couldn't serialize"); - assert_eq!(ser, expected); - } - } - - #[test] - fn test_node_deserialize() { - let cases = vec![ - // IPv4 - ( - "\"enode://6f8a80d14311c39f35f516fa664deaaaa13e85b2f7493f37f6144d86991ec012937307647bd3b9a82abe2974e1407241d54947bbb39763a4cac9f77166ad92a0@10.3.58.6:30303?discport=30301\"", - NodeRecord{ - address: IpAddr::V4([10, 3, 58, 6].into()), - tcp_port: 30303u16, - udp_port: 30301u16, - id: PeerId::from_str("6f8a80d14311c39f35f516fa664deaaaa13e85b2f7493f37f6144d86991ec012937307647bd3b9a82abe2974e1407241d54947bbb39763a4cac9f77166ad92a0").unwrap(), - } - ), - // IPv6 - ( - "\"enode://1dd9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439@[2001:db8:3c4d:15::abcd:ef12]:52150?discport=52151\"", - NodeRecord{ - address: Ipv6Addr::new(0x2001, 0xdb8, 0x3c4d, 0x15, 0x0, 0x0, 0xabcd, 0xef12).into(), - tcp_port: 52150u16, - udp_port: 52151u16, - id: PeerId::from_str("1dd9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439").unwrap(), - } - ), - ]; - - for (url, expected) in cases { - let node: NodeRecord = serde_json::from_str(url).expect("couldn't deserialize"); - assert_eq!(node, expected); - } - } -} diff --git a/crates/rpc/rpc-types/src/peer.rs b/crates/rpc/rpc-types/src/peer.rs index 44dbe5d71..a07e61d00 100644 --- a/crates/rpc/rpc-types/src/peer.rs +++ b/crates/rpc/rpc-types/src/peer.rs @@ -2,8 +2,3 @@ use alloy_primitives::B512; /// Alias for a peer identifier pub type PeerId = B512; - -/// Converts a [`secp256k1::PublicKey`] to a [`PeerId`]. -pub fn pk_to_id(pk: &secp256k1::PublicKey) -> PeerId { - PeerId::from_slice(&pk.serialize_uncompressed()[1..]) -} diff --git a/crates/rpc/rpc-types/src/relay/mod.rs b/crates/rpc/rpc-types/src/relay/mod.rs index 8fed94b79..35daa1b79 100644 --- a/crates/rpc/rpc-types/src/relay/mod.rs +++ b/crates/rpc/rpc-types/src/relay/mod.rs @@ -1,12 +1,10 @@ //! 
Relay API bindings: -use crate::{ - beacon::{BlsPublicKey, BlsSignature}, - engine::{ - BlobsBundleV1, ExecutionPayload, ExecutionPayloadV1, ExecutionPayloadV2, ExecutionPayloadV3, - }, +use crate::engine::{ + BlobsBundleV1, ExecutionPayload, ExecutionPayloadV1, ExecutionPayloadV2, ExecutionPayloadV3, }; use alloy_primitives::{Address, B256, U256}; +use alloy_rpc_types_beacon::beacon::{BlsPublicKey, BlsSignature}; use serde::{Deserialize, Serialize}; use serde_with::{serde_as, DisplayFromStr}; @@ -104,7 +102,7 @@ pub struct SignedBidSubmissionV1 { /// The BidTrace message associated with the submission. pub message: BidTrace, /// The execution payload for the submission. - #[serde(with = "crate::beacon::payload::beacon_payload_v1")] + #[serde(with = "alloy_rpc_types_beacon::beacon::payload::beacon_payload_v1")] pub execution_payload: ExecutionPayloadV1, /// The signature associated with the submission. pub signature: BlsSignature, @@ -118,7 +116,7 @@ pub struct SignedBidSubmissionV2 { /// The BidTrace message associated with the submission. pub message: BidTrace, /// The execution payload for the submission. - #[serde(with = "crate::beacon::payload::beacon_payload_v2")] + #[serde(with = "alloy_rpc_types_beacon::beacon::payload::beacon_payload_v2")] pub execution_payload: ExecutionPayloadV2, /// The signature associated with the submission. pub signature: BlsSignature, @@ -132,7 +130,7 @@ pub struct SignedBidSubmissionV3 { /// The BidTrace message associated with the submission. pub message: BidTrace, /// The execution payload for the submission. - #[serde(with = "crate::beacon::payload::beacon_payload_v3")] + #[serde(with = "alloy_rpc_types_beacon::beacon::payload::beacon_payload_v3")] pub execution_payload: ExecutionPayloadV3, /// The Deneb block bundle for this bid. pub blobs_bundle: BlobsBundleV1, @@ -146,7 +144,7 @@ pub struct SubmitBlockRequest { /// The BidTrace message associated with the block submission. pub message: BidTrace, /// The execution payload for the block submission. - #[serde(with = "crate::beacon::payload::beacon_payload")] + #[serde(with = "alloy_rpc_types_beacon::beacon::payload::beacon_payload")] pub execution_payload: ExecutionPayload, /// The signature associated with the block submission. 
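Each `#[serde(with = "...")]` attribute in these hunks simply points at a module exposing a `serialize`/`deserialize` pair, and the relocated `beacon_payload_v*` helpers keep that contract; only their crate changed. A hypothetical module of the same shape, unrelated to the beacon types:

```rust
use serde::{Deserialize, Deserializer, Serialize, Serializer};

// Any module with these two entry points can be named in `#[serde(with = "...")]`.
mod secs_as_millis {
    use super::*;

    pub fn serialize<S: Serializer>(secs: &u64, s: S) -> Result<S::Ok, S::Error> {
        // Store seconds as milliseconds on the wire.
        s.serialize_u64(secs * 1_000)
    }

    pub fn deserialize<'de, D: Deserializer<'de>>(d: D) -> Result<u64, D::Error> {
        Ok(u64::deserialize(d)? / 1_000)
    }
}

#[derive(Serialize, Deserialize)]
struct Timeout {
    #[serde(with = "secs_as_millis")]
    duration: u64,
}

fn main() {
    let json = serde_json::to_string(&Timeout { duration: 3 }).unwrap();
    assert_eq!(json, r#"{"duration":3000}"#);
    assert_eq!(serde_json::from_str::<Timeout>(&json).unwrap().duration, 3);
}
```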
pub signature: BlsSignature, diff --git a/crates/rpc/rpc/Cargo.toml b/crates/rpc/rpc/Cargo.toml index 357309de7..224866be6 100644 --- a/crates/rpc/rpc/Cargo.toml +++ b/crates/rpc/rpc/Cargo.toml @@ -21,12 +21,15 @@ reth-provider = { workspace = true, features = ["test-utils"] } reth-transaction-pool = { workspace = true, features = ["test-utils"] } reth-network-api.workspace = true reth-rpc-engine-api.workspace = true -reth-revm = { workspace = true, features = ["js-tracer"] } +reth-revm.workspace = true reth-tasks = { workspace = true, features = ["rayon"] } reth-consensus-common.workspace = true reth-rpc-types-compat.workspace = true -revm-inspectors.workspace = true +revm-inspectors = { workspace = true, features = ["js-tracer"] } reth-evm.workspace = true +reth-network-types.workspace = true + +reth-evm-optimism = { workspace = true, optional = true } # eth alloy-rlp.workspace = true @@ -74,7 +77,7 @@ tracing-futures = "0.2" schnellru.workspace = true futures.workspace = true derive_more.workspace = true -dyn-clone.workspace = true +dyn-clone.workspace = true [dev-dependencies] reth-evm-ethereum.workspace = true @@ -88,4 +91,6 @@ optimism = [ "reth-primitives/optimism", "reth-rpc-types-compat/optimism", "reth-provider/optimism", + "dep:reth-evm-optimism", + "reth-evm-optimism/optimism", ] diff --git a/crates/rpc/rpc/src/admin.rs b/crates/rpc/rpc/src/admin.rs index 3f5578433..6f3125e06 100644 --- a/crates/rpc/rpc/src/admin.rs +++ b/crates/rpc/rpc/src/admin.rs @@ -3,7 +3,8 @@ use alloy_primitives::B256; use async_trait::async_trait; use jsonrpsee::core::RpcResult; use reth_network_api::{NetworkInfo, PeerKind, Peers}; -use reth_primitives::{AnyNode, ChainSpec, NodeRecord}; +use reth_network_types::AnyNode; +use reth_primitives::{ChainSpec, NodeRecord}; use reth_rpc_api::AdminApiServer; use reth_rpc_types::{ admin::{EthProtocolInfo, NodeInfo, Ports, ProtocolInfo}, diff --git a/crates/rpc/rpc/src/debug.rs b/crates/rpc/rpc/src/debug.rs index 500f786d3..ebc52877d 100644 --- a/crates/rpc/rpc/src/debug.rs +++ b/crates/rpc/rpc/src/debug.rs @@ -17,7 +17,7 @@ use reth_primitives::{ use reth_provider::{ BlockReaderIdExt, ChainSpecProvider, HeaderProvider, StateProviderBox, TransactionVariant, }; -use reth_revm::database::{StateProviderDatabase, SubState}; +use reth_revm::database::StateProviderDatabase; use reth_rpc_api::DebugApiServer; use reth_rpc_types::{ trace::geth::{ @@ -101,24 +101,16 @@ where env: Env::boxed(cfg.cfg_env.clone(), block_env.clone(), tx), handler_cfg: cfg.handler_cfg, }; - let (result, state_changes) = this - .trace_transaction( - opts.clone(), - env, - &mut db, - Some(TransactionContext { - block_hash, - tx_hash: Some(tx_hash), - tx_index: Some(index), - }), - ) - .map_err(|err| { - results.push(TraceResult::Error { - error: err.to_string(), - tx_hash: Some(tx_hash), - }); - err - })?; + let (result, state_changes) = this.trace_transaction( + opts.clone(), + env, + &mut db, + Some(TransactionContext { + block_hash, + tx_hash: Some(tx_hash), + tx_index: Some(index), + }), + )?; results.push(TraceResult::Success { result, tx_hash: Some(tx_hash) }); if transactions.peek().is_some() { @@ -275,7 +267,7 @@ where block_id: Option, opts: GethDebugTracingCallOptions, ) -> EthResult { - let at = block_id.unwrap_or(BlockId::Number(BlockNumberOrTag::Latest)); + let at = block_id.unwrap_or_default(); let GethDebugTracingCallOptions { tracing_options, state_overrides, block_overrides } = opts; let overrides = EvmOverrides::new(state_overrides, block_overrides.map(Box::new)); @@ -331,14 
+323,11 @@ where self.inner .eth_api .spawn_with_call_at(call, at, overrides, move |db, env| { - let (res, _, db) = this.eth_api().inspect_and_return_db( - db, - env, - &mut inspector, - )?; + let (res, _) = + this.eth_api().inspect(&mut *db, env, &mut inspector)?; let frame = inspector .into_geth_builder() - .geth_prestate_traces(&res, prestate_config, &db)?; + .geth_prestate_traces(&res, prestate_config, db)?; Ok(frame) }) .await?; @@ -356,12 +345,9 @@ where .inner .eth_api .spawn_with_call_at(call, at, overrides, move |db, env| { - let (res, _, db) = this.eth_api().inspect_and_return_db( - db, - env, - &mut inspector, - )?; - let frame = inspector.try_into_mux_frame(&res, &db)?; + let (res, _) = + this.eth_api().inspect(&mut *db, env, &mut inspector)?; + let frame = inspector.try_into_mux_frame(&res, db)?; Ok(frame.into()) }) .await?; @@ -378,12 +364,9 @@ where .eth_api .spawn_with_call_at(call, at, overrides, move |db, env| { let mut inspector = JsInspector::new(code, config)?; - let (res, _, db) = this.eth_api().inspect_and_return_db( - db, - env.clone(), - &mut inspector, - )?; - Ok(inspector.json_result(res, &env, &db)?) + let (res, _) = + this.eth_api().inspect(&mut *db, env.clone(), &mut inspector)?; + Ok(inspector.json_result(res, &env, db)?) }) .await?; @@ -428,7 +411,7 @@ where let StateContext { transaction_index, block_number } = state_context.unwrap_or_default(); let transaction_index = transaction_index.unwrap_or_default(); - let target_block = block_number.unwrap_or(BlockId::Number(BlockNumberOrTag::Latest)); + let target_block = block_number.unwrap_or_default(); let ((cfg, mut block_env, _), block) = futures::try_join!( self.inner.eth_api.evm_env_at(target_block), self.inner.eth_api.block_by_id_with_senders(target_block), @@ -534,7 +517,7 @@ where &self, opts: GethDebugTracingOptions, env: EnvWithHandlerCfg, - db: &mut SubState, + db: &mut CacheDB>, transaction_context: Option, ) -> EthResult<(GethTrace, revm_primitives::State)> { let GethDebugTracingOptions { config, tracer, tracer_config, .. 
} = opts; @@ -572,8 +555,7 @@ where let mut inspector = TracingInspector::new( TracingInspectorConfig::from_geth_prestate_config(&prestate_config), ); - let (res, _, db) = - self.eth_api().inspect_and_return_db(db, env, &mut inspector)?; + let (res, _) = self.eth_api().inspect(&mut *db, env, &mut inspector)?; let frame = inspector.into_geth_builder().geth_prestate_traces( &res, @@ -593,8 +575,7 @@ where let mut inspector = MuxInspector::try_from_config(mux_config)?; - let (res, _, db) = - self.eth_api().inspect_and_return_db(db, env, &mut inspector)?; + let (res, _) = self.eth_api().inspect(&mut *db, env, &mut inspector)?; let frame = inspector.try_into_mux_frame(&res, db)?; return Ok((frame.into(), res.state)) } @@ -606,8 +587,7 @@ where config, transaction_context.unwrap_or_default(), )?; - let (res, env, db) = - self.eth_api().inspect_and_return_db(db, env, &mut inspector)?; + let (res, env) = self.eth_api().inspect(&mut *db, env, &mut inspector)?; let state = res.state.clone(); let result = inspector.json_result(res, &env, db)?; diff --git a/crates/rpc/rpc/src/eth/api/block.rs b/crates/rpc/rpc/src/eth/api/block.rs index 95b6b6bc7..cfc3fe058 100644 --- a/crates/rpc/rpc/src/eth/api/block.rs +++ b/crates/rpc/rpc/src/eth/api/block.rs @@ -84,7 +84,7 @@ where #[cfg(feature = "optimism")] let (block_timestamp, l1_block_info) = { - let body = reth_revm::optimism::extract_l1_info(&block); + let body = reth_evm_optimism::extract_l1_info(&block); (block.timestamp, body.ok()) }; diff --git a/crates/rpc/rpc/src/eth/api/call.rs b/crates/rpc/rpc/src/eth/api/call.rs index 62be2612c..acd5c30e8 100644 --- a/crates/rpc/rpc/src/eth/api/call.rs +++ b/crates/rpc/rpc/src/eth/api/call.rs @@ -14,11 +14,11 @@ use crate::{ }; use reth_evm::ConfigureEvm; use reth_network_api::NetworkInfo; -use reth_primitives::{revm::env::tx_env_with_recovered, BlockId, BlockNumberOrTag, Bytes, U256}; +use reth_primitives::{revm::env::tx_env_with_recovered, BlockId, Bytes, TxKind, U256}; use reth_provider::{ BlockReaderIdExt, ChainSpecProvider, EvmEnvProvider, StateProvider, StateProviderFactory, }; -use reth_revm::{access_list::AccessListInspector, database::StateProviderDatabase}; +use reth_revm::database::StateProviderDatabase; use reth_rpc_types::{ state::StateOverride, AccessListWithGasUsed, Bundle, EthCallResponse, StateContext, TransactionRequest, @@ -31,6 +31,7 @@ use revm::{ }, DatabaseCommit, }; +use revm_inspectors::access_list::AccessListInspector; use tracing::trace; // Gas per transaction not creating a contract. 
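With `TxKind` now imported in `call.rs`, a request's `to` field carries the call/create distinction directly instead of `Option<Address>`. A sketch of how such a field resolves to a target address; `resolve_target` is illustrative, not reth's helper, and `Address::create` assumes the `rlp` feature of `alloy-primitives`:

```rust
use alloy_primitives::{Address, TxKind};

// Calls target the callee; creates derive the would-be contract address
// from sender and nonce, which is what the access-list path needs.
fn resolve_target(from: Address, nonce: u64, to: Option<TxKind>) -> Address {
    match to {
        Some(TxKind::Call(to)) => to,
        // `None` and `Some(TxKind::Create)` both mean contract creation.
        _ => from.create(nonce),
    }
}

fn main() {
    let from = Address::repeat_byte(0x11);
    let callee = Address::repeat_byte(0x22);
    assert_eq!(resolve_target(from, 0, Some(TxKind::Call(callee))), callee);
    assert_eq!(resolve_target(from, 7, None), from.create(7));
}
```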
@@ -71,13 +72,8 @@ where block_number: Option, overrides: EvmOverrides, ) -> EthResult { - let (res, _env) = self - .transact_call_at( - request, - block_number.unwrap_or(BlockId::Number(BlockNumberOrTag::Latest)), - overrides, - ) - .await?; + let (res, _env) = + self.transact_call_at(request, block_number.unwrap_or_default(), overrides).await?; ensure_success(res.result) } @@ -98,7 +94,7 @@ where let StateContext { transaction_index, block_number } = state_context.unwrap_or_default(); let transaction_index = transaction_index.unwrap_or_default(); - let target_block = block_number.unwrap_or(BlockId::Number(BlockNumberOrTag::Latest)); + let target_block = block_number.unwrap_or_default(); let is_block_target_pending = target_block.is_pending(); let ((cfg, block_env, _), block) = futures::try_join!( @@ -388,7 +384,7 @@ where mut request: TransactionRequest, at: Option, ) -> EthResult { - let block_id = at.unwrap_or(BlockId::Number(BlockNumberOrTag::Latest)); + let block_id = at.unwrap_or_default(); let (cfg, block, at) = self.evm_env_at(block_id).await?; let state = self.state_at(at)?; @@ -411,7 +407,7 @@ where } let from = request.from.unwrap_or_default(); - let to = if let Some(to) = request.to { + let to = if let Some(TxKind::Call(to)) = request.to { to } else { let nonce = db.basic_ref(from)?.unwrap_or_default().nonce; @@ -448,6 +444,7 @@ where Ok(AccessListWithGasUsed { access_list, gas_used }) } + /// Executes the requests again after an out of gas error to check if the error is gas related /// or not #[inline] @@ -455,14 +452,14 @@ where &self, env_gas_limit: U256, mut env: EnvWithHandlerCfg, - mut db: &mut CacheDB>, + db: &mut CacheDB>, ) -> EthApiError where S: StateProvider, { let req_gas_limit = env.tx.gas_limit; env.tx.gas_limit = env_gas_limit.try_into().unwrap_or(u64::MAX); - let (res, _) = match self.transact(&mut db, env) { + let (res, _) = match self.transact(db, env) { Ok(res) => res, Err(err) => return err, }; diff --git a/crates/rpc/rpc/src/eth/api/mod.rs b/crates/rpc/rpc/src/eth/api/mod.rs index c23dfe1ac..6c936808e 100644 --- a/crates/rpc/rpc/src/eth/api/mod.rs +++ b/crates/rpc/rpc/src/eth/api/mod.rs @@ -355,7 +355,7 @@ where let now = Instant::now(); *lock = Some(PendingBlock { block: pending_block.clone(), - expires_at: now + Duration::from_secs(3), + expires_at: now + Duration::from_secs(1), }); Ok(Some(pending_block)) diff --git a/crates/rpc/rpc/src/eth/api/pending_block.rs b/crates/rpc/rpc/src/eth/api/pending_block.rs index aa18bf7ec..dbb148981 100644 --- a/crates/rpc/rpc/src/eth/api/pending_block.rs +++ b/crates/rpc/rpc/src/eth/api/pending_block.rs @@ -52,8 +52,8 @@ impl PendingBlockEnv { let parent_hash = origin.build_target_hash(); let state_provider = client.history_by_block_hash(parent_hash)?; - let state = StateProviderDatabase::new(&state_provider); - let mut db = State::builder().with_database(Box::new(state)).with_bundle_update().build(); + let state = StateProviderDatabase::new(state_provider); + let mut db = State::builder().with_database(state).with_bundle_update().build(); let mut cumulative_gas_used = 0; let mut sum_blob_gas_used = 0; @@ -230,6 +230,7 @@ impl PendingBlockEnv { let logs_bloom = bundle.block_logs_bloom(block_number).expect("Block is present"); // calculate the state root + let state_provider = &db.database; let state_root = state_provider.state_root(bundle.state())?; // create the block header diff --git a/crates/rpc/rpc/src/eth/api/server.rs b/crates/rpc/rpc/src/eth/api/server.rs index 6be1a88af..a1796a71d 100644 --- 
a/crates/rpc/rpc/src/eth/api/server.rs +++ b/crates/rpc/rpc/src/eth/api/server.rs @@ -2,26 +2,22 @@ //! Handles RPC requests for the `eth_` namespace. use jsonrpsee::core::RpcResult as Result; -use serde_json::Value; -use tracing::trace; - use reth_evm::ConfigureEvm; use reth_network_api::NetworkInfo; -use reth_primitives::{ - serde_helper::{num::U64HexOrNumber, JsonStorageKey}, - Address, BlockId, BlockNumberOrTag, Bytes, B256, B64, U256, U64, -}; +use reth_primitives::{Address, BlockId, BlockNumberOrTag, Bytes, B256, B64, U256, U64}; use reth_provider::{ BlockIdReader, BlockReader, BlockReaderIdExt, ChainSpecProvider, EvmEnvProvider, HeaderProvider, StateProviderFactory, }; use reth_rpc_api::EthApiServer; use reth_rpc_types::{ - state::StateOverride, AccessListWithGasUsed, AnyTransactionReceipt, BlockOverrides, Bundle, - EIP1186AccountProofResponse, EthCallResponse, FeeHistory, Header, Index, RichBlock, - StateContext, SyncStatus, TransactionRequest, Work, + serde_helpers::JsonStorageKey, state::StateOverride, AccessListWithGasUsed, + AnyTransactionReceipt, BlockOverrides, Bundle, EIP1186AccountProofResponse, EthCallResponse, + FeeHistory, Header, Index, RichBlock, StateContext, SyncStatus, TransactionRequest, Work, }; use reth_transaction_pool::TransactionPool; +use serde_json::Value; +use tracing::trace; use crate::{ eth::{ @@ -315,13 +311,7 @@ where state_override: Option, ) -> Result { trace!(target: "rpc::eth", ?request, ?block_number, "Serving eth_estimateGas"); - Ok(self - .estimate_gas_at( - request, - block_number.unwrap_or(BlockId::Number(BlockNumberOrTag::Latest)), - state_override, - ) - .await?) + Ok(self.estimate_gas_at(request, block_number.unwrap_or_default(), state_override).await?) } /// Handler for: `eth_gasPrice` @@ -353,14 +343,12 @@ where /// Handler for: `eth_feeHistory` async fn fee_history( &self, - block_count: U64HexOrNumber, + block_count: u64, newest_block: BlockNumberOrTag, reward_percentiles: Option>, ) -> Result { trace!(target: "rpc::eth", ?block_count, ?newest_block, ?reward_percentiles, "Serving eth_feeHistory"); - return Ok( - EthApi::fee_history(self, block_count.to(), newest_block, reward_percentiles).await? - ) + return Ok(EthApi::fee_history(self, block_count, newest_block, reward_percentiles).await?) 
} /// Handler for: `eth_mining` @@ -438,8 +426,6 @@ where #[cfg(test)] mod tests { - use jsonrpsee::types::error::INVALID_PARAMS_CODE; - use crate::{ eth::{ cache::EthStateCache, gas_oracle::GasPriceOracle, FeeHistoryCache, @@ -447,6 +433,7 @@ mod tests { }, EthApi, }; + use jsonrpsee::types::error::INVALID_PARAMS_CODE; use reth_evm_ethereum::EthEvmConfig; use reth_interfaces::test_utils::{generators, generators::Rng}; use reth_network_api::noop::NoopNetwork; @@ -585,7 +572,7 @@ mod tests { async fn test_fee_history_empty() { let response = as EthApiServer>::fee_history( &build_test_eth_api(NoopProvider::default()), - 1.into(), + 1, BlockNumberOrTag::Latest, None, ) @@ -607,7 +594,7 @@ mod tests { let response = as EthApiServer>::fee_history( ð_api, - (newest_block + 1).into(), + newest_block + 1, newest_block.into(), Some(vec![10.0]), ) @@ -630,7 +617,7 @@ mod tests { let response = as EthApiServer>::fee_history( ð_api, - 1.into(), + 1, (newest_block + 1000).into(), Some(vec![10.0]), ) @@ -653,7 +640,7 @@ mod tests { let response = as EthApiServer>::fee_history( ð_api, - 0.into(), + 0, newest_block.into(), None, ) diff --git a/crates/rpc/rpc/src/eth/api/sign.rs b/crates/rpc/rpc/src/eth/api/sign.rs index 66df0e8de..5cbdefa41 100644 --- a/crates/rpc/rpc/src/eth/api/sign.rs +++ b/crates/rpc/rpc/src/eth/api/sign.rs @@ -42,7 +42,7 @@ impl EthApi, ) -> EthResult { let chain_info = self.provider().chain_info()?; - let block_id = block_id.unwrap_or(BlockId::Number(BlockNumberOrTag::Latest)); + let block_id = block_id.unwrap_or_default(); // if we are trying to create a proof for the latest block, but have a BlockId as input // that is not BlockNumberOrTag::Latest, then we need to figure out whether or not the diff --git a/crates/rpc/rpc/src/eth/api/transactions.rs b/crates/rpc/rpc/src/eth/api/transactions.rs index 43a75b68b..721cef3db 100644 --- a/crates/rpc/rpc/src/eth/api/transactions.rs +++ b/crates/rpc/rpc/src/eth/api/transactions.rs @@ -8,6 +8,7 @@ use crate::{ }, EthApi, EthApiSpec, }; +use alloy_primitives::TxKind as RpcTransactionKind; use async_trait::async_trait; use reth_evm::ConfigureEvm; use reth_network_api::NetworkInfo; @@ -15,25 +16,23 @@ use reth_primitives::{ eip4844::calc_blob_gasprice, revm::env::{fill_block_env_with_coinbase, tx_env_with_recovered}, Address, BlockId, BlockNumberOrTag, Bytes, FromRecoveredPooledTransaction, Header, - IntoRecoveredTransaction, Receipt, SealedBlock, SealedBlockWithSenders, - TransactionKind::{Call, Create}, - TransactionMeta, TransactionSigned, TransactionSignedEcRecovered, B256, U256, + IntoRecoveredTransaction, Receipt, SealedBlock, SealedBlockWithSenders, TransactionMeta, + TransactionSigned, TransactionSignedEcRecovered, + TxKind::{Call, Create}, + B256, U256, }; use reth_provider::{ BlockReaderIdExt, ChainSpecProvider, EvmEnvProvider, StateProviderBox, StateProviderFactory, }; -use reth_revm::{ - database::StateProviderDatabase, - tracing::{TracingInspector, TracingInspectorConfig}, -}; +use reth_revm::database::StateProviderDatabase; use reth_rpc_types::{ transaction::{ EIP1559TransactionRequest, EIP2930TransactionRequest, EIP4844TransactionRequest, LegacyTransactionRequest, }, AnyReceiptEnvelope, AnyTransactionReceipt, Index, Log, ReceiptWithBloom, Transaction, - TransactionInfo, TransactionKind as RpcTransactionKind, TransactionReceipt, TransactionRequest, - TypedTransactionRequest, WithOtherFields, + TransactionInfo, TransactionReceipt, TransactionRequest, TypedTransactionRequest, + WithOtherFields, }; use 
reth_rpc_types_compat::transaction::from_recovered_with_block_context; use reth_transaction_pool::{TransactionOrigin, TransactionPool}; @@ -45,19 +44,12 @@ use revm::{ }, GetInspector, Inspector, }; +use revm_inspectors::tracing::{TracingInspector, TracingInspectorConfig}; use std::future::Future; -#[cfg(feature = "optimism")] -use crate::eth::api::optimism::OptimismTxMeta; -#[cfg(feature = "optimism")] -use crate::eth::optimism::OptimismEthApiError; use crate::eth::revm_utils::FillableTransaction; #[cfg(feature = "optimism")] -use reth_revm::optimism::RethL1BlockInfo; -#[cfg(feature = "optimism")] use reth_rpc_types::OptimismTransactionReceiptFields; -#[cfg(feature = "optimism")] -use revm::L1BlockInfo; use revm_primitives::db::{Database, DatabaseRef}; /// Helper alias type for the state's [CacheDB] @@ -285,7 +277,7 @@ pub trait EthTransactions: Send + Sync { f: F, ) -> EthResult<R> where - F: FnOnce(StateCacheDB, EnvWithHandlerCfg) -> EthResult<R> + Send + 'static, + F: FnOnce(&mut StateCacheDB, EnvWithHandlerCfg) -> EthResult<R> + Send + 'static, R: Send + 'static; /// Executes the call request at the given [BlockId]. @@ -306,7 +298,7 @@ pub trait EthTransactions: Send + Sync { inspector: I, ) -> EthResult<(ResultAndState, EnvWithHandlerCfg)> where - I: Inspector<StateCacheDB> + Send + 'static; + I: for<'a> Inspector<&'a mut StateCacheDB> + Send + 'static; /// Executes the transaction on top of the given [BlockId] with a tracer configured by the /// config. @@ -374,6 +366,20 @@ pub trait EthTransactions: Send + Sync { .await } + /// Retrieves the transaction if it exists and returns its trace. + /// + /// Before the transaction is traced, all previous transactions in the block are applied to the + /// state by executing them first. + /// The callback `f` is invoked with the [ResultAndState] after the transaction was executed and + /// the database that points to the state at the beginning of the transaction. + /// + /// Note: Implementers should use a threadpool where blocking is allowed, such as + /// [BlockingTaskPool](reth_tasks::pool::BlockingTaskPool). + async fn spawn_replay_transaction<F, R>(&self, hash: B256, f: F) -> EthResult<Option<R>> + where + F: FnOnce(TransactionInfo, ResultAndState, StateCacheDB) -> EthResult<R> + Send + 'static, + R: Send + 'static; + /// Retrieves the transaction if it exists and returns its trace.
/// /// Before the transaction is traced, all previous transaction in the block are applied to the @@ -569,10 +575,7 @@ where ::Error: Into, I: GetInspector, { - let mut evm = self.inner.evm_config.evm_with_env_and_inspector(db, env, inspector); - let res = evm.transact()?; - let (_, env) = evm.into_db_and_env_with_handler_cfg(); - Ok((res, env)) + self.inspect_and_return_db(db, env, inspector).map(|(res, env, _)| (res, env)) } fn inspect_and_return_db( @@ -917,10 +920,7 @@ where gas_limit: U256::from(gas.unwrap_or_default()), value: value.unwrap_or_default(), input: data.into_input().unwrap_or_default(), - kind: match to { - Some(to) => RpcTransactionKind::Call(to), - None => RpcTransactionKind::Create, - }, + kind: to.unwrap_or(RpcTransactionKind::Create), chain_id: None, })) } @@ -933,10 +933,7 @@ where gas_limit: U256::from(gas.unwrap_or_default()), value: value.unwrap_or_default(), input: data.into_input().unwrap_or_default(), - kind: match to { - Some(to) => RpcTransactionKind::Call(to), - None => RpcTransactionKind::Create, - }, + kind: to.unwrap_or(RpcTransactionKind::Create), chain_id: 0, access_list, })) @@ -956,10 +953,7 @@ where gas_limit: U256::from(gas.unwrap_or_default()), value: value.unwrap_or_default(), input: data.into_input().unwrap_or_default(), - kind: match to { - Some(to) => RpcTransactionKind::Call(to), - None => RpcTransactionKind::Create, - }, + kind: to.unwrap_or(RpcTransactionKind::Create), chain_id: 0, access_list: access_list.unwrap_or_default(), })) @@ -985,10 +979,7 @@ where gas_limit: U256::from(gas.unwrap_or_default()), value: value.unwrap_or_default(), input: data.into_input().unwrap_or_default(), - kind: match to { - Some(to) => RpcTransactionKind::Call(to), - None => RpcTransactionKind::Create, - }, + kind: to.unwrap_or(RpcTransactionKind::Create), access_list: access_list.unwrap_or_default(), // eip-4844 specific. @@ -1076,7 +1067,7 @@ where f: F, ) -> EthResult where - F: FnOnce(StateCacheDB, EnvWithHandlerCfg) -> EthResult + Send + 'static, + F: FnOnce(&mut StateCacheDB, EnvWithHandlerCfg) -> EthResult + Send + 'static, R: Send + 'static, { let (cfg, block_env, at) = self.evm_env_at(at).await?; @@ -1095,7 +1086,7 @@ where &mut db, overrides, )?; - f(db, env) + f(&mut db, env) }) .await .map_err(|_| EthApiError::InternalBlockingTaskError)? 
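The `spawn_replay_transaction` path added above rests on one invariant: every transaction that precedes the target is executed against a cache DB seeded from the parent-block state, so the target sees exactly its historical pre-state. A schematic of that loop with stand-in types; reth's real counterpart is `replay_transactions_until` over its `CacheDB`:

```rust
// Stand-in state and transaction types, not reth's.
struct Db {
    nonce: u64,
}
struct Tx {
    hash: u64,
}

fn execute_and_commit(db: &mut Db, _tx: &Tx) {
    // Placeholder for EVM execution plus state commit.
    db.nonce += 1;
}

/// Applies transactions in order, stopping *before* the target executes,
/// so `db` reflects the state the target transaction saw.
fn replay_until(db: &mut Db, txs: &[Tx], target: u64) -> Option<usize> {
    for (index, tx) in txs.iter().enumerate() {
        if tx.hash == target {
            return Some(index);
        }
        execute_and_commit(db, tx);
    }
    None
}

fn main() {
    let txs = vec![Tx { hash: 1 }, Tx { hash: 2 }, Tx { hash: 3 }];
    let mut db = Db { nonce: 0 };
    // Two predecessors applied; the target (hash 3) is not yet executed.
    assert_eq!(replay_until(&mut db, &txs, 3), Some(2));
    assert_eq!(db.nonce, 2);
}
```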
@@ -1108,10 +1099,7 @@ where overrides: EvmOverrides, ) -> EthResult<(ResultAndState, EnvWithHandlerCfg)> { let this = self.clone(); - self.spawn_with_call_at(request, at, overrides, move |mut db, env| { - this.transact(&mut db, env) - }) - .await + self.spawn_with_call_at(request, at, overrides, move |db, env| this.transact(db, env)).await } async fn spawn_inspect_call_at( @@ -1122,7 +1110,7 @@ where inspector: I, ) -> EthResult<(ResultAndState, EnvWithHandlerCfg)> where - I: Inspector + Send + 'static, + I: for<'a> Inspector<&'a mut StateCacheDB> + Send + 'static, { let this = self.clone(); self.spawn_with_call_at(request, at, overrides, move |db, env| { @@ -1143,11 +1131,9 @@ where { let this = self.clone(); self.with_state_at_block(at, |state| { - let db = CacheDB::new(StateProviderDatabase::new(state)); - + let mut db = CacheDB::new(StateProviderDatabase::new(state)); let mut inspector = TracingInspector::new(config); - let (res, _) = this.inspect(db, env, &mut inspector)?; - + let (res, _) = this.inspect(&mut db, env, &mut inspector)?; f(inspector, res) }) } @@ -1165,10 +1151,9 @@ where { let this = self.clone(); self.spawn_with_state_at_block(at, move |state| { - let db = CacheDB::new(StateProviderDatabase::new(state)); + let mut db = CacheDB::new(StateProviderDatabase::new(state)); let mut inspector = TracingInspector::new(config); - let (res, _, db) = this.inspect_and_return_db(db, env, &mut inspector)?; - + let (res, _) = this.inspect(&mut db, env, &mut inspector)?; f(inspector, res, db) }) .await @@ -1192,6 +1177,47 @@ where Ok(block.map(|block| (transaction, block.seal(block_hash)))) } + async fn spawn_replay_transaction(&self, hash: B256, f: F) -> EthResult> + where + F: FnOnce(TransactionInfo, ResultAndState, StateCacheDB) -> EthResult + Send + 'static, + R: Send + 'static, + { + let (transaction, block) = match self.transaction_and_block(hash).await? { + None => return Ok(None), + Some(res) => res, + }; + let (tx, tx_info) = transaction.split(); + + let (cfg, block_env, _) = self.evm_env_at(block.hash().into()).await?; + + // we need to get the state of the parent block because we're essentially replaying the + // block the transaction is included in + let parent_block = block.parent_hash; + let block_txs = block.into_transactions_ecrecovered(); + + let this = self.clone(); + self.spawn_with_state_at_block(parent_block.into(), move |state| { + let mut db = CacheDB::new(StateProviderDatabase::new(state)); + + // replay all transactions prior to the targeted transaction + this.replay_transactions_until( + &mut db, + cfg.clone(), + block_env.clone(), + block_txs, + tx.hash, + )?; + + let env = + EnvWithHandlerCfg::new_with_cfg_env(cfg, block_env, tx_env_with_recovered(&tx)); + + let (res, _) = this.transact(&mut db, env)?; + f(tx_info, res, db) + }) + .await + .map(Some) + } + async fn spawn_trace_transaction_in_block_with_inspector( &self, hash: B256, @@ -1462,7 +1488,7 @@ where .ok_or(EthApiError::UnknownBlockNumber)?; let block = block.unseal(); - let l1_block_info = reth_revm::optimism::extract_l1_info(&block).ok(); + let l1_block_info = reth_evm_optimism::extract_l1_info(&block).ok(); let optimism_tx_meta = self.build_op_tx_meta(&tx, l1_block_info, block.timestamp)?; build_transaction_receipt_with_block_receipts( @@ -1474,17 +1500,19 @@ where ) } - /// Builds [OptimismTxMeta] object using the provided [TransactionSigned], - /// [L1BlockInfo] and `block_timestamp`. The [L1BlockInfo] is used to calculate - /// the l1 fee and l1 data gas for the transaction. 
- /// If the [L1BlockInfo] is not provided, the [OptimismTxMeta] will be empty. + /// Builds op metadata object using the provided [TransactionSigned], L1 block info and + /// `block_timestamp`. The L1BlockInfo is used to calculate the l1 fee and l1 data gas for the + /// transaction. If the L1BlockInfo is not provided, the meta info will be empty. #[cfg(feature = "optimism")] pub(crate) fn build_op_tx_meta( &self, tx: &TransactionSigned, - l1_block_info: Option, + l1_block_info: Option, block_timestamp: u64, - ) -> EthResult { + ) -> EthResult { + use crate::eth::{api::optimism::OptimismTxMeta, optimism::OptimismEthApiError}; + use reth_evm_optimism::RethL1BlockInfo; + let Some(l1_block_info) = l1_block_info else { return Ok(OptimismTxMeta::default()) }; let (l1_fee, l1_data_gas) = if !tx.is_deposit() { @@ -1675,7 +1703,7 @@ pub(crate) fn build_transaction_receipt_with_block_receipts( meta: TransactionMeta, receipt: Receipt, all_receipts: &[Receipt], - #[cfg(feature = "optimism")] optimism_tx_meta: OptimismTxMeta, + #[cfg(feature = "optimism")] optimism_tx_meta: crate::eth::api::optimism::OptimismTxMeta, ) -> EthResult { // Note: we assume this transaction is valid, because it's mined (or part of pending block) and // we don't need to check for pre EIP-2 diff --git a/crates/rpc/rpc/src/eth/bundle.rs b/crates/rpc/rpc/src/eth/bundle.rs index c2d56df31..0523141eb 100644 --- a/crates/rpc/rpc/src/eth/bundle.rs +++ b/crates/rpc/rpc/src/eth/bundle.rs @@ -52,7 +52,7 @@ where EthBundleError::EmptyBundleTransactions.to_string(), )) } - if block_number.to::() == 0 { + if block_number == 0 { return Err(EthApiError::InvalidParams( EthBundleError::BundleMissingBlockNumber.to_string(), )) diff --git a/crates/rpc/rpc/src/eth/error.rs b/crates/rpc/rpc/src/eth/error.rs index d8add6397..df2aef800 100644 --- a/crates/rpc/rpc/src/eth/error.rs +++ b/crates/rpc/rpc/src/eth/error.rs @@ -5,24 +5,20 @@ use alloy_sol_types::decode_revert_reason; use jsonrpsee::types::{error::CALL_EXECUTION_FAILED_CODE, ErrorObject}; use reth_interfaces::RethError; use reth_primitives::{revm_primitives::InvalidHeader, Address, Bytes, U256}; -use reth_revm::tracing::{js::JsInspectorError, MuxError}; -use reth_rpc_types::{error::EthRpcErrorCode, request::TransactionInputError, BlockError}; +use reth_rpc_types::{ + error::EthRpcErrorCode, request::TransactionInputError, BlockError, ToRpcError, +}; use reth_transaction_pool::error::{ Eip4844PoolTransactionError, InvalidPoolTransactionError, PoolError, PoolErrorKind, PoolTransactionError, }; use revm::primitives::{EVMError, ExecutionResult, HaltReason, OutOfGasError}; +use revm_inspectors::tracing::{js::JsInspectorError, MuxError}; use std::time::Duration; /// Result alias pub type EthResult = Result; -/// A tait for custom rpc errors used by [EthApiError::Other]. -pub trait ToRpcError: std::error::Error + Send + Sync + 'static { - /// Converts the error to a JSON-RPC error object. - fn to_rpc_error(&self) -> ErrorObject<'static>; -} - /// Errors that can occur when interacting with the `eth_` namespace #[derive(Debug, thiserror::Error)] pub enum EthApiError { @@ -43,7 +39,12 @@ pub enum EthApiError { UnknownBlockNumber, /// Thrown when querying for `finalized` or `safe` block before the merge transition is /// finalized, - #[error("unknown block")] + /// + /// op-node uses case sensitive string comparison to parse this error: + /// + /// + /// TODO(#8045): Temporary, until a version of is pushed through that doesn't require this to figure out the EL sync status. 
+ #[error("Unknown block")] UnknownSafeOrFinalizedBlock, /// Thrown when an unknown block or transaction index is encountered #[error("unknown block or tx index")] @@ -119,7 +120,7 @@ pub enum EthApiError { #[error(transparent)] MuxTracerError(#[from] MuxError), /// Any other error - #[error("0")] + #[error("{0}")] Other(Box), } diff --git a/crates/rpc/rpc/src/eth/optimism.rs b/crates/rpc/rpc/src/eth/optimism.rs index 2871058f8..24f6f36ff 100644 --- a/crates/rpc/rpc/src/eth/optimism.rs +++ b/crates/rpc/rpc/src/eth/optimism.rs @@ -1,11 +1,9 @@ //! Optimism specific types. use jsonrpsee::types::ErrorObject; +use reth_rpc_types::ToRpcError; -use crate::{ - eth::error::{EthApiError, ToRpcError}, - result::internal_rpc_err, -}; +use crate::{eth::error::EthApiError, result::internal_rpc_err}; /// Eth Optimism Api Error #[cfg(feature = "optimism")] diff --git a/crates/rpc/rpc/src/eth/revm_utils.rs b/crates/rpc/rpc/src/eth/revm_utils.rs index 4b00d4662..c2855163b 100644 --- a/crates/rpc/rpc/src/eth/revm_utils.rs +++ b/crates/rpc/rpc/src/eth/revm_utils.rs @@ -7,7 +7,7 @@ use reth_primitives::revm::env::fill_op_tx_env; use reth_primitives::revm::env::fill_tx_env; use reth_primitives::{ revm::env::fill_tx_env_with_recovered, Address, TransactionSigned, - TransactionSignedEcRecovered, TxHash, B256, U256, + TransactionSignedEcRecovered, TxHash, TxKind, B256, U256, }; use reth_rpc_types::{ state::{AccountOverride, StateOverride}, @@ -250,13 +250,17 @@ pub(crate) fn create_txn_env( )?; let gas_limit = gas.unwrap_or_else(|| block_env.gas_limit.min(U256::from(u64::MAX)).to()); + let transact_to = match to { + Some(TxKind::Call(to)) => TransactTo::call(to), + _ => TransactTo::create(), + }; let env = TxEnv { gas_limit: gas_limit.try_into().map_err(|_| RpcInvalidTransactionError::GasUintOverflow)?, nonce, caller: from.unwrap_or_default(), gas_price, gas_priority_fee: max_priority_fee_per_gas, - transact_to: to.map(TransactTo::Call).unwrap_or_else(TransactTo::create), + transact_to, value: value.unwrap_or_default(), data: input.try_into_unique_input()?.unwrap_or_default(), chain_id, @@ -274,7 +278,10 @@ pub(crate) fn create_txn_env( } /// Caps the configured [TxEnv] `gas_limit` with the allowance of the caller. -pub(crate) fn cap_tx_gas_limit_with_caller_allowance(db: DB, env: &mut TxEnv) -> EthResult<()> +pub(crate) fn cap_tx_gas_limit_with_caller_allowance( + db: &mut DB, + env: &mut TxEnv, +) -> EthResult<()> where DB: Database, EthApiError: From<::Error>, @@ -292,7 +299,7 @@ where /// /// Returns an error if the caller has insufficient funds. /// Caution: This assumes non-zero `env.gas_price`. Otherwise, zero allowance will be returned. 
-pub(crate) fn caller_gas_allowance<DB>(mut db: DB, env: &TxEnv) -> EthResult<U256> +pub(crate) fn caller_gas_allowance<DB>(db: &mut DB, env: &TxEnv) -> EthResult<U256> where DB: Database, EthApiError: From<<DB as Database>::Error>, diff --git a/crates/rpc/rpc/src/eth/signer.rs b/crates/rpc/rpc/src/eth/signer.rs index b744d83ef..578907604 100644 --- a/crates/rpc/rpc/src/eth/signer.rs +++ b/crates/rpc/rpc/src/eth/signer.rs @@ -53,7 +53,7 @@ impl DevSigner { /// Generates a random dev signer which satisfies [EthSigner] trait pub(crate) fn random() -> Box<dyn EthSigner> { let mut signers = Self::random_signers(1); - signers.pop().expect("expect to generate at leas one signer") + signers.pop().expect("expect to generate at least one signer") } /// Generates provided number of random dev signers diff --git a/crates/rpc/rpc/src/layers/auth_client_layer.rs b/crates/rpc/rpc/src/layers/auth_client_layer.rs new file mode 100644 index 000000000..4c845796e --- /dev/null +++ b/crates/rpc/rpc/src/layers/auth_client_layer.rs @@ -0,0 +1,79 @@ +use crate::{Claims, JwtSecret}; +use http::HeaderValue; +use hyper::{header::AUTHORIZATION, service::Service}; +use std::{ + task::{Context, Poll}, + time::{Duration, SystemTime, UNIX_EPOCH}, +}; +use tower::Layer; + +/// A layer that adds a new JWT token to every request using AuthClientService. +#[derive(Debug)] +pub struct AuthClientLayer { + secret: JwtSecret, +} + +impl AuthClientLayer { + /// Create a new AuthClientLayer with the given `secret`. + pub fn new(secret: JwtSecret) -> Self { + Self { secret } + } +} + +impl<S> Layer<S> for AuthClientLayer { + type Service = AuthClientService<S>; + + fn layer(&self, inner: S) -> Self::Service { + AuthClientService::new(self.secret.clone(), inner) + } +} + +/// Automatically authenticates every client request with the given `secret`. +#[derive(Debug, Clone)] +pub struct AuthClientService<S> { + secret: JwtSecret, + inner: S, +} + +impl<S> AuthClientService<S> { + fn new(secret: JwtSecret, inner: S) -> Self { + Self { secret, inner } + } +} + +impl<S, B> Service<hyper::Request<B>> for AuthClientService<S> +where + S: Service<hyper::Request<B>>, +{ + type Response = S::Response; + type Error = S::Error; + type Future = S::Future; + + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> { + self.inner.poll_ready(cx) + } + + fn call(&mut self, mut request: hyper::Request<B>) -> Self::Future { + request.headers_mut().insert(AUTHORIZATION, secret_to_bearer_header(&self.secret)); + self.inner.call(request) + } +} + +/// Helper function to convert a secret into a Bearer auth header value with claims according to +/// . +/// The token is valid for 60 seconds. +pub fn secret_to_bearer_header(secret: &JwtSecret) -> HeaderValue { + format!( + "Bearer {}", + secret + .encode(&Claims { + iat: (SystemTime::now().duration_since(UNIX_EPOCH).unwrap() + + Duration::from_secs(60)) + .as_secs(), + exp: None, + }) + .unwrap() + ) + .parse() + .unwrap() +} diff --git a/crates/rpc/rpc/src/layers/auth_layer.rs b/crates/rpc/rpc/src/layers/auth_layer.rs index 0137fcd0c..ed22d607c 100644 --- a/crates/rpc/rpc/src/layers/auth_layer.rs +++ b/crates/rpc/rpc/src/layers/auth_layer.rs @@ -44,11 +44,7 @@ pub struct AuthLayer<V> { validator: V, } -impl<V> AuthLayer<V> -where - V: AuthValidator, - V::ResponseBody: Body, -{ +impl<V> AuthLayer<V> { /// Creates an instance of [`AuthLayer`]. /// `validator` is a generic trait able to validate requests (see [`AuthValidator`]).
pub fn new(validator: V) -> Self { diff --git a/crates/rpc/rpc/src/layers/mod.rs b/crates/rpc/rpc/src/layers/mod.rs index ff021a372..83a336e5f 100644 --- a/crates/rpc/rpc/src/layers/mod.rs +++ b/crates/rpc/rpc/src/layers/mod.rs @@ -1,8 +1,11 @@ use http::{HeaderMap, Response}; +mod auth_client_layer; mod auth_layer; mod jwt_secret; mod jwt_validator; + +pub use auth_client_layer::{secret_to_bearer_header, AuthClientLayer, AuthClientService}; pub use auth_layer::AuthLayer; pub use jwt_secret::{Claims, JwtError, JwtSecret}; pub use jwt_validator::JwtAuthValidator; diff --git a/crates/rpc/rpc/src/lib.rs b/crates/rpc/rpc/src/lib.rs index fe5e2a97d..d68f8a018 100644 --- a/crates/rpc/rpc/src/lib.rs +++ b/crates/rpc/rpc/src/lib.rs @@ -12,7 +12,7 @@ //! //! To avoid this, all blocking or CPU intensive handlers must be spawned to a separate task. See //! the [EthApi] handler implementations for examples. The rpc-api traits make no use of the -//! available jsonrpsee `blocking` attribute to give implementors more freedom because the +//! available jsonrpsee `blocking` attribute to give implementers more freedom because the //! `blocking` attribute and async handlers are mutually exclusive. However, as mentioned above, a //! lot of handlers make use of async functions, caching for example, but are also using blocking //! disk-io, hence these calls are spawned as futures to a blocking task manually. @@ -41,7 +41,10 @@ pub use admin::AdminApi; pub use debug::DebugApi; pub use engine::{EngineApi, EngineEthApi}; pub use eth::{EthApi, EthApiSpec, EthFilter, EthPubSub, EthSubscriptionIdProvider}; -pub use layers::{AuthLayer, AuthValidator, Claims, JwtAuthValidator, JwtError, JwtSecret}; +pub use layers::{ + secret_to_bearer_header, AuthClientLayer, AuthClientService, AuthLayer, AuthValidator, Claims, + JwtAuthValidator, JwtError, JwtSecret, +}; pub use net::NetApi; pub use otterscan::OtterscanApi; pub use reth::RethApi; diff --git a/crates/rpc/rpc/src/otterscan.rs b/crates/rpc/rpc/src/otterscan.rs index bdfbc1293..2f62e66a3 100644 --- a/crates/rpc/rpc/src/otterscan.rs +++ b/crates/rpc/rpc/src/otterscan.rs @@ -1,7 +1,6 @@ use alloy_primitives::Bytes; use async_trait::async_trait; use jsonrpsee::core::RpcResult; -use revm::inspectors::NoOpInspector; use revm_inspectors::transfer::{TransferInspector, TransferKind}; use revm_primitives::ExecutionResult; @@ -81,14 +80,10 @@ where async fn get_transaction_error(&self, tx_hash: TxHash) -> RpcResult> { let maybe_revert = self .eth - .spawn_trace_transaction_in_block_with_inspector( - tx_hash, - NoOpInspector, - |_tx_info, _inspector, res, _| match res.result { - ExecutionResult::Revert { output, .. } => Ok(Some(output)), - _ => Ok(None), - }, - ) + .spawn_replay_transaction(tx_hash, |_tx_info, res, _| match res.result { + ExecutionResult::Revert { output, .. 
} => Ok(Some(output)), + _ => Ok(None), + }) .await .map(Option::flatten)?; Ok(maybe_revert) diff --git a/crates/rpc/rpc/src/trace.rs b/crates/rpc/rpc/src/trace.rs index ade8291c3..710440914 100644 --- a/crates/rpc/rpc/src/trace.rs +++ b/crates/rpc/rpc/src/trace.rs @@ -11,10 +11,7 @@ use reth_primitives::{ revm::env::tx_env_with_recovered, BlockId, BlockNumberOrTag, Bytes, SealedHeader, B256, U256, }; use reth_provider::{BlockReader, ChainSpecProvider, EvmEnvProvider, StateProviderFactory}; -use reth_revm::{ - database::StateProviderDatabase, - tracing::{parity::populate_state_diff, TracingInspector, TracingInspectorConfig}, -}; +use reth_revm::database::StateProviderDatabase; use reth_rpc_api::TraceApiServer; use reth_rpc_types::{ state::StateOverride, @@ -31,7 +28,10 @@ use revm::{ db::{CacheDB, DatabaseCommit}, primitives::EnvWithHandlerCfg, }; -use revm_inspectors::opcode::OpcodeGasInspector; +use revm_inspectors::{ + opcode::OpcodeGasInspector, + tracing::{parity::populate_state_diff, TracingInspector, TracingInspectorConfig}, +}; use std::{collections::HashSet, sync::Arc}; use tokio::sync::{AcquireError, OwnedSemaphorePermit}; @@ -78,7 +78,7 @@ where { /// Executes the given call and returns a number of possible traces for it. pub async fn trace_call(&self, trace_request: TraceCallRequest) -> EthResult { - let at = trace_request.block_id.unwrap_or(BlockId::Number(BlockNumberOrTag::Latest)); + let at = trace_request.block_id.unwrap_or_default(); let config = TracingInspectorConfig::from_parity_config(&trace_request.trace_types); let overrides = EvmOverrides::new(trace_request.state_overrides, trace_request.block_overrides); @@ -86,7 +86,7 @@ where let this = self.clone(); self.eth_api() .spawn_with_call_at(trace_request.call, at, overrides, move |db, env| { - let (res, _, db) = this.eth_api().inspect_and_return_db(db, env, &mut inspector)?; + let (res, _) = this.eth_api().inspect(&mut *db, env, &mut inspector)?; let trace_res = inspector.into_parity_builder().into_trace_results_with_state( &res, &trace_request.trace_types, @@ -106,11 +106,7 @@ where ) -> EthResult { let tx = recover_raw_transaction(tx)?; - let (cfg, block, at) = self - .inner - .eth_api - .evm_env_at(block_id.unwrap_or(BlockId::Number(BlockNumberOrTag::Latest))) - .await?; + let (cfg, block, at) = self.inner.eth_api.evm_env_at(block_id.unwrap_or_default()).await?; let tx = tx_env_with_recovered(&tx.into_ecrecovered_transaction()); let env = EnvWithHandlerCfg::new_with_cfg_env(cfg, block, tx); diff --git a/crates/stages-api/Cargo.toml b/crates/stages-api/Cargo.toml index d991a47af..2101961fd 100644 --- a/crates/stages-api/Cargo.toml +++ b/crates/stages-api/Cargo.toml @@ -18,6 +18,8 @@ reth-db.workspace = true reth-interfaces.workspace = true reth-static-file.workspace = true reth-tokio-util.workspace = true +reth-consensus.workspace = true +reth-prune.workspace = true # metrics reth-metrics.workspace = true diff --git a/crates/stages-api/src/error.rs b/crates/stages-api/src/error.rs index 3b744e7cb..37fe2b3fd 100644 --- a/crates/stages-api/src/error.rs +++ b/crates/stages-api/src/error.rs @@ -1,5 +1,6 @@ +use reth_consensus::ConsensusError; use reth_interfaces::{ - consensus, db::DatabaseError as DbError, executor, p2p::error::DownloadError, RethError, + db::DatabaseError as DbError, executor, p2p::error::DownloadError, RethError, }; use reth_primitives::{BlockNumber, SealedHeader, StaticFileSegment, TxNumber}; use reth_provider::ProviderError; @@ -13,12 +14,22 @@ use tokio::sync::mpsc::error::SendError; pub enum 
BlockErrorKind { /// The block encountered a validation error. #[error("validation error: {0}")] - Validation(#[from] consensus::ConsensusError), + Validation(#[from] ConsensusError), /// The block encountered an execution error. #[error("execution error: {0}")] Execution(#[from] executor::BlockExecutionError), } +impl BlockErrorKind { + /// Returns `true` if the error is a state root error. + pub fn is_state_root_error(&self) -> bool { + match self { + BlockErrorKind::Validation(err) => err.is_state_root_error(), + BlockErrorKind::Execution(err) => err.is_state_root_error(), + } + } +} + /// A stage execution error. #[derive(Error, Debug)] pub enum StageError { @@ -49,7 +60,7 @@ pub enum StageError { header: Box, /// The error that occurred when attempting to attach the header. #[source] - error: Box, + error: Box, }, /// The headers stage is missing sync gap. #[error("missing sync gap")] diff --git a/crates/stages-api/src/pipeline/mod.rs b/crates/stages-api/src/pipeline/mod.rs index bb1512958..5aceb515b 100644 --- a/crates/stages-api/src/pipeline/mod.rs +++ b/crates/stages-api/src/pipeline/mod.rs @@ -7,13 +7,15 @@ use reth_db::database::Database; use reth_interfaces::RethResult; use reth_primitives::{ constants::BEACON_CONSENSUS_REORG_UNWIND_DEPTH, - stage::{StageCheckpoint, StageId}, + stage::{PipelineTarget, StageCheckpoint, StageId}, static_file::HighestStaticFiles, BlockNumber, B256, }; use reth_provider::{ providers::StaticFileWriter, ProviderFactory, StageCheckpointReader, StageCheckpointWriter, + StaticFileProviderFactory, }; +use reth_prune::PrunerBuilder; use reth_static_file::StaticFileProducer; use reth_tokio_util::EventListeners; use std::pin::Pin; @@ -129,17 +131,31 @@ where /// Consume the pipeline and run it until it reaches the provided tip, if set. Return the /// pipeline and its result as a future. #[track_caller] - pub fn run_as_fut(mut self, tip: Option) -> PipelineFut { + pub fn run_as_fut(mut self, target: Option) -> PipelineFut { // TODO: fix this in a follow up PR. ideally, consensus engine would be responsible for // updating metrics. let _ = self.register_metrics(); // ignore error Box::pin(async move { // NOTE: the tip should only be None if we are in continuous sync mode. - if let Some(tip) = tip { - self.set_tip(tip); + if let Some(target) = target { + match target { + PipelineTarget::Sync(tip) => self.set_tip(tip), + PipelineTarget::Unwind(target) => { + if let Err(err) = self.move_to_static_files() { + return (self, Err(err.into())) + } + if let Err(err) = self.unwind(target, None) { + return (self, Err(err)) + } + self.progress.update(target); + + return (self, Ok(ControlFlow::Continue { block_number: target })) + } + } } + let result = self.run_loop().await; - trace!(target: "sync::pipeline", ?tip, ?result, "Pipeline finished"); + trace!(target: "sync::pipeline", ?target, ?result, "Pipeline finished"); (self, result) }) } @@ -184,7 +200,7 @@ where /// pipeline (for example the `Finish` stage). Or [ControlFlow::Unwind] of the stage that caused /// the unwind. 
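The new `PipelineTarget` above folds forward sync and explicit unwinds into a single entry point. Roughly, the dispatch semantics reduce to the following simplified sketch (illustrative types only, block numbers in place of tip hashes):

```rust
#[derive(Debug, Clone, Copy)]
enum PipelineTarget {
    /// Sync forward toward this tip (simplified here to a block number).
    Sync(u64),
    /// First unwind back to this block number, then report progress and stop.
    Unwind(u64),
}

fn run_as_fut_sketch(target: Option<PipelineTarget>, progress: &mut u64) {
    match target {
        // Continuous sync mode: no explicit target at all.
        None => {}
        // Forward sync: remember the tip and fall through to the stage loop.
        Some(PipelineTarget::Sync(tip)) => *progress = (*progress).max(tip),
        // Unwind: rewind progress and return early, mirroring the early
        // `ControlFlow::Continue { block_number: target }` return above.
        Some(PipelineTarget::Unwind(to)) => *progress = to,
    }
}

fn main() {
    let mut progress = 100;
    run_as_fut_sketch(Some(PipelineTarget::Unwind(42)), &mut progress);
    assert_eq!(progress, 42);
}
```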
pub async fn run_loop(&mut self) -> Result { - self.produce_static_files()?; + self.move_to_static_files()?; let mut previous_stage = None; for stage_index in 0..self.stages.len() { @@ -221,9 +237,10 @@ where Ok(self.progress.next_ctrl()) } - /// Run [static file producer](StaticFileProducer) and move all data from the database to static - /// files for corresponding [segments](reth_primitives::static_file::StaticFileSegment), - /// according to their [stage checkpoints](StageCheckpoint): + /// Run [static file producer](StaticFileProducer) and [pruner](reth_prune::Pruner) to **move** + /// all data from the database to static files for corresponding + /// [segments](reth_primitives::static_file::StaticFileSegment), according to their [stage + /// checkpoints](StageCheckpoint): /// - [StaticFileSegment::Headers](reth_primitives::static_file::StaticFileSegment::Headers) -> /// [StageId::Headers] /// - [StaticFileSegment::Receipts](reth_primitives::static_file::StaticFileSegment::Receipts) @@ -233,22 +250,38 @@ where /// /// CAUTION: This method locks the static file producer Mutex, hence can block the thread if the /// lock is occupied. - pub fn produce_static_files(&mut self) -> RethResult<()> { + pub fn move_to_static_files(&self) -> RethResult<()> { let mut static_file_producer = self.static_file_producer.lock(); - let provider = self.provider_factory.provider()?; - let targets = static_file_producer.get_static_file_targets(HighestStaticFiles { - headers: provider - .get_stage_checkpoint(StageId::Headers)? - .map(|checkpoint| checkpoint.block_number), - receipts: provider - .get_stage_checkpoint(StageId::Execution)? - .map(|checkpoint| checkpoint.block_number), - transactions: provider - .get_stage_checkpoint(StageId::Bodies)? - .map(|checkpoint| checkpoint.block_number), - })?; - static_file_producer.run(targets)?; + // Copies data from database to static files + let lowest_static_file_height = { + let provider = self.provider_factory.provider()?; + let stages_checkpoints = [StageId::Headers, StageId::Execution, StageId::Bodies] + .into_iter() + .map(|stage| { + provider.get_stage_checkpoint(stage).map(|c| c.map(|c| c.block_number)) + }) + .collect::, _>>()?; + + let targets = static_file_producer.get_static_file_targets(HighestStaticFiles { + headers: stages_checkpoints[0], + receipts: stages_checkpoints[1], + transactions: stages_checkpoints[2], + })?; + static_file_producer.run(targets)?; + stages_checkpoints.into_iter().min().expect("exists") + }; + + // Deletes data which has been copied to static files. 
+ if let Some(prune_tip) = lowest_static_file_height { + // Run the pruner so we don't potentially end up with higher height in the database vs + // static files during a pipeline unwind + let mut pruner = PrunerBuilder::new(Default::default()) + .prune_delete_limit(usize::MAX) + .build(self.provider_factory.clone()); + + pruner.run(prune_tip)?; + } Ok(()) } @@ -552,8 +585,8 @@ mod tests { use super::*; use crate::{test_utils::TestStage, UnwindOutput}; use assert_matches::assert_matches; + use reth_consensus::ConsensusError; use reth_interfaces::{ - consensus, provider::ProviderError, test_utils::{generators, generators::random_header}, }; @@ -922,9 +955,7 @@ mod tests { 5, Default::default(), )), - error: BlockErrorKind::Validation( - consensus::ConsensusError::BaseFeeMissing, - ), + error: BlockErrorKind::Validation(ConsensusError::BaseFeeMissing), })) .add_unwind(Ok(UnwindOutput { checkpoint: StageCheckpoint::new(0) })) .add_exec(Ok(ExecOutput { checkpoint: StageCheckpoint::new(10), done: true })), diff --git a/crates/stages/Cargo.toml b/crates/stages/Cargo.toml index df98d1dd7..ef91b2be2 100644 --- a/crates/stages/Cargo.toml +++ b/crates/stages/Cargo.toml @@ -23,6 +23,9 @@ reth-trie = { workspace = true, features = ["metrics"] } reth-etl.workspace = true reth-config.workspace = true reth-stages-api = { workspace = true, features = ["test-utils"] } +reth-consensus.workspace = true +reth-evm.workspace = true +reth-revm.workspace = true # async tokio = { workspace = true, features = ["sync"] } @@ -44,11 +47,13 @@ reth-primitives = { workspace = true, features = ["test-utils", "arbitrary"] } reth-db = { workspace = true, features = ["test-utils", "mdbx"] } reth-evm-ethereum.workspace = true reth-interfaces = { workspace = true, features = ["test-utils"] } +reth-consensus = { workspace = true, features = ["test-utils"] } reth-downloaders.workspace = true reth-revm.workspace = true reth-static-file.workspace = true reth-trie = { workspace = true, features = ["test-utils"] } reth-provider = { workspace = true, features = ["test-utils"] } +reth-network-types.workspace = true alloy-rlp.workspace = true itertools.workspace = true diff --git a/crates/stages/src/lib.rs b/crates/stages/src/lib.rs index f8e427763..2c6aaff25 100644 --- a/crates/stages/src/lib.rs +++ b/crates/stages/src/lib.rs @@ -15,19 +15,22 @@ //! # use std::sync::Arc; //! # use reth_downloaders::bodies::bodies::BodiesDownloaderBuilder; //! # use reth_downloaders::headers::reverse_headers::ReverseHeadersDownloaderBuilder; -//! # use reth_interfaces::consensus::Consensus; -//! # use reth_interfaces::test_utils::{TestBodiesClient, TestConsensus, TestHeadersClient}; -//! # use reth_revm::EvmProcessorFactory; -//! # use reth_primitives::{PeerId, MAINNET, B256, PruneModes}; +//! # use reth_interfaces::test_utils::{TestBodiesClient, TestHeadersClient}; +//! # use reth_evm_ethereum::execute::EthExecutorProvider; +//! # use reth_primitives::{MAINNET, B256, PruneModes}; +//! # use reth_network_types::PeerId; //! # use reth_stages::Pipeline; //! # use reth_stages::sets::DefaultStages; //! # use tokio::sync::watch; //! # use reth_evm_ethereum::EthEvmConfig; //! # use reth_provider::ProviderFactory; +//! # use reth_provider::StaticFileProviderFactory; //! # use reth_provider::HeaderSyncMode; //! # use reth_provider::test_utils::create_test_provider_factory; //! # use reth_static_file::StaticFileProducer; //! # use reth_config::config::EtlConfig; +//! # use reth_consensus::Consensus; +//! # use reth_consensus::test_utils::TestConsensus; //! 
# //! # let chain_spec = MAINNET.clone(); //! # let consensus: Arc = Arc::new(TestConsensus::default()); @@ -42,7 +45,7 @@ //! # provider_factory.clone() //! # ); //! # let (tip_tx, tip_rx) = watch::channel(B256::default()); -//! # let executor_factory = EvmProcessorFactory::new(chain_spec.clone(), EthEvmConfig::default()); +//! # let executor_provider = EthExecutorProvider::mainnet(); //! # let static_file_producer = StaticFileProducer::new( //! # provider_factory.clone(), //! # provider_factory.static_file_provider(), @@ -52,17 +55,15 @@ //! # let pipeline = //! Pipeline::builder() //! .with_tip_sender(tip_tx) -//! .add_stages( -//! DefaultStages::new( -//! provider_factory.clone(), -//! HeaderSyncMode::Tip(tip_rx), -//! consensus, -//! headers_downloader, -//! bodies_downloader, -//! executor_factory, -//! EtlConfig::default(), -//! ) -//! ) +//! .add_stages(DefaultStages::new( +//! provider_factory.clone(), +//! HeaderSyncMode::Tip(tip_rx), +//! consensus, +//! headers_downloader, +//! bodies_downloader, +//! executor_provider, +//! EtlConfig::default(), +//! )) //! .build(provider_factory, static_file_producer); //! ``` //! diff --git a/crates/stages/src/sets.rs b/crates/stages/src/sets.rs index 833f2af8e..7ec85170f 100644 --- a/crates/stages/src/sets.rs +++ b/crates/stages/src/sets.rs @@ -12,43 +12,29 @@ //! ```no_run //! # use reth_stages::Pipeline; //! # use reth_stages::sets::{OfflineStages}; -//! # use reth_revm::EvmProcessorFactory; //! # use reth_primitives::{PruneModes, MAINNET}; //! # use reth_evm_ethereum::EthEvmConfig; +//! # use reth_provider::StaticFileProviderFactory; //! # use reth_provider::test_utils::create_test_provider_factory; //! # use reth_static_file::StaticFileProducer; //! # use reth_config::config::EtlConfig; +//! # use reth_evm::execute::BlockExecutorProvider; //! -//! # let executor_factory = EvmProcessorFactory::new(MAINNET.clone(), EthEvmConfig::default()); -//! # let provider_factory = create_test_provider_factory(); -//! # let static_file_producer = StaticFileProducer::new( +//! # fn create(exec: impl BlockExecutorProvider) { +//! +//! let provider_factory = create_test_provider_factory(); +//! let static_file_producer = StaticFileProducer::new( //! provider_factory.clone(), //! provider_factory.static_file_provider(), //! PruneModes::default(), //! ); //! // Build a pipeline with all offline stages. -//! # let pipeline = Pipeline::builder() -//! .add_stages(OfflineStages::new(executor_factory, EtlConfig::default())) +//! let pipeline = Pipeline::builder() +//! .add_stages(OfflineStages::new(exec, EtlConfig::default())) //! .build(provider_factory, static_file_producer); -//! ``` //! -//! ```ignore -//! # use reth_stages::Pipeline; -//! # use reth_stages::{StageSet, sets::OfflineStages}; -//! # use reth_revm::EvmProcessorFactory; -//! # use reth_node_ethereum::EthEvmConfig; -//! # use reth_primitives::MAINNET; -//! # use reth_config::config::EtlConfig; -//! -//! // Build a pipeline with all offline stages and a custom stage at the end. -//! # let executor_factory = EvmProcessorFactory::new(MAINNET.clone(), EthEvmConfig::default()); -//! Pipeline::builder() -//! .add_stages( -//! OfflineStages::new(executor_factory, EtlConfig::default()).builder().add_stage(MyCustomStage) -//! ) -//! .build(); +//! # } //! 
``` - use crate::{ stages::{ AccountHashingStage, BodyStage, ExecutionStage, FinishStage, HeaderStage, @@ -58,12 +44,13 @@ use crate::{ StageSet, StageSetBuilder, }; use reth_config::config::EtlConfig; +use reth_consensus::Consensus; use reth_db::database::Database; -use reth_interfaces::{ - consensus::Consensus, - p2p::{bodies::downloader::BodyDownloader, headers::downloader::HeaderDownloader}, +use reth_evm::execute::BlockExecutorProvider; +use reth_interfaces::p2p::{ + bodies::downloader::BodyDownloader, headers::downloader::HeaderDownloader, }; -use reth_provider::{ExecutorFactory, HeaderSyncGapProvider, HeaderSyncMode}; +use reth_provider::{HeaderSyncGapProvider, HeaderSyncMode}; use std::sync::Arc; /// A set containing all stages to run a fully syncing instance of reth. @@ -97,7 +84,7 @@ pub struct DefaultStages { etl_config: EtlConfig, } -impl DefaultStages { +impl DefaultStages { /// Create a new set of default stages with default values. pub fn new( provider: Provider, @@ -105,11 +92,11 @@ impl DefaultStages { consensus: Arc, header_downloader: H, body_downloader: B, - executor_factory: EF, + executor_factory: E, etl_config: EtlConfig, ) -> Self where - EF: ExecutorFactory, + E: BlockExecutorProvider, { Self { online: OnlineStages::new( @@ -126,14 +113,14 @@ impl DefaultStages { } } -impl DefaultStages +impl DefaultStages where - EF: ExecutorFactory, + E: BlockExecutorProvider, { /// Appends the default offline stages and default finish stage to the given builder. pub fn add_offline_stages( default_offline: StageSetBuilder, - executor_factory: EF, + executor_factory: E, etl_config: EtlConfig, ) -> StageSetBuilder { StageSetBuilder::default() @@ -143,12 +130,12 @@ where } } -impl StageSet for DefaultStages +impl StageSet for DefaultStages where Provider: HeaderSyncGapProvider + 'static, H: HeaderDownloader + 'static, B: BodyDownloader + 'static, - EF: ExecutorFactory, + E: BlockExecutorProvider, DB: Database + 'static, { fn builder(self) -> StageSetBuilder { @@ -268,7 +255,11 @@ impl OfflineStages { } } -impl StageSet for OfflineStages { +impl StageSet for OfflineStages +where + E: BlockExecutorProvider, + DB: Database, +{ fn builder(self) -> StageSetBuilder { ExecutionStages::new(self.executor_factory) .builder() @@ -280,23 +271,27 @@ impl StageSet for OfflineStages { /// A set containing all stages that are required to execute pre-existing block data. #[derive(Debug)] #[non_exhaustive] -pub struct ExecutionStages { +pub struct ExecutionStages { /// Executor factory that will create executors. - executor_factory: EF, + executor_factory: E, } -impl ExecutionStages { +impl ExecutionStages { /// Create a new set of execution stages with default values. - pub fn new(executor_factory: EF) -> Self { + pub fn new(executor_factory: E) -> Self { Self { executor_factory } } } -impl StageSet for ExecutionStages { +impl StageSet for ExecutionStages +where + DB: Database, + E: BlockExecutorProvider, +{ fn builder(self) -> StageSetBuilder { StageSetBuilder::default() .add_stage(SenderRecoveryStage::default()) - .add_stage(ExecutionStage::new_with_factory(self.executor_factory)) + .add_stage(ExecutionStage::new_with_executor(self.executor_factory)) } } diff --git a/crates/stages/src/stages/bodies.rs b/crates/stages/src/stages/bodies.rs index 6dfe7a6a8..bce56880a 100644 --- a/crates/stages/src/stages/bodies.rs +++ b/crates/stages/src/stages/bodies.rs @@ -146,8 +146,13 @@ impl Stage for BodyStage { // If static files are ahead, then we didn't reach the database commit in a previous // stage run. 
So, our only solution is to unwind the static files and proceed from the // database expected height. - Ordering::Greater => static_file_producer - .prune_transactions(next_static_file_tx_num - next_tx_num, from_block - 1)?, + Ordering::Greater => { + static_file_producer + .prune_transactions(next_static_file_tx_num - next_tx_num, from_block - 1)?; + // Since this is a database <-> static file inconsistency, we commit the change + // straight away. + static_file_producer.commit()?; + } // If static files are behind, then there was some corruption or loss of files. This // error will trigger an unwind, that will bring the database to the same height as the // static files. @@ -381,6 +386,7 @@ mod tests { use assert_matches::assert_matches; use reth_primitives::stage::StageUnitCheckpoint; + use reth_provider::StaticFileProviderFactory; use test_utils::*; use crate::test_utils::{ @@ -575,6 +581,7 @@ mod tests { let mut static_file_producer = static_file_provider.latest_writer(StaticFileSegment::Transactions).unwrap(); static_file_producer.prune_transactions(1, checkpoint.block_number).unwrap(); + static_file_producer.commit().unwrap(); } // Unwind all of it let unwind_to = 1; @@ -632,7 +639,8 @@ mod tests { StaticFileSegment, TxNumber, B256, }; use reth_provider::{ - providers::StaticFileWriter, HeaderProvider, ProviderFactory, TransactionsProvider, + providers::StaticFileWriter, HeaderProvider, ProviderFactory, + StaticFileProviderFactory, TransactionsProvider, }; use reth_stages_api::{ExecInput, ExecOutput, UnwindInput}; diff --git a/crates/stages/src/stages/execution.rs b/crates/stages/src/stages/execution.rs index 7f22ecaef..9d8cf6ac6 100644 --- a/crates/stages/src/stages/execution.rs +++ b/crates/stages/src/stages/execution.rs @@ -3,7 +3,8 @@ use num_traits::Zero; use reth_db::{ cursor::DbCursorRO, database::Database, static_file::HeaderMask, tables, transaction::DbTx, }; -use reth_exex::ExExManagerHandle; +use reth_evm::execute::{BatchBlockExecutionOutput, BatchExecutor, BlockExecutorProvider}; +use reth_exex::{ExExManagerHandle, ExExNotification}; use reth_primitives::{ stage::{ CheckpointBlockRange, EntitiesCheckpoint, ExecutionCheckpoint, StageCheckpoint, StageId, @@ -12,10 +13,11 @@ use reth_primitives::{ }; use reth_provider::{ providers::{StaticFileProvider, StaticFileProviderRWRefMut, StaticFileWriter}, - BlockReader, CanonStateNotification, Chain, DatabaseProviderRW, ExecutorFactory, - HeaderProvider, LatestStateProviderRef, OriginalValuesKnown, ProviderError, StatsReader, + BlockReader, BundleStateWithReceipts, Chain, DatabaseProviderRW, HeaderProvider, + LatestStateProviderRef, OriginalValuesKnown, ProviderError, StateWriter, StatsReader, TransactionVariant, }; +use reth_revm::database::StateProviderDatabase; use reth_stages_api::{ BlockErrorKind, ExecInput, ExecOutput, MetricEvent, MetricEventsSender, Stage, StageError, UnwindInput, UnwindOutput, @@ -60,10 +62,10 @@ use tracing::*; /// to [tables::PlainStorageState] // false positive, we cannot derive it if !DB: Debug. #[allow(missing_debug_implementations)] -pub struct ExecutionStage { +pub struct ExecutionStage { metrics_tx: Option, - /// The stage's internal executor - executor_factory: EF, + /// The stage's internal block executor + executor_provider: E, /// The commit thresholds of the execution stage. 
thresholds: ExecutionStageThresholds, /// The highest threshold (in number of blocks) for switching between incremental @@ -77,10 +79,10 @@ pub struct ExecutionStage { exex_manager_handle: ExExManagerHandle, } -impl ExecutionStage { +impl ExecutionStage { /// Create new execution stage with specified config. pub fn new( - executor_factory: EF, + executor_provider: E, thresholds: ExecutionStageThresholds, external_clean_threshold: u64, prune_modes: PruneModes, @@ -89,19 +91,19 @@ impl ExecutionStage { Self { metrics_tx: None, external_clean_threshold, - executor_factory, + executor_provider, thresholds, prune_modes, exex_manager_handle, } } - /// Create an execution stage with the provided executor factory. + /// Create an execution stage with the provided executor. /// /// The commit threshold will be set to 10_000. - pub fn new_with_factory(executor_factory: EF) -> Self { + pub fn new_with_executor(executor_provider: E) -> Self { Self::new( - executor_factory, + executor_provider, ExecutionStageThresholds::default(), MERKLE_STAGE_DEFAULT_CLEAN_THRESHOLD, PruneModes::none(), @@ -145,7 +147,10 @@ impl ExecutionStage { } } -impl ExecutionStage { +impl ExecutionStage +where + E: BlockExecutorProvider, +{ /// Execute the stage. pub fn execute_inner( &mut self, @@ -165,17 +170,20 @@ impl ExecutionStage { let static_file_producer = if self.prune_modes.receipts.is_none() && self.prune_modes.receipts_log_filter.is_empty() { - Some(prepare_static_file_producer(provider, start_block)?) + let mut producer = prepare_static_file_producer(provider, start_block)?; + // Since there might be a database <-> static file inconsistency (read + // `prepare_static_file_producer` for context), we commit the change straight away. + producer.commit()?; + Some(producer) } else { None }; - // Build executor - let mut executor = self.executor_factory.with_state(LatestStateProviderRef::new( + let db = StateProviderDatabase(LatestStateProviderRef::new( provider.tx_ref(), provider.static_file_provider().clone(), )); - executor.set_prune_modes(prune_modes); + let mut executor = self.executor_provider.batch_executor(db, prune_modes); executor.set_tip(max_block); // Progress tracking @@ -214,7 +222,8 @@ impl ExecutionStage { // Execute the block let execute_start = Instant::now(); - executor.execute_and_verify_receipt(&block, td).map_err(|error| StageError::Block { + + executor.execute_one((&block, td).into()).map_err(|error| StageError::Block { block: Box::new(block.header.clone().seal_slow()), error: BlockErrorKind::Execution(error), })?; @@ -246,10 +255,11 @@ impl ExecutionStage { } } let time = Instant::now(); - let state = executor.take_output_state(); + let BatchBlockExecutionOutput { bundle, receipts, first_block } = executor.finalize(); + let state = BundleStateWithReceipts::new(bundle, receipts, first_block); let write_preparation_duration = time.elapsed(); - // Check if we should send a [`CanonStateNotification`] to execution extensions. + // Check if we should send a [`ExExNotification`] to execution extensions. // // Note: Since we only write to `blocks` if there are any ExEx's we don't need to perform // the `has_exexs` check here as well @@ -265,7 +275,7 @@ impl ExecutionStage { // NOTE: We can ignore the error here, since an error means that the channel is closed, // which means the manager has died, which then in turn means the node is shutting down. 
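The control flow of the rewritten execution loop is easier to see in isolation: a batch executor accumulates per-block results via `execute_one` and is consumed by `finalize` into one aggregate output. A hedged mock of that shape, with illustrative types rather than the real `reth-evm` traits:

```rust
// Toy stand-ins for the block executor flow; not the real reth-evm API.
struct Block {
    number: u64,
    gas_used: u64,
}

#[derive(Default)]
struct BatchOutput {
    first_block: Option<u64>,
    total_gas: u64,
}

trait BatchExecutor {
    fn execute_one(&mut self, block: &Block) -> Result<(), String>;
    fn finalize(self) -> BatchOutput;
}

#[derive(Default)]
struct MockExecutor(BatchOutput);

impl BatchExecutor for MockExecutor {
    fn execute_one(&mut self, block: &Block) -> Result<(), String> {
        // Accumulate per-block results; the real executor also verifies receipts here.
        self.0.first_block.get_or_insert(block.number);
        self.0.total_gas += block.gas_used;
        Ok(())
    }
    fn finalize(self) -> BatchOutput {
        self.0
    }
}

fn main() -> Result<(), String> {
    let mut executor = MockExecutor::default();
    for block in [Block { number: 1, gas_used: 21_000 }, Block { number: 2, gas_used: 42_000 }] {
        executor.execute_one(&block)?; // analogous to `executor.execute_one((&block, td).into())`
    }
    let out = executor.finalize(); // analogous to destructuring `BatchBlockExecutionOutput`
    assert_eq!(out.first_block, Some(1));
    assert_eq!(out.total_gas, 63_000);
    Ok(())
}
```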
- let _ = self.exex_manager_handle.send(CanonStateNotification::Commit { new: chain }); + let _ = self.exex_manager_handle.send(ExExNotification::ChainCommitted { new: chain }); } let time = Instant::now(); @@ -384,7 +394,11 @@ fn calculate_gas_used_from_headers( Ok(gas_total) } -impl Stage for ExecutionStage { +impl Stage for ExecutionStage +where + DB: Database, + E: BlockExecutorProvider, +{ /// Return the id of the stage fn id(&self) -> StageId { StageId::Execution @@ -428,18 +442,17 @@ impl Stage for ExecutionStage { // This also updates `PlainStorageState` and `PlainAccountState`. let bundle_state_with_receipts = provider.unwind_or_peek_state::(range.clone())?; - // Construct a `CanonStateNotification` if we have ExEx's installed. + // Construct a `ExExNotification` if we have ExEx's installed. if self.exex_manager_handle.has_exexs() { - // Get the blocks for the unwound range. This is needed for `CanonStateNotification`. + // Get the blocks for the unwound range. This is needed for `ExExNotification`. let blocks = provider.get_take_block_range::(range.clone())?; let chain = Chain::new(blocks, bundle_state_with_receipts, None); // NOTE: We can ignore the error here, since an error means that the channel is closed, // which means the manager has died, which then in turn means the node is shutting down. - let _ = self.exex_manager_handle.send(CanonStateNotification::Reorg { - old: Arc::new(chain), - new: Arc::new(Chain::default()), - }); + let _ = self + .exex_manager_handle + .send(ExExNotification::ChainReverted { old: Arc::new(chain) }); } // Unwind all receipts for transactions in the block range @@ -611,24 +624,25 @@ mod tests { use alloy_rlp::Decodable; use assert_matches::assert_matches; use reth_db::{models::AccountBeforeTx, transaction::DbTxMut}; - use reth_evm_ethereum::EthEvmConfig; + use reth_evm_ethereum::execute::EthExecutorProvider; use reth_interfaces::executor::BlockValidationError; use reth_primitives::{ address, hex_literal::hex, keccak256, stage::StageUnitCheckpoint, Account, Address, Bytecode, ChainSpecBuilder, PruneMode, ReceiptsLogPruneConfig, SealedBlock, StorageEntry, B256, U256, }; - use reth_provider::{test_utils::create_test_provider_factory, AccountReader, ReceiptProvider}; - use reth_revm::EvmProcessorFactory; + use reth_provider::{ + test_utils::create_test_provider_factory, AccountReader, ReceiptProvider, + StaticFileProviderFactory, + }; use std::collections::BTreeMap; - fn stage() -> ExecutionStage> { - let executor_factory = EvmProcessorFactory::new( - Arc::new(ChainSpecBuilder::mainnet().berlin_activated().build()), - EthEvmConfig::default(), - ); + fn stage() -> ExecutionStage { + let executor_provider = EthExecutorProvider::ethereum(Arc::new( + ChainSpecBuilder::mainnet().berlin_activated().build(), + )); ExecutionStage::new( - executor_factory, + executor_provider, ExecutionStageThresholds { max_blocks: Some(100), max_changes: None, @@ -863,7 +877,7 @@ mod tests { mode.receipts_log_filter = random_filter.clone(); } - let mut execution_stage: ExecutionStage> = stage(); + let mut execution_stage = stage(); execution_stage.prune_modes = mode.clone().unwrap_or_default(); let output = execution_stage.execute(&provider, input).unwrap(); diff --git a/crates/stages/src/stages/headers.rs b/crates/stages/src/stages/headers.rs index a862d4afc..f0a8c1811 100644 --- a/crates/stages/src/stages/headers.rs +++ b/crates/stages/src/stages/headers.rs @@ -1,6 +1,7 @@ use futures_util::StreamExt; use reth_codecs::Compact; use reth_config::config::EtlConfig; +use 
reth_consensus::Consensus; use reth_db::{ cursor::{DbCursorRO, DbCursorRW}, database::Database, @@ -10,7 +11,6 @@ use reth_db::{ }; use reth_etl::Collector; use reth_interfaces::{ - consensus::Consensus, p2p::headers::{downloader::HeaderDownloader, error::HeadersDownloaderError}, provider::ProviderError, }; @@ -321,28 +321,48 @@ where ) -> Result<UnwindOutput, StageError> { self.sync_gap.take(); + // First unwind the db tables, until the unwind_to block number. Use the walker to unwind + // HeaderNumbers based on the index in CanonicalHeaders + provider.unwind_table_by_walker::<tables::CanonicalHeaders, tables::HeaderNumbers>( + input.unwind_to, + )?; + provider.unwind_table_by_num::<tables::CanonicalHeaders>(input.unwind_to)?; + provider.unwind_table_by_num::<tables::HeaderTerminalDifficulties>(input.unwind_to)?; + let unfinalized_headers_unwound = + provider.unwind_table_by_num::<tables::Headers>(input.unwind_to)?; + + // Determine how many headers to unwind from the static files based on the highest block and + // the unwind_to block let static_file_provider = provider.static_file_provider(); let highest_block = static_file_provider .get_highest_static_file_block(StaticFileSegment::Headers) .unwrap_or_default(); - let unwound_headers = highest_block - input.unwind_to; - - for block in (input.unwind_to + 1)..=highest_block { - let header_hash = static_file_provider - .block_hash(block)? - .ok_or(ProviderError::HeaderNotFound(block.into()))?; - - provider.tx_ref().delete::<tables::HeaderNumbers>(header_hash, None)?; + let static_file_headers_to_unwind = highest_block - input.unwind_to; + for block_number in (input.unwind_to + 1)..=highest_block { + let hash = static_file_provider.block_hash(block_number)?; + // we have to delete from HeaderNumbers here as well as in the above unwind, since that + // mapping contains entries for both headers in the db and headers in static files + // + // so if we are unwinding past the lowest block in the db, we have to iterate through + // the HeaderNumbers entries that we'll delete in static files below + if let Some(header_hash) = hash { + provider.tx_ref().delete::<tables::HeaderNumbers>(header_hash, None)?; + } } + // Now unwind the static files until the unwind_to block number let mut writer = static_file_provider.latest_writer(StaticFileSegment::Headers)?; - writer.prune_headers(unwound_headers)?; + writer.prune_headers(static_file_headers_to_unwind)?; + // Set the stage checkpoint entities processed based on how much we unwound - we add the + // headers unwound from static files and db let stage_checkpoint = input.checkpoint.headers_stage_checkpoint().map(|stage_checkpoint| HeadersCheckpoint { block_range: stage_checkpoint.block_range, progress: EntitiesCheckpoint { - processed: stage_checkpoint.progress.processed.saturating_sub(unwound_headers), + processed: stage_checkpoint.progress.processed.saturating_sub( + static_file_headers_to_unwind + unfinalized_headers_unwound as u64, + ), total: stage_checkpoint.progress.total, }, }); @@ -363,22 +383,25 @@ mod tests { stage_test_suite, ExecuteStageTestRunner, StageTestRunner, UnwindStageTestRunner, }; use assert_matches::assert_matches; - use reth_interfaces::test_utils::generators::random_header; - use reth_primitives::{stage::StageUnitCheckpoint, B256}; - use reth_provider::ProviderFactory; + use reth_interfaces::test_utils::generators::{self, random_header, random_header_range}; + use reth_primitives::{ + stage::StageUnitCheckpoint, BlockBody, SealedBlock, SealedBlockWithSenders, B256, + }; + use reth_provider::{ + BlockWriter, BundleStateWithReceipts, ProviderFactory, StaticFileProviderFactory, + }; + use reth_trie::{updates::TrieUpdates, HashedPostState}; use test_runner::HeadersTestRunner; mod
test_runner { use super::*; use crate::test_utils::{TestRunnerError, TestStageDB}; + use reth_consensus::test_utils::TestConsensus; use reth_db::{test_utils::TempDatabase, DatabaseEnv}; use reth_downloaders::headers::reverse_headers::{ ReverseHeadersDownloader, ReverseHeadersDownloaderBuilder, }; - use reth_interfaces::test_utils::{ - generators, generators::random_header_range, TestConsensus, TestHeaderDownloader, - TestHeadersClient, - }; + use reth_interfaces::test_utils::{TestHeaderDownloader, TestHeadersClient}; use reth_provider::BlockNumReader; use tokio::sync::watch; @@ -551,6 +574,91 @@ mod tests { stage_test_suite!(HeadersTestRunner, headers); + /// Execute the stage with linear downloader, unwinds, and ensures that the database tables + /// along with the static files are cleaned up. + #[tokio::test] + async fn execute_with_linear_downloader_unwind() { + let mut runner = HeadersTestRunner::with_linear_downloader(); + let (checkpoint, previous_stage) = (1000, 1200); + let input = ExecInput { + target: Some(previous_stage), + checkpoint: Some(StageCheckpoint::new(checkpoint)), + }; + let headers = runner.seed_execution(input).expect("failed to seed execution"); + let rx = runner.execute(input); + + runner.client.extend(headers.iter().rev().map(|h| h.clone().unseal())).await; + + // skip `after_execution` hook for linear downloader + let tip = headers.last().unwrap(); + runner.send_tip(tip.hash()); + + let result = rx.await.unwrap(); + runner.db().factory.static_file_provider().commit().unwrap(); + assert_matches!(result, Ok(ExecOutput { checkpoint: StageCheckpoint { + block_number, + stage_checkpoint: Some(StageUnitCheckpoint::Headers(HeadersCheckpoint { + block_range: CheckpointBlockRange { + from, + to + }, + progress: EntitiesCheckpoint { + processed, + total, + } + })) + }, done: true }) if block_number == tip.number && + from == checkpoint && to == previous_stage && + // -1 because we don't need to download the local head + processed == checkpoint + headers.len() as u64 - 1 && total == tip.number + ); + assert!(runner.validate_execution(input, result.ok()).is_ok(), "validation failed"); + assert!(runner.stage().hash_collector.is_empty()); + assert!(runner.stage().header_collector.is_empty()); + + // let's insert some blocks using append_blocks_with_state + let sealed_headers = + random_header_range(&mut generators::rng(), tip.number..tip.number + 10, tip.hash()); + + // make them sealed blocks with senders by converting them to empty blocks + let sealed_blocks = sealed_headers + .iter() + .map(|header| { + SealedBlockWithSenders::new( + SealedBlock::new(header.clone(), BlockBody::default()), + vec![], + ) + .unwrap() + }) + .collect(); + + // append the blocks + let provider = runner.db().factory.provider_rw().unwrap(); + provider + .append_blocks_with_state( + sealed_blocks, + BundleStateWithReceipts::default(), + HashedPostState::default(), + TrieUpdates::default(), + None, + ) + .unwrap(); + provider.commit().unwrap(); + + // now we can unwind 10 blocks + let unwind_input = UnwindInput { + checkpoint: StageCheckpoint::new(tip.number + 10), + unwind_to: tip.number, + bad_block: None, + }; + + let unwind_output = runner.unwind(unwind_input).await.unwrap(); + assert_eq!(unwind_output.checkpoint.block_number, tip.number); + + // validate the unwind, ensure that the tables are cleaned up + assert!(runner.validate_unwind(unwind_input).is_ok()); + } + /// Execute the stage with linear downloader #[tokio::test] async fn execute_with_linear_downloader() { diff --git 
a/crates/stages/src/stages/merkle.rs b/crates/stages/src/stages/merkle.rs index 9b4eec87f..cdf33b40f 100644 --- a/crates/stages/src/stages/merkle.rs +++ b/crates/stages/src/stages/merkle.rs @@ -1,10 +1,10 @@ use reth_codecs::Compact; +use reth_consensus::ConsensusError; use reth_db::{ database::Database, tables, transaction::{DbTx, DbTxMut}, }; -use reth_interfaces::consensus; use reth_primitives::{ stage::{EntitiesCheckpoint, MerkleCheckpoint, StageCheckpoint, StageId}, trie::StoredSubNode, @@ -21,6 +21,24 @@ use reth_trie::{IntermediateStateRootState, StateRoot, StateRootProgress}; use std::fmt::Debug; use tracing::*; +// TODO: automate the process outlined below so the user can just send in a debugging package +/// The error message that we include in invalid state root errors to tell users what information +/// they should include in a bug report, since true state root errors can be impossible to debug +/// with just basic logs. +pub const INVALID_STATE_ROOT_ERROR_MESSAGE: &str = r#" +Invalid state root error on new payload! +This is an error that likely requires a report to the reth team with additional information. +Please include the following information in your report: + * This error message + * The state root of the block that was rejected + * The output of `reth db stats --checksum` from the database that was being used. This will take a long time to run! + * 50-100 lines of logs before and after the first occurrence of this log message. Please search your log output for the first observed occurrence of MAGIC_STATE_ROOT. + * The debug logs from __the same time period__. To find the default location for these logs, run: + `reth --help | grep -A 4 'log.file.directory'` + +Once you have this information, please submit a github issue at https://github.com/paradigmxyz/reth/issues/new +"#; + /// The default threshold (in number of blocks) for switching from incremental trie building /// of changes to whole rebuild. pub const MERKLE_STAGE_DEFAULT_CLEAN_THRESHOLD: u64 = 5_000; @@ -99,7 +117,7 @@ impl MerkleStage { /// Saves the hashing progress pub fn save_execution_checkpoint( - &mut self, + &self, provider: &DatabaseProviderRW, checkpoint: Option, ) -> Result<(), StageError> { @@ -196,7 +214,10 @@ impl Stage for MerkleStage { let progress = StateRoot::from_tx(tx) .with_intermediate_state(checkpoint.map(IntermediateStateRootState::from)) .root_with_progress() - .map_err(|e| StageError::Fatal(Box::new(e)))?; + .map_err(|e| { + error!(target: "sync::stages::merkle", %e, ?current_block_number, ?to_block, "State root with progress failed! {INVALID_STATE_ROOT_ERROR_MESSAGE}"); + StageError::Fatal(Box::new(e)) + })?; match progress { StateRootProgress::Progress(state, hashed_entries_walked, updates) => { updates.flush(tx)?; @@ -230,7 +251,10 @@ impl Stage for MerkleStage { debug!(target: "sync::stages::merkle::exec", current = ?current_block_number, target = ?to_block, "Updating trie"); let (root, updates) = StateRoot::incremental_root_with_updates(provider.tx_ref(), range) - .map_err(|e| StageError::Fatal(Box::new(e)))?; + .map_err(|e| { + error!(target: "sync::stages::merkle", %e, ?current_block_number, ?to_block, "Incremental state root failed! {INVALID_STATE_ROOT_ERROR_MESSAGE}"); + StageError::Fatal(Box::new(e)) + })?; updates.flush(provider.tx_ref())?; let total_hashed_entries = (provider.count_entries::()? 
+ @@ -325,9 +349,9 @@ fn validate_state_root( if got == expected.state_root { Ok(()) } else { - warn!(target: "sync::stages::merkle", ?target_block, ?got, ?expected, "Failed to verify block state root"); + error!(target: "sync::stages::merkle", ?target_block, ?got, ?expected, "Failed to verify block state root! {INVALID_STATE_ROOT_ERROR_MESSAGE}"); Err(StageError::Block { - error: BlockErrorKind::Validation(consensus::ConsensusError::BodyStateRootDiff( + error: BlockErrorKind::Validation(ConsensusError::BodyStateRootDiff( GotExpected { got, expected: expected.state_root }.into(), )), block: Box::new(expected), @@ -353,7 +377,7 @@ mod tests { use reth_primitives::{ keccak256, stage::StageUnitCheckpoint, SealedBlock, StaticFileSegment, StorageEntry, U256, }; - use reth_provider::providers::StaticFileWriter; + use reth_provider::{providers::StaticFileWriter, StaticFileProviderFactory}; use reth_trie::test_utils::{state_root, state_root_prehashed}; use std::collections::BTreeMap; @@ -558,6 +582,7 @@ mod tests { let hash = last_header.hash_slow(); writer.prune_headers(1).unwrap(); + writer.commit().unwrap(); writer.append_header(last_header, U256::ZERO, hash).unwrap(); writer.commit().unwrap(); diff --git a/crates/stages/src/stages/mod.rs b/crates/stages/src/stages/mod.rs index a40da1c49..7bb88ff96 100644 --- a/crates/stages/src/stages/mod.rs +++ b/crates/stages/src/stages/mod.rs @@ -50,7 +50,7 @@ mod tests { transaction::{DbTx, DbTxMut}, AccountsHistory, DatabaseEnv, }; - use reth_evm_ethereum::EthEvmConfig; + use reth_evm_ethereum::execute::EthExecutorProvider; use reth_exex::ExExManagerHandle; use reth_interfaces::test_utils::generators::{self, random_block}; use reth_primitives::{ @@ -61,7 +61,6 @@ mod tests { providers::StaticFileWriter, AccountExtReader, ProviderFactory, ReceiptProvider, StorageReader, }; - use reth_revm::EvmProcessorFactory; use reth_stages_api::{ExecInput, Stage}; use std::sync::Arc; @@ -140,10 +139,9 @@ mod tests { // Check execution and create receipts and changesets according to the pruning // configuration let mut execution_stage = ExecutionStage::new( - EvmProcessorFactory::new( - Arc::new(ChainSpecBuilder::mainnet().berlin_activated().build()), - EthEvmConfig::default(), - ), + EthExecutorProvider::ethereum(Arc::new( + ChainSpecBuilder::mainnet().berlin_activated().build(), + )), ExecutionStageThresholds { max_blocks: Some(100), max_changes: None, diff --git a/crates/stages/src/stages/sender_recovery.rs b/crates/stages/src/stages/sender_recovery.rs index 04a30cb2e..e078fd954 100644 --- a/crates/stages/src/stages/sender_recovery.rs +++ b/crates/stages/src/stages/sender_recovery.rs @@ -1,3 +1,4 @@ +use reth_consensus::ConsensusError; use reth_db::{ cursor::DbCursorRW, database::Database, @@ -6,7 +7,6 @@ use reth_db::{ transaction::{DbTx, DbTxMut}, RawValue, }; -use reth_interfaces::consensus; use reth_primitives::{ stage::{EntitiesCheckpoint, StageCheckpoint, StageId}, Address, PruneSegment, StaticFileSegment, TransactionSignedNoHash, TxNumber, @@ -209,7 +209,7 @@ fn recover_range( Err(StageError::Block { block: Box::new(sealed_header), error: BlockErrorKind::Validation( - consensus::ConsensusError::TransactionSignerRecoveryError, + ConsensusError::TransactionSignerRecoveryError, ), }) } @@ -292,7 +292,10 @@ mod tests { stage::StageUnitCheckpoint, BlockNumber, PruneCheckpoint, PruneMode, SealedBlock, TransactionSigned, B256, }; - use reth_provider::{providers::StaticFileWriter, PruneCheckpointWriter, TransactionsProvider}; + use reth_provider::{ + 
providers::StaticFileWriter, PruneCheckpointWriter, StaticFileProviderFactory, + TransactionsProvider, + }; use super::*; use crate::test_utils::{ diff --git a/crates/stages/src/stages/tx_lookup.rs b/crates/stages/src/stages/tx_lookup.rs index 918be21c5..342183905 100644 --- a/crates/stages/src/stages/tx_lookup.rs +++ b/crates/stages/src/stages/tx_lookup.rs @@ -153,18 +153,19 @@ impl Stage for TransactionLookupStage { ); } + let key = RawKey::::from_vec(hash); if append_only { - txhash_cursor.append( - RawKey::::from_vec(hash), - RawValue::::from_vec(number), - )?; + txhash_cursor.append(key, RawValue::::from_vec(number))? } else { - txhash_cursor.insert( - RawKey::::from_vec(hash), - RawValue::::from_vec(number), - )?; + txhash_cursor.insert(key, RawValue::::from_vec(number))? } } + + trace!(target: "sync::stages::transaction_lookup", + total_hashes, + "Transaction hashes inserted" + ); + break } } @@ -248,7 +249,7 @@ mod tests { generators::{random_block, random_block_range}, }; use reth_primitives::{stage::StageUnitCheckpoint, BlockNumber, SealedBlock, B256}; - use reth_provider::providers::StaticFileWriter; + use reth_provider::{providers::StaticFileWriter, StaticFileProviderFactory}; use std::ops::Sub; // Implement stage test suite. diff --git a/crates/stages/src/test_utils/macros.rs b/crates/stages/src/test_utils/macros.rs index 0ce346d70..11fb46cde 100644 --- a/crates/stages/src/test_utils/macros.rs +++ b/crates/stages/src/test_utils/macros.rs @@ -13,7 +13,7 @@ macro_rules! stage_test_suite { // Run stage execution let result = runner.execute(input).await; - runner.db().factory.static_file_provider().commit().unwrap(); + reth_provider::StaticFileProviderFactory::static_file_provider(&runner.db().factory).commit().unwrap(); // Check that the result is returned and the stage does not panic. // The return result with empty db is stage-specific. @@ -46,7 +46,7 @@ macro_rules! stage_test_suite { // Assert the successful result let result = rx.await.unwrap(); - runner.db().factory.static_file_provider().commit().unwrap(); + reth_provider::StaticFileProviderFactory::static_file_provider(&runner.db().factory).commit().unwrap(); assert_matches::assert_matches!( result, @@ -76,7 +76,7 @@ macro_rules! stage_test_suite { // Run stage unwind let rx = runner.unwind(input).await; - runner.db().factory.static_file_provider().commit().unwrap(); + reth_provider::StaticFileProviderFactory::static_file_provider(&runner.db().factory).commit().unwrap(); assert_matches::assert_matches!( rx, @@ -110,7 +110,7 @@ macro_rules! stage_test_suite { // Assert the successful execution result let result = rx.await.unwrap(); - runner.db().factory.static_file_provider().commit().unwrap(); + reth_provider::StaticFileProviderFactory::static_file_provider(&runner.db().factory).commit().unwrap(); assert_matches::assert_matches!( result, @@ -179,7 +179,7 @@ macro_rules! 
stage_test_suite_ext { // Assert the successful result let result = rx.await.unwrap(); - runner.db().factory.static_file_provider().commit().unwrap(); + reth_provider::StaticFileProviderFactory::static_file_provider(&runner.db().factory).commit().unwrap(); assert_matches::assert_matches!( result, diff --git a/crates/stages/src/test_utils/test_db.rs b/crates/stages/src/test_utils/test_db.rs index a080c9c8f..5fe65a737 100644 --- a/crates/stages/src/test_utils/test_db.rs +++ b/crates/stages/src/test_utils/test_db.rs @@ -18,7 +18,7 @@ use reth_primitives::{ }; use reth_provider::{ providers::{StaticFileProviderRWRefMut, StaticFileWriter}, - HistoryWriter, ProviderError, ProviderFactory, + HistoryWriter, ProviderError, ProviderFactory, StaticFileProviderFactory, }; use std::{collections::BTreeMap, path::Path, sync::Arc}; use tempfile::TempDir; diff --git a/crates/static-file/Cargo.toml b/crates/static-file/Cargo.toml index b3fc1b93d..1345b2f23 100644 --- a/crates/static-file/Cargo.toml +++ b/crates/static-file/Cargo.toml @@ -25,7 +25,6 @@ tokio-stream.workspace = true # misc tracing.workspace = true -clap = { workspace = true, features = ["derive"], optional = true } rayon.workspace = true parking_lot = { workspace = true, features = ["send_guard", "arc_lock"] } @@ -37,4 +36,3 @@ assert_matches.workspace = true tempfile.workspace = true [features] -clap = ["dep:clap"] diff --git a/crates/static-file/README.md b/crates/static-file/README.md index b6eb385dd..3aab25a97 100644 --- a/crates/static-file/README.md +++ b/crates/static-file/README.md @@ -106,7 +106,7 @@ In descending order of abstraction hierarchy: [`StaticFileProducer`](../../crates/static-file/src/static_file_producer.rs#L25): A `reth` [hook](../../crates/consensus/beacon/src/engine/hooks/static_file.rs) service that when triggered, **copies** finalized data from the database to the latest static file. Upon completion, it updates the internal index at `StaticFileProvider` with the new highest block and transaction on each specific segment. -[`StaticFileProvider`](../../crates/storage/provider/src/providers/static_file/manager.rs#L44) A provider similar to `DatabaseProvider`, **managing all existing static_file files** and selecting the optimal one (by range and segment type) to fulfill a request. **A single instance is shared across all components and should be instantiated only once within `ProviderFactory`**. An immutable reference is given everytime `ProviderFactory` creates a new `DatabaseProvider`. +[`StaticFileProvider`](../../crates/storage/provider/src/providers/static_file/manager.rs#L44) A provider similar to `DatabaseProvider`, **managing all existing static_file files** and selecting the optimal one (by range and segment type) to fulfill a request. **A single instance is shared across all components and should be instantiated only once within `ProviderFactory`**. An immutable reference is given every time `ProviderFactory` creates a new `DatabaseProvider`. [`StaticFileJarProvider`](../../crates/storage/provider/src/providers/static_file/jar.rs#L42) A provider similar to `DatabaseProvider` that provides access to a **single static file segment's data** on a specific block range.
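One subtlety in the `move_to_static_files` change earlier in this diff is worth spelling out: copy targets are derived per segment, but the database prune must stop at the *lowest* checkpoint across segments, otherwise the database could end up behind the static files for some segment. A small sketch of that invariant (illustrative types only; note that `None` ranks below any `Some`, matching the `.min().expect("exists")` in the real code):

```rust
// Sketch of the "copy then prune" invariant: data is first copied to static files per
// segment, then the database is pruned only up to the lowest stage checkpoint.
fn move_to_static_files_sketch(
    checkpoints: [Option<u64>; 3], // Headers, Execution (receipts), Bodies (transactions)
    db_prune: &mut impl FnMut(u64),
) {
    // Option<u64> ordering: None < Some(_), so a missing checkpoint blocks pruning entirely.
    let lowest = checkpoints.into_iter().min().expect("exists");
    if let Some(prune_tip) = lowest {
        db_prune(prune_tip);
    }
}

fn main() {
    let mut pruned_to = None;
    move_to_static_files_sketch([Some(10), Some(8), Some(12)], &mut |tip| pruned_to = Some(tip));
    assert_eq!(pruned_to, Some(8)); // prune only up to the lowest checkpoint

    pruned_to = None;
    move_to_static_files_sketch([Some(10), None, Some(12)], &mut |tip| pruned_to = Some(tip));
    assert_eq!(pruned_to, None); // a missing checkpoint means nothing is pruned
}
```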
diff --git a/crates/static-file/src/static_file_producer.rs b/crates/static-file/src/static_file_producer.rs index 2af4f8cac..c7a365c9a 100644 --- a/crates/static-file/src/static_file_producer.rs +++ b/crates/static-file/src/static_file_producer.rs @@ -255,7 +255,7 @@ mod tests { }; use reth_provider::{ providers::{StaticFileProvider, StaticFileWriter}, - ProviderFactory, + ProviderFactory, StaticFileProviderFactory, }; use reth_stages::test_utils::{StorageKind, TestStageDB}; use std::{ @@ -272,12 +272,13 @@ mod tests { db.insert_blocks(blocks.iter(), StorageKind::Database(None)).expect("insert blocks"); // Unwind headers from static_files and manually insert them into the database, so we're // able to check that static_file_producer works - db.factory - .static_file_provider() + let static_file_provider = db.factory.static_file_provider(); + let mut static_file_writer = static_file_provider .latest_writer(StaticFileSegment::Headers) - .expect("get static file writer for headers") - .prune_headers(blocks.len() as u64) - .expect("prune headers"); + .expect("get static file writer for headers"); + static_file_writer.prune_headers(blocks.len() as u64).unwrap(); + static_file_writer.commit().expect("prune headers"); + let tx = db.factory.db_ref().tx_mut().expect("init tx"); blocks.iter().for_each(|block| { TestStageDB::insert_header(None, &tx, &block.header, U256::ZERO) diff --git a/crates/storage/codecs/Cargo.toml b/crates/storage/codecs/Cargo.toml index 31f954f86..958ccf917 100644 --- a/crates/storage/codecs/Cargo.toml +++ b/crates/storage/codecs/Cargo.toml @@ -11,18 +11,22 @@ repository.workspace = true workspace = true [dependencies] +# reth reth-codecs-derive = { path = "./derive", default-features = false } +# eth alloy-eips = { workspace = true, optional = true } +alloy-genesis = { workspace = true, optional = true } alloy-primitives.workspace = true +# misc bytes.workspace = true +modular-bitfield = { workspace = true, optional = true } +serde.workspace = true [dev-dependencies] alloy-eips = { workspace = true, default-features = false, features = ["arbitrary", "serde"] } alloy-primitives = { workspace = true, features = ["arbitrary", "serde"] } -serde.workspace = true -modular-bitfield.workspace = true test-fuzz.workspace = true serde_json.workspace = true @@ -33,5 +37,5 @@ proptest-derive.workspace = true [features] default = ["std", "alloy"] std = ["alloy-primitives/std", "bytes/std"] -alloy = ["alloy-eips"] +alloy = ["dep:alloy-eips", "dep:alloy-genesis", "dep:modular-bitfield"] optimism = ["reth-codecs-derive/optimism"] diff --git a/crates/storage/codecs/derive/src/compact/flags.rs b/crates/storage/codecs/derive/src/compact/flags.rs index 650d97ea8..24757d8e6 100644 --- a/crates/storage/codecs/derive/src/compact/flags.rs +++ b/crates/storage/codecs/derive/src/compact/flags.rs @@ -36,7 +36,7 @@ pub(crate) fn generate_flag_struct( }; if total_bits == 0 { - return placeholder_flag_struct(&flags_ident) + return placeholder_flag_struct(ident, &flags_ident) } let (total_bytes, unused_bits) = pad_flag_struct(total_bits, &mut field_flags); @@ -51,9 +51,16 @@ pub(crate) fn generate_flag_struct( let docs = format!("Fieldset that facilitates compacting the parent type. Used bytes: {total_bytes} | Unused bits: {unused_bits}"); + let bitflag_encoded_bytes = format!("Used bytes by [`{flags_ident}`]"); // Generate the flag struct. quote! 
{ + impl #ident { + #[doc = #bitflag_encoded_bytes] + pub const fn bitflag_encoded_bytes() -> usize { + #total_bytes as usize + } + } pub use #mod_flags_ident::#flags_ident; #[allow(non_snake_case)] mod #mod_flags_ident { @@ -146,8 +153,22 @@ fn pad_flag_struct(total_bits: u8, field_flags: &mut Vec) -> (u8, } /// Placeholder struct for when there are no bitfields to be added. -fn placeholder_flag_struct(flags: &Ident) -> TokenStream2 { +fn placeholder_flag_struct(ident: &Ident, flags: &Ident) -> TokenStream2 { + let bitflag_encoded_bytes = format!("Used bytes by [`{flags}`]"); + let bitflag_unused_bits = format!("Unused bits for new fields by [`{flags}`]"); quote! { + impl #ident { + #[doc = #bitflag_encoded_bytes] + pub const fn bitflag_encoded_bytes() -> usize { + 0 + } + + #[doc = #bitflag_unused_bits] + pub const fn bitflag_unused_bits() -> usize { + 0 + } + } + /// Placeholder struct for when there is no need for a fieldset. Doesn't actually write or read any data. #[derive(Debug, Default)] pub struct #flags { diff --git a/crates/storage/codecs/derive/src/compact/generator.rs b/crates/storage/codecs/derive/src/compact/generator.rs index 8cd9070bb..03dab1a14 100644 --- a/crates/storage/codecs/derive/src/compact/generator.rs +++ b/crates/storage/codecs/derive/src/compact/generator.rs @@ -58,12 +58,7 @@ fn generate_from_compact(fields: &FieldList, ident: &Ident, is_zstd: bool) -> To // it's hard to figure out with derive_macro which types have Bytes fields. // // This removes the requirement of the field to be placed last in the struct. - known_types.extend_from_slice(&[ - "TransactionKind", - "AccessList", - "Signature", - "CheckpointBlockRange", - ]); + known_types.extend_from_slice(&["TxKind", "AccessList", "Signature", "CheckpointBlockRange"]); // let mut handle = FieldListHandler::new(fields); let is_enum = fields.iter().any(|field| matches!(field, FieldTypes::EnumVariant(_))); diff --git a/crates/storage/codecs/derive/src/compact/mod.rs b/crates/storage/codecs/derive/src/compact/mod.rs index abc785edd..e67adb6fd 100644 --- a/crates/storage/codecs/derive/src/compact/mod.rs +++ b/crates/storage/codecs/derive/src/compact/mod.rs @@ -161,7 +161,7 @@ fn should_use_alt_impl(ftype: &String, segment: &syn::PathSegment) -> bool { /// length. pub fn get_bit_size(ftype: &str) -> u8 { match ftype { - "TransactionKind" | "bool" | "Option" | "Signature" => 1, + "TransactionKind" | "TxKind" | "bool" | "Option" | "Signature" => 1, "TxType" => 2, "u64" | "BlockNumber" | "TxNumber" | "ChainId" | "NumTransactions" => 4, "u128" => 5, @@ -185,18 +185,18 @@ mod tests { #[test] fn gen() { let f_struct = quote! { - #[derive(Debug, PartialEq, Clone)] - pub struct TestStruct { - f_u64: u64, - f_u256: U256, - f_bool_t: bool, - f_bool_f: bool, - f_option_none: Option, - f_option_some: Option, - f_option_some_u64: Option, - f_vec_empty: Vec, - f_vec_some: Vec
, - } + #[derive(Debug, PartialEq, Clone)] + pub struct TestStruct { + f_u64: u64, + f_u256: U256, + f_bool_t: bool, + f_bool_f: bool, + f_option_none: Option, + f_option_some: Option, + f_option_some_u64: Option, + f_vec_empty: Vec, + f_vec_some: Vec
, + } }; // Generate code that will impl the `Compact` trait. @@ -208,7 +208,15 @@ mod tests { // Expected output in a TokenStream format. Commas matter! let should_output = quote! { + impl TestStruct { + #[doc = "Used bytes by [`TestStructFlags`]"] + pub const fn bitflag_encoded_bytes() -> usize { + 2u8 as usize + } + } + pub use TestStruct_flags::TestStructFlags; + #[allow(non_snake_case)] mod TestStruct_flags { use bytes::Buf; diff --git a/crates/storage/codecs/src/alloy/genesis_account.rs b/crates/storage/codecs/src/alloy/genesis_account.rs new file mode 100644 index 000000000..619d9db51 --- /dev/null +++ b/crates/storage/codecs/src/alloy/genesis_account.rs @@ -0,0 +1,67 @@ +use crate::Compact; +use alloy_genesis::GenesisAccount as AlloyGenesisAccount; +use alloy_primitives::{Bytes, B256, U256}; +use reth_codecs_derive::main_codec; + +/// GenesisAccount acts as a bridge which simplifies the Compact implementation for AlloyGenesisAccount. +/// +/// Notice: Make sure this struct is 1:1 with `alloy_genesis::GenesisAccount` +#[main_codec] +#[derive(Debug, Clone, PartialEq, Eq, Default)] +struct GenesisAccount { + /// The nonce of the account at genesis. + nonce: Option, + /// The balance of the account at genesis. + balance: U256, + /// The account's bytecode at genesis. + code: Option, + /// The account's storage at genesis. + storage: Option, + /// The account's private key. Should only be used for testing. + private_key: Option, +} + +#[main_codec] +#[derive(Debug, Clone, PartialEq, Eq, Default)] +struct StorageEntries { + entries: Vec, +} + +#[main_codec] +#[derive(Debug, Clone, PartialEq, Eq, Default)] +struct StorageEntry { + key: B256, + value: B256, +} + +impl Compact for AlloyGenesisAccount { + fn to_compact(self, buf: &mut B) -> usize + where + B: bytes::BufMut + AsMut<[u8]>, + { + let account = GenesisAccount { + nonce: self.nonce, + balance: self.balance, + code: self.code, + storage: self.storage.map(|s| StorageEntries { + entries: s.into_iter().map(|(key, value)| StorageEntry { key, value }).collect(), + }), + private_key: self.private_key, + }; + account.to_compact(buf) + } + + fn from_compact(buf: &[u8], len: usize) -> (Self, &[u8]) { + let (account, _) = GenesisAccount::from_compact(buf, len); + let alloy_account = AlloyGenesisAccount { + nonce: account.nonce, + balance: account.balance, + code: account.code, + storage: account + .storage + .map(|s| s.entries.into_iter().map(|entry| (entry.key, entry.value)).collect()), + private_key: account.private_key, + }; + (alloy_account, buf) + } +} diff --git a/crates/storage/codecs/src/alloy/mod.rs b/crates/storage/codecs/src/alloy/mod.rs index 7d7a794fe..664ab2607 100644 --- a/crates/storage/codecs/src/alloy/mod.rs +++ b/crates/storage/codecs/src/alloy/mod.rs @@ -1,3 +1,5 @@ mod access_list; +mod genesis_account; mod log; mod txkind; +mod withdrawal; diff --git a/crates/storage/codecs/src/alloy/txkind.rs b/crates/storage/codecs/src/alloy/txkind.rs index 220384bdd..e1dffa15b 100644 --- a/crates/storage/codecs/src/alloy/txkind.rs +++ b/crates/storage/codecs/src/alloy/txkind.rs @@ -21,7 +21,7 @@ impl Compact for TxKind { 0 => (TxKind::Create, buf), 1 => { let (addr, buf) = Address::from_compact(buf, buf.len()); - (TxKind::Call(addr), buf) + (addr.into(), buf) } _ => { unreachable!("Junk data in database: unknown TransactionKind variant",) diff --git a/crates/storage/codecs/src/alloy/withdrawal.rs b/crates/storage/codecs/src/alloy/withdrawal.rs new file mode 100644 index 000000000..5cdc1a667 --- /dev/null +++ 
b/crates/storage/codecs/src/alloy/withdrawal.rs @@ -0,0 +1,80 @@ +use crate::Compact; +use alloy_eips::eip4895::Withdrawal as AlloyWithdrawal; +use alloy_primitives::Address; +use reth_codecs_derive::main_codec; + +/// Withdrawal acts as a bridge which simplifies the Compact implementation for AlloyWithdrawal. +/// +/// Notice: Make sure this struct is 1:1 with `alloy_eips::eip4895::Withdrawal` +#[main_codec] +#[derive(Debug, Clone, PartialEq, Eq, Default)] +struct Withdrawal { + /// Monotonically increasing identifier issued by consensus layer. + index: u64, + /// Index of validator associated with withdrawal. + validator_index: u64, + /// Target address for withdrawn ether. + address: Address, + /// Value of the withdrawal in gwei. + amount: u64, +} + +impl Compact for AlloyWithdrawal { + fn to_compact(self, buf: &mut B) -> usize + where + B: bytes::BufMut + AsMut<[u8]>, + { + let withdrawal = Withdrawal { + index: self.index, + validator_index: self.validator_index, + address: self.address, + amount: self.amount, + }; + withdrawal.to_compact(buf) + } + + fn from_compact(buf: &[u8], len: usize) -> (Self, &[u8]) { + let (withdrawal, _) = Withdrawal::from_compact(buf, len); + let alloy_withdrawal = AlloyWithdrawal { + index: withdrawal.index, + validator_index: withdrawal.validator_index, + address: withdrawal.address, + amount: withdrawal.amount, + }; + (alloy_withdrawal, buf) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use proptest::proptest; + + proptest! { + #[test] + fn roundtrip(withdrawal: AlloyWithdrawal) { + let mut compacted_withdrawal = Vec::::new(); + let len = withdrawal.to_compact(&mut compacted_withdrawal); + let (decoded, _) = AlloyWithdrawal::from_compact(&compacted_withdrawal, len); + assert_eq!(withdrawal, decoded) + } + } + + // each value in the database has an extra field named flags that encodes metadata about other + // fields in the value, e.g. offset and length. 
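To make that concrete, here is a hedged sketch of the size arithmetic behind the `bitflag_encoded_bytes` assertion below (illustrative only, not the derive macro itself). It assumes the per-type flag sizes from `get_bit_size` above: each `u64` field contributes 4 bits, while fixed-size fields such as `Address` are assumed to contribute none.

```rust
/// Sums the per-field flag bits and rounds up to whole bytes,
/// mirroring how the derive macro pads its flags struct.
fn bitflag_encoded_bytes(bits_per_field: &[u8]) -> usize {
    let total_bits: usize = bits_per_field.iter().map(|&b| b as usize).sum();
    (total_bits + 7) / 8
}

fn main() {
    // Withdrawal: index, validator_index, amount are u64 (4 bits each);
    // address is assumed to add no flag bits -> 12 bits -> 2 bytes.
    assert_eq!(bitflag_encoded_bytes(&[4, 4, 0, 4]), 2);
}
```

One more `u64` field would still fit in those 2 bytes (16 bits), but a fifth `u64` would grow the prefix to 3 bytes and change the stored layout.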
+ // + // this check is to ensure we do not inadvertently add too many fields to a struct which would + // expand the flags field and break backwards compatibility + #[test] + fn test_ensure_backwards_compatibility() { + #[cfg(not(feature = "optimism"))] + { + assert_eq!(Withdrawal::bitflag_encoded_bytes(), 2); + } + + #[cfg(feature = "optimism")] + { + assert_eq!(Withdrawal::bitflag_encoded_bytes(), 2); + } + } +} diff --git a/crates/storage/db/Cargo.toml b/crates/storage/db/Cargo.toml index 461a84f3e..97b556346 100644 --- a/crates/storage/db/Cargo.toml +++ b/crates/storage/db/Cargo.toml @@ -16,7 +16,10 @@ workspace = true reth-primitives.workspace = true reth-interfaces.workspace = true reth-codecs.workspace = true -reth-libmdbx = { workspace = true, optional = true, features = ["return-borrowed", "read-tx-timeouts"] } +reth-libmdbx = { workspace = true, optional = true, features = [ + "return-borrowed", + "read-tx-timeouts", +] } reth-nippy-jar.workspace = true reth-tracing.workspace = true @@ -37,7 +40,7 @@ tempfile = { workspace = true, optional = true } derive_more.workspace = true eyre.workspace = true paste.workspace = true -rustc-hash = "1.1.0" +rustc-hash.workspace = true # arbitrary utils arbitrary = { workspace = true, features = ["derive"], optional = true } @@ -58,7 +61,11 @@ serde_json.workspace = true tempfile.workspace = true test-fuzz.workspace = true -pprof = { workspace = true, features = ["flamegraph", "frame-pointer", "criterion"] } +pprof = { workspace = true, features = [ + "flamegraph", + "frame-pointer", + "criterion", +] } criterion.workspace = true iai-callgrind = "0.10.2" @@ -81,6 +88,7 @@ arbitrary = [ "dep:proptest", "dep:proptest-derive", ] +optimism = [] [[bench]] name = "hash_keys" diff --git a/crates/storage/db/benches/hash_keys.rs b/crates/storage/db/benches/hash_keys.rs index 5376bf504..ee21883fe 100644 --- a/crates/storage/db/benches/hash_keys.rs +++ b/crates/storage/db/benches/hash_keys.rs @@ -136,7 +136,7 @@ where T::Key: std::hash::Hash + Arbitrary, T::Value: Arbitrary, { - let strat = proptest::collection::vec( + let strategy = proptest::collection::vec( any_with::>(( ::Parameters::default(), ::Parameters::default(), @@ -147,8 +147,8 @@ where .boxed(); let mut runner = TestRunner::new(ProptestConfig::default()); - let mut preload = strat.new_tree(&mut runner).unwrap().current(); - let mut input = strat.new_tree(&mut runner).unwrap().current(); + let mut preload = strategy.new_tree(&mut runner).unwrap().current(); + let mut input = strategy.new_tree(&mut runner).unwrap().current(); let mut unique_keys = HashSet::new(); preload.retain(|(k, _)| unique_keys.insert(k.clone())); diff --git a/crates/storage/db/src/abstraction/common.rs b/crates/storage/db/src/abstraction/common.rs index 9bce16e39..eef412935 100644 --- a/crates/storage/db/src/abstraction/common.rs +++ b/crates/storage/db/src/abstraction/common.rs @@ -23,7 +23,7 @@ mod sealed { use crate::{database::Database, mock::DatabaseMock, DatabaseEnv}; use std::sync::Arc; - /// Sealed trait to limit the implementors of the Database trait. + /// Sealed trait to limit the implementers of the Database trait. pub trait Sealed: Sized {} impl Sealed for &DB {} diff --git a/crates/storage/db/src/lib.rs b/crates/storage/db/src/lib.rs index c0737cc42..5425c8074 100644 --- a/crates/storage/db/src/lib.rs +++ b/crates/storage/db/src/lib.rs @@ -38,7 +38,7 @@ //! //! # Overview //! -//! An overview of the current data model of reth can be found in the [`tables`] module. +//! 
An overview of the current data model of reth can be found in the [`mod@tables`] module. //! //! [`Database`]: crate::abstraction::database::Database //! [`DbTx`]: crate::abstraction::transaction::DbTx diff --git a/crates/storage/db/src/tables/codecs/compact.rs b/crates/storage/db/src/tables/codecs/compact.rs index c302c6a48..aed8d97ef 100644 --- a/crates/storage/db/src/tables/codecs/compact.rs +++ b/crates/storage/db/src/tables/codecs/compact.rs @@ -50,7 +50,9 @@ impl_compression_for_compact!( CompactU256, StageCheckpoint, PruneCheckpoint, - ClientVersion + ClientVersion, + // Non-DB + GenesisAccount ); macro_rules! impl_compression_fixed_compact { @@ -121,3 +123,95 @@ macro_rules! add_wrapper_struct { add_wrapper_struct!((U256, CompactU256)); add_wrapper_struct!((u64, CompactU64)); add_wrapper_struct!((ClientVersion, CompactClientVersion)); + +#[cfg(test)] +mod tests { + use crate::{ + codecs::{ + compact::{CompactClientVersion, CompactU64}, + CompactU256, + }, + models::{StoredBlockBodyIndices, StoredBlockOmmers, StoredBlockWithdrawals}, + }; + use reth_primitives::{ + stage::{ + AccountHashingCheckpoint, CheckpointBlockRange, EntitiesCheckpoint, + ExecutionCheckpoint, HeadersCheckpoint, IndexHistoryCheckpoint, StageCheckpoint, + StageUnitCheckpoint, StorageHashingCheckpoint, + }, + Account, Header, PruneCheckpoint, PruneMode, PruneSegment, Receipt, ReceiptWithBloom, + SealedHeader, TxEip1559, TxEip2930, TxEip4844, TxLegacy, Withdrawals, + }; + + // each value in the database has an extra field named flags that encodes metadata about other + // fields in the value, e.g. offset and length. + // + // this check is to ensure we do not inadvertently add too many fields to a struct which would + // expand the flags field and break backwards compatibility + #[test] + fn test_ensure_backwards_compatibility() { + #[cfg(not(feature = "optimism"))] + { + assert_eq!(Account::bitflag_encoded_bytes(), 2); + assert_eq!(AccountHashingCheckpoint::bitflag_encoded_bytes(), 1); + assert_eq!(CheckpointBlockRange::bitflag_encoded_bytes(), 1); + assert_eq!(CompactClientVersion::bitflag_encoded_bytes(), 0); + assert_eq!(CompactU256::bitflag_encoded_bytes(), 1); + assert_eq!(CompactU64::bitflag_encoded_bytes(), 1); + assert_eq!(EntitiesCheckpoint::bitflag_encoded_bytes(), 1); + assert_eq!(ExecutionCheckpoint::bitflag_encoded_bytes(), 0); + assert_eq!(Header::bitflag_encoded_bytes(), 4); + assert_eq!(HeadersCheckpoint::bitflag_encoded_bytes(), 0); + assert_eq!(IndexHistoryCheckpoint::bitflag_encoded_bytes(), 0); + assert_eq!(PruneCheckpoint::bitflag_encoded_bytes(), 1); + assert_eq!(PruneMode::bitflag_encoded_bytes(), 1); + assert_eq!(PruneSegment::bitflag_encoded_bytes(), 1); + assert_eq!(Receipt::bitflag_encoded_bytes(), 1); + assert_eq!(ReceiptWithBloom::bitflag_encoded_bytes(), 0); + assert_eq!(SealedHeader::bitflag_encoded_bytes(), 0); + assert_eq!(StageCheckpoint::bitflag_encoded_bytes(), 1); + assert_eq!(StageUnitCheckpoint::bitflag_encoded_bytes(), 1); + assert_eq!(StoredBlockBodyIndices::bitflag_encoded_bytes(), 1); + assert_eq!(StoredBlockOmmers::bitflag_encoded_bytes(), 0); + assert_eq!(StoredBlockWithdrawals::bitflag_encoded_bytes(), 0); + assert_eq!(StorageHashingCheckpoint::bitflag_encoded_bytes(), 1); + assert_eq!(TxEip1559::bitflag_encoded_bytes(), 4); + assert_eq!(TxEip2930::bitflag_encoded_bytes(), 3); + assert_eq!(TxEip4844::bitflag_encoded_bytes(), 5); + assert_eq!(TxLegacy::bitflag_encoded_bytes(), 3); + assert_eq!(Withdrawals::bitflag_encoded_bytes(), 0); + } + + #[cfg(feature = 
"optimism")] + { + assert_eq!(Account::bitflag_encoded_bytes(), 2); + assert_eq!(AccountHashingCheckpoint::bitflag_encoded_bytes(), 1); + assert_eq!(CheckpointBlockRange::bitflag_encoded_bytes(), 1); + assert_eq!(CompactClientVersion::bitflag_encoded_bytes(), 0); + assert_eq!(CompactU256::bitflag_encoded_bytes(), 1); + assert_eq!(CompactU64::bitflag_encoded_bytes(), 1); + assert_eq!(EntitiesCheckpoint::bitflag_encoded_bytes(), 1); + assert_eq!(ExecutionCheckpoint::bitflag_encoded_bytes(), 0); + assert_eq!(Header::bitflag_encoded_bytes(), 4); + assert_eq!(HeadersCheckpoint::bitflag_encoded_bytes(), 0); + assert_eq!(IndexHistoryCheckpoint::bitflag_encoded_bytes(), 0); + assert_eq!(PruneCheckpoint::bitflag_encoded_bytes(), 1); + assert_eq!(PruneMode::bitflag_encoded_bytes(), 1); + assert_eq!(PruneSegment::bitflag_encoded_bytes(), 1); + assert_eq!(Receipt::bitflag_encoded_bytes(), 2); + assert_eq!(ReceiptWithBloom::bitflag_encoded_bytes(), 0); + assert_eq!(SealedHeader::bitflag_encoded_bytes(), 0); + assert_eq!(StageCheckpoint::bitflag_encoded_bytes(), 1); + assert_eq!(StageUnitCheckpoint::bitflag_encoded_bytes(), 1); + assert_eq!(StoredBlockBodyIndices::bitflag_encoded_bytes(), 1); + assert_eq!(StoredBlockOmmers::bitflag_encoded_bytes(), 0); + assert_eq!(StoredBlockWithdrawals::bitflag_encoded_bytes(), 0); + assert_eq!(StorageHashingCheckpoint::bitflag_encoded_bytes(), 1); + assert_eq!(TxEip1559::bitflag_encoded_bytes(), 4); + assert_eq!(TxEip2930::bitflag_encoded_bytes(), 3); + assert_eq!(TxEip4844::bitflag_encoded_bytes(), 5); + assert_eq!(TxLegacy::bitflag_encoded_bytes(), 3); + assert_eq!(Withdrawals::bitflag_encoded_bytes(), 0); + } + } +} diff --git a/crates/storage/db/src/tables/mod.rs b/crates/storage/db/src/tables/mod.rs index 676ed5ebc..b10662325 100644 --- a/crates/storage/db/src/tables/mod.rs +++ b/crates/storage/db/src/tables/mod.rs @@ -101,6 +101,7 @@ pub trait TableViewer { } } +#[macro_export] /// Defines all the tables in the database. macro_rules! tables { (@bool) => { false }; diff --git a/crates/storage/libmdbx-rs/Cargo.toml b/crates/storage/libmdbx-rs/Cargo.toml index 2330b6f79..2042cd896 100644 --- a/crates/storage/libmdbx-rs/Cargo.toml +++ b/crates/storage/libmdbx-rs/Cargo.toml @@ -22,7 +22,7 @@ indexmap = "2" libc = "0.2" parking_lot.workspace = true thiserror.workspace = true -dashmap = { version = "5.5.3", features = ["inline"], optional = true } +dashmap = { workspace = true, features = ["inline"], optional = true } tracing.workspace = true ffi = { package = "reth-mdbx-sys", path = "./mdbx-sys" } diff --git a/crates/storage/libmdbx-rs/mdbx-sys/Cargo.toml b/crates/storage/libmdbx-rs/mdbx-sys/Cargo.toml index cebae37b3..fbdad4c51 100644 --- a/crates/storage/libmdbx-rs/mdbx-sys/Cargo.toml +++ b/crates/storage/libmdbx-rs/mdbx-sys/Cargo.toml @@ -15,6 +15,5 @@ name = "reth_mdbx_sys" libc = "0.2" [build-dependencies] -## temp pin -cc = "=1.0.83" +cc = "1.0" bindgen = { version = "0.69", default-features = false, features = ["runtime"] } diff --git a/crates/storage/libmdbx-rs/src/environment.rs b/crates/storage/libmdbx-rs/src/environment.rs index 31430fb99..ba7385b94 100644 --- a/crates/storage/libmdbx-rs/src/environment.rs +++ b/crates/storage/libmdbx-rs/src/environment.rs @@ -501,7 +501,7 @@ impl Default for Geometry { /// /// # Arguments /// -/// * `process_id` – A proceess id of the reader process. +/// * `process_id` – A process id of the reader process. /// * `thread_id` – A thread id of the reader thread. 
/// * `read_txn_id` – An oldest read transaction number on which stalled. /// * `gap` – A lag from the last committed txn. @@ -950,8 +950,7 @@ mod tests { .open(tempdir.path()) .unwrap(); - // Insert some data in the database, so the read transaction can lock on the static file of - // it + // Insert some data in the database, so the read transaction can lock on the snapshot of it { let tx = env.begin_rw_txn().unwrap(); let db = tx.open_db(None).unwrap(); @@ -964,8 +963,7 @@ mod tests { // Create a read transaction let _tx_ro = env.begin_ro_txn().unwrap(); - // Change previously inserted data, so the read transaction would use the previous static - // file + // Change previously inserted data, so the read transaction would use the previous snapshot { let tx = env.begin_rw_txn().unwrap(); let db = tx.open_db(None).unwrap(); @@ -976,7 +974,7 @@ mod tests { } // Insert more data in the database, so we hit the DB size limit error, and MDBX tries to - // kick long-lived readers and delete their static_files + // kick long-lived readers and delete their snapshots { let tx = env.begin_rw_txn().unwrap(); let db = tx.open_db(None).unwrap(); diff --git a/crates/storage/libmdbx-rs/src/flags.rs b/crates/storage/libmdbx-rs/src/flags.rs index e6b2697a8..843ae161c 100644 --- a/crates/storage/libmdbx-rs/src/flags.rs +++ b/crates/storage/libmdbx-rs/src/flags.rs @@ -25,7 +25,7 @@ pub enum SyncMode { /// /// [SyncMode::UtterlyNoSync] the [SyncMode::SafeNoSync] flag disable similarly flush system /// buffers to disk when committing a transaction. But there is a huge difference in how - /// are recycled the MVCC static_files corresponding to previous "steady" transactions (see + /// are recycled the MVCC snapshots corresponding to previous "steady" transactions (see /// below). /// /// With [crate::EnvironmentKind::WriteMap] the [SyncMode::SafeNoSync] instructs MDBX to use diff --git a/crates/storage/libmdbx-rs/src/lib.rs b/crates/storage/libmdbx-rs/src/lib.rs index f8c251208..ba8c6b062 100644 --- a/crates/storage/libmdbx-rs/src/lib.rs +++ b/crates/storage/libmdbx-rs/src/lib.rs @@ -5,7 +5,7 @@ issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" )] #![cfg_attr(not(test), warn(unused_crate_dependencies))] -#![allow(missing_docs)] +#![allow(missing_docs, clippy::needless_pass_by_ref_mut)] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] pub use crate::{ diff --git a/crates/storage/nippy-jar/src/cursor.rs b/crates/storage/nippy-jar/src/cursor.rs index 984206c36..541fcfa63 100644 --- a/crates/storage/nippy-jar/src/cursor.rs +++ b/crates/storage/nippy-jar/src/cursor.rs @@ -213,13 +213,13 @@ impl<'a, H: NippyJarHeader> NippyJarCursor<'a, H> { ) -> Result<(), NippyJarError> { // Find out the offset of the column value let offset_pos = self.row as usize * self.jar.columns + column; - let value_offset = self.reader.offset(offset_pos) as usize; + let value_offset = self.reader.offset(offset_pos)? as usize; let column_offset_range = if self.jar.rows * self.jar.columns == offset_pos + 1 { // It's the last column of the last row value_offset..self.reader.size() } else { - let next_value_offset = self.reader.offset(offset_pos + 1) as usize; + let next_value_offset = self.reader.offset(offset_pos + 1)? 
as usize; value_offset..next_value_offset }; diff --git a/crates/storage/nippy-jar/src/error.rs b/crates/storage/nippy-jar/src/error.rs index c769f0db8..d44777058 100644 --- a/crates/storage/nippy-jar/src/error.rs +++ b/crates/storage/nippy-jar/src/error.rs @@ -37,6 +37,16 @@ pub enum NippyJarError { PHFMissing, #[error("nippy jar was built without an index")] UnsupportedFilterQuery, + #[error("the size of an offset must be at most 8 bytes, got {offset_size}")] + OffsetSizeTooBig { + /// The read offset size in number of bytes. + offset_size: u64, + }, + #[error("attempted to read an out of bounds offset: {index}")] + OffsetOutOfBounds { + /// The index of the offset that was being read. + index: usize, + }, #[error("compression or decompression requires a bigger destination output")] OutputTooSmall, #[error("dictionary is not loaded.")] diff --git a/crates/storage/nippy-jar/src/lib.rs b/crates/storage/nippy-jar/src/lib.rs index 4d311f273..1abbfba75 100644 --- a/crates/storage/nippy-jar/src/lib.rs +++ b/crates/storage/nippy-jar/src/lib.rs @@ -366,7 +366,7 @@ impl NippyJar { /// Writes all data and configuration to a file and the offset index to another. pub fn freeze( - mut self, + self, columns: Vec>>>, total_rows: u64, ) -> Result { @@ -392,7 +392,7 @@ impl NippyJar { } /// Freezes [`PerfectHashingFunction`], [`InclusionFilter`] and the offset index to file. - fn freeze_filters(&mut self) -> Result<(), NippyJarError> { + fn freeze_filters(&self) -> Result<(), NippyJarError> { debug!(target: "nippy-jar", path=?self.index_path(), "Writing offsets and offsets index to file."); let mut file = File::create(self.index_path())?; @@ -405,7 +405,7 @@ impl NippyJar { /// Safety checks before creating and returning a [`File`] handle to write data to. fn check_before_freeze( - &mut self, + &self, columns: &[impl IntoIterator>>], ) -> Result<(), NippyJarError> { if columns.len() != self.columns { @@ -427,7 +427,7 @@ impl NippyJar { } /// Writes all necessary configuration to file. - fn freeze_config(&mut self) -> Result<(), NippyJarError> { + fn freeze_config(&self) -> Result<(), NippyJarError> { Ok(bincode::serialize_into(File::create(self.config_path())?, &self)?) } } @@ -486,18 +486,19 @@ impl DataReader { // SAFETY: File is read-only and its descriptor is kept alive as long as the mmap handle. let offset_mmap = unsafe { Mmap::map(&offset_file)? }; - Ok(Self { - data_file, - data_mmap, - offset_file, - // First byte is the size of one offset in bytes - offset_size: offset_mmap[0] as u64, - offset_mmap, - }) + // First byte is the size of one offset in bytes + let offset_size = offset_mmap[0] as u64; + + // Ensure that the size of an offset is at most 8 bytes. + if offset_size > 8 { + return Err(NippyJarError::OffsetSizeTooBig { offset_size }) + } + + Ok(Self { data_file, data_mmap, offset_file, offset_size, offset_mmap }) } /// Returns the offset for the requested data index - pub fn offset(&self, index: usize) -> u64 { + pub fn offset(&self, index: usize) -> Result { // + 1 represents the offset_len u8 which is in the beginning of the file let from = index * self.offset_size as usize + 1; @@ -511,7 +512,7 @@ impl DataReader { if offsets_file_size > 1 { let from = offsets_file_size - self.offset_size as usize * (index + 1); - Ok(self.offset_at(from)) + self.offset_at(from) } else { Ok(0) } @@ -524,11 +525,16 @@ impl DataReader { } /// Reads one offset-sized (determined by the offset file) u64 at the provided index. 
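For context on the bounds check introduced below: the offsets file starts with a single byte giving the width of each offset, followed by fixed-width little-endian values. A minimal standalone sketch of that read under those assumptions (illustrative names, element-indexed rather than byte-indexed like the real reader):

```rust
fn read_offset(raw: &[u8], index: usize, offset_size: usize) -> Result<u64, String> {
    let start = 1 + index * offset_size; // +1 skips the leading width byte
    let end = start + offset_size;
    if end > raw.len() {
        // Without this check, a truncated or corrupted offsets file panics
        // instead of surfacing a recoverable error.
        return Err(format!("attempted to read an out of bounds offset: {index}"));
    }
    let mut buf = [0u8; 8];
    buf[..offset_size].copy_from_slice(&raw[start..end]);
    Ok(u64::from_le_bytes(buf))
}

fn main() {
    // Width byte = 2, then offsets 5 and 9 stored as little-endian u16.
    let file = [2u8, 5, 0, 9, 0];
    assert_eq!(read_offset(&file, 1, 2), Ok(9));
    assert!(read_offset(&file, 2, 2).is_err());
}
```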
- fn offset_at(&self, index: usize) -> u64 { + fn offset_at(&self, index: usize) -> Result { let mut buffer: [u8; 8] = [0; 8]; - buffer[..self.offset_size as usize] - .copy_from_slice(&self.offset_mmap[index..(index + self.offset_size as usize)]); - u64::from_le_bytes(buffer) + + let offset_end = index + self.offset_size as usize; + if offset_end > self.offset_mmap.len() { + return Err(NippyJarError::OffsetOutOfBounds { index }); + } + + buffer[..self.offset_size as usize].copy_from_slice(&self.offset_mmap[index..offset_end]); + Ok(u64::from_le_bytes(buffer)) } /// Returns number of bytes that represent one offset. @@ -1071,7 +1077,7 @@ mod tests { let num_rows = 2; // (missing_offsets, expected number of rows) - // If a row wasnt fully pruned, then it should clear it up as well + // If a row wasn't fully pruned, then it should clear it up as well let missing_offsets_scenarios = [(1, 1), (2, 1), (3, 0)]; for (missing_offsets, expected_rows) in missing_offsets_scenarios { @@ -1194,7 +1200,7 @@ mod tests { fn append_two_rows(num_columns: usize, file_path: &Path, col1: &[Vec], col2: &[Vec]) { // Create and add 1 row { - let mut nippy = NippyJar::new_without_header(num_columns, file_path); + let nippy = NippyJar::new_without_header(num_columns, file_path); nippy.freeze_config().unwrap(); assert_eq!(nippy.max_row_size, 0); assert_eq!(nippy.rows, 0); @@ -1291,7 +1297,7 @@ mod tests { let data_reader = nippy.open_data_reader().unwrap(); // there are only two valid offsets. so index 2 actually represents the expected file // data size. - assert_eq!(data_reader.offset(2), expected_data_size as u64); + assert_eq!(data_reader.offset(2).unwrap(), expected_data_size as u64); } // This should prune from the ondisk offset list and clear the jar. diff --git a/crates/storage/nippy-jar/src/writer.rs b/crates/storage/nippy-jar/src/writer.rs index e1f4af10b..6417e6007 100644 --- a/crates/storage/nippy-jar/src/writer.rs +++ b/crates/storage/nippy-jar/src/writer.rs @@ -43,7 +43,7 @@ pub struct NippyJarWriter { impl NippyJarWriter { /// Creates a [`NippyJarWriter`] from [`NippyJar`]. 
- pub fn new(mut jar: NippyJar) -> Result { + pub fn new(jar: NippyJar) -> Result { let (data_file, offsets_file, is_created) = Self::create_or_open_files(jar.data_path(), &jar.offsets_path())?; diff --git a/crates/storage/provider/Cargo.toml b/crates/storage/provider/Cargo.toml index 6f9305e88..672f6a7fc 100644 --- a/crates/storage/provider/Cargo.toml +++ b/crates/storage/provider/Cargo.toml @@ -15,6 +15,7 @@ workspace = true # reth reth-primitives.workspace = true reth-interfaces.workspace = true +reth-rpc-types.workspace = true reth-db.workspace = true reth-trie = { workspace = true, features = ["metrics"] } reth-nippy-jar.workspace = true @@ -39,7 +40,7 @@ auto_impl.workspace = true itertools.workspace = true pin-project.workspace = true parking_lot.workspace = true -dashmap = { version = "5.5", features = ["inline"] } +dashmap = { workspace = true, features = ["inline"] } strum.workspace = true # test-utils diff --git a/crates/storage/provider/src/bundle_state/bundle_state_with_receipts.rs b/crates/storage/provider/src/bundle_state/bundle_state_with_receipts.rs index 5e595532c..5f6d4af3f 100644 --- a/crates/storage/provider/src/bundle_state/bundle_state_with_receipts.rs +++ b/crates/storage/provider/src/bundle_state/bundle_state_with_receipts.rs @@ -1,9 +1,10 @@ -use crate::{providers::StaticFileProviderRWRefMut, StateChanges, StateReverts}; +use crate::{providers::StaticFileProviderRWRefMut, StateChanges, StateReverts, StateWriter}; use reth_db::{ cursor::{DbCursorRO, DbCursorRW}, tables, transaction::{DbTx, DbTxMut}, }; +use reth_evm::execute::BatchBlockExecutionOutput; use reth_interfaces::provider::{ProviderError, ProviderResult}; use reth_primitives::{ logs_bloom, @@ -34,6 +35,22 @@ pub struct BundleStateWithReceipts { first_block: BlockNumber, } +// TODO(mattsse): unify the types, currently there's a cyclic dependency between +impl From for BundleStateWithReceipts { + fn from(value: BatchBlockExecutionOutput) -> Self { + let BatchBlockExecutionOutput { bundle, receipts, first_block } = value; + Self { bundle, receipts, first_block } + } +} + +// TODO(mattsse): unify the types, currently there's a cyclic dependency between +impl From for BatchBlockExecutionOutput { + fn from(value: BundleStateWithReceipts) -> Self { + let BundleStateWithReceipts { bundle, receipts, first_block } = value; + Self { bundle, receipts, first_block } + } +} + /// Type used to initialize revms bundle state. pub type BundleStateInit = HashMap, Option, HashMap)>; @@ -292,14 +309,10 @@ impl BundleStateWithReceipts { // swap bundles std::mem::swap(&mut self.bundle, &mut other) } +} - /// Write the [BundleStateWithReceipts] to database and receipts to either database or static - /// files if `static_file_producer` is `Some`. It should be none if there is any kind of - /// pruning/filtering over the receipts. - /// - /// `omit_changed_check` should be set to true of bundle has some of it data - /// detached, This would make some original values not known. 
- pub fn write_to_storage( +impl StateWriter for BundleStateWithReceipts { + fn write_to_storage( self, tx: &TX, mut static_file_producer: Option>, @@ -316,7 +329,12 @@ impl BundleStateWithReceipts { let mut bodies_cursor = tx.cursor_read::()?; let mut receipts_cursor = tx.cursor_write::()?; - for (idx, receipts) in self.receipts.into_iter().enumerate() { + // ATTENTION: Any potential future refactor or change to how this loop works should keep in + // mind that the static file producer must always call `increment_block` even if the block + // has no receipts. Keeping track of the exact block range of the segment is needed for + // consistency, querying and file range segmentation. + let blocks = self.receipts.into_iter().enumerate(); + for (idx, receipts) in blocks { let block_number = self.first_block + idx as u64; let first_tx_index = bodies_cursor .seek_exact(block_number)? diff --git a/crates/storage/provider/src/bundle_state/mod.rs b/crates/storage/provider/src/bundle_state/mod.rs index 3f5da6ec6..5df4a213a 100644 --- a/crates/storage/provider/src/bundle_state/mod.rs +++ b/crates/storage/provider/src/bundle_state/mod.rs @@ -10,4 +10,4 @@ pub use bundle_state_with_receipts::{ }; pub use hashed_state_changes::HashedStateChanges; pub use state_changes::StateChanges; -pub use state_reverts::StateReverts; +pub use state_reverts::{StateReverts, StorageRevertsIter}; diff --git a/crates/storage/provider/src/bundle_state/state_changes.rs b/crates/storage/provider/src/bundle_state/state_changes.rs index a62606ded..7f7bde79e 100644 --- a/crates/storage/provider/src/bundle_state/state_changes.rs +++ b/crates/storage/provider/src/bundle_state/state_changes.rs @@ -77,6 +77,7 @@ impl StateChanges { } } } + Ok(()) } } diff --git a/crates/storage/provider/src/bundle_state/state_reverts.rs b/crates/storage/provider/src/bundle_state/state_reverts.rs index 63c5595c5..cc16a50cc 100644 --- a/crates/storage/provider/src/bundle_state/state_reverts.rs +++ b/crates/storage/provider/src/bundle_state/state_reverts.rs @@ -74,10 +74,12 @@ impl StateReverts { // Write account changes tracing::trace!(target: "provider::reverts", "Writing account changes"); let mut account_changeset_cursor = tx.cursor_dup_write::()?; + for (block_index, mut account_block_reverts) in self.0.accounts.into_iter().enumerate() { let block_number = first_block + block_index as BlockNumber; // Sort accounts by address. account_block_reverts.par_sort_by_key(|a| a.0); + for (address, info) in account_block_reverts { account_changeset_cursor.append_dup( block_number, @@ -92,7 +94,8 @@ impl StateReverts { /// Iterator over storage reverts. /// See [StorageRevertsIter::next] for more details. -struct StorageRevertsIter { +#[allow(missing_debug_implementations)] +pub struct StorageRevertsIter { reverts: Peekable, wiped: Peekable, } @@ -102,7 +105,8 @@ where R: Iterator, W: Iterator, { - fn new( + /// Create a new iterator over storage reverts. + pub fn new( reverts: impl IntoIterator, wiped: impl IntoIterator, ) -> Self { diff --git a/crates/storage/provider/src/chain.rs b/crates/storage/provider/src/chain.rs index 5acd84599..9b9c66d4b 100644 --- a/crates/storage/provider/src/chain.rs +++ b/crates/storage/provider/src/chain.rs @@ -16,6 +16,10 @@ use std::{borrow::Cow, collections::BTreeMap, fmt, ops::RangeInclusive}; /// changesets for those blocks (and their transactions), as well as the blocks themselves. /// /// Used inside the BlockchainTree. +/// +/// # Warning +/// +/// A chain of blocks should not be empty. 
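A hedged sketch of what that contract looks like when enforced at construction time (a toy stand-in, not the real `Chain`): the constructor asserts non-emptiness in debug builds, and accessors may then lean on the invariant, as the `first`/`tip` changes below do.

```rust
use std::collections::BTreeMap;

struct MiniChain {
    blocks: BTreeMap<u64, String>,
}

impl MiniChain {
    fn new(blocks: impl IntoIterator<Item = (u64, String)>) -> Self {
        let blocks = BTreeMap::from_iter(blocks);
        // Free in release builds, loud in tests; accessors assume it holds.
        debug_assert!(!blocks.is_empty(), "Chain should have at least one block");
        Self { blocks }
    }

    /// Panics if the chain has no blocks, mirroring the documented contract.
    #[track_caller]
    fn tip(&self) -> &String {
        self.blocks.last_key_value().expect("Chain should have at least one block").1
    }
}
```

`#[track_caller]` makes the panic report the caller's location, which is what turns the documented `# Panics` sections into actionable messages.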
#[derive(Clone, Debug, Default, PartialEq, Eq)] pub struct Chain { /// All blocks in this chain. @@ -33,16 +37,19 @@ pub struct Chain { impl Chain { /// Create new Chain from blocks and state. + /// + /// # Warning + /// + /// A chain of blocks should not be empty. pub fn new( blocks: impl IntoIterator, state: BundleStateWithReceipts, trie_updates: Option, ) -> Self { - Self { - blocks: BTreeMap::from_iter(blocks.into_iter().map(|b| (b.number, b))), - state, - trie_updates, - } + let blocks = BTreeMap::from_iter(blocks.into_iter().map(|b| (b.number, b))); + debug_assert!(!blocks.is_empty(), "Chain should have at least one block"); + + Self { blocks, state, trie_updates } } /// Create new Chain from a single block and its state. @@ -158,16 +165,20 @@ impl Chain { } /// Get the first block in this chain. + /// + /// # Panics + /// + /// If chain doesn't have any blocks. #[track_caller] pub fn first(&self) -> &SealedBlockWithSenders { - self.blocks.first_key_value().expect("Chain has at least one block for first").1 + self.blocks.first_key_value().expect("Chain should have at least one block").1 } /// Get the tip of the chain. /// - /// # Note + /// # Panics /// - /// Chains always have at least one block. + /// If chain doesn't have any blocks. #[track_caller] pub fn tip(&self) -> &SealedBlockWithSenders { self.blocks.last_key_value().expect("Chain should have at least one block").1 @@ -179,6 +190,10 @@ impl Chain { } /// Returns the range of block numbers in the chain. + /// + /// # Panics + /// + /// If chain doesn't have any blocks. pub fn range(&self) -> RangeInclusive { self.first().number..=self.tip().number } @@ -255,6 +270,10 @@ impl Chain { /// The second chain only contains the changes that were reverted on the first chain; however, /// it retains the up to date state as if the chains were one, i.e. the second chain is an /// extension of the first. + /// + /// # Panics + /// + /// If chain doesn't have any blocks. 
#[track_caller] pub fn split(mut self, split_at: ChainSplitTarget) -> ChainSplit { let chain_tip = *self.blocks.last_entry().expect("chain is never empty").key(); @@ -479,7 +498,7 @@ mod tests { let chain2 = Chain { blocks: BTreeMap::from([(3, block3), (4, block4)]), ..Default::default() }; - assert_eq!(chain1.append_chain(chain2.clone()), Ok(())); + assert!(chain1.append_chain(chain2.clone()).is_ok()); // chain1 got changed so this will fail assert!(chain1.append_chain(chain2).is_err()); diff --git a/crates/storage/provider/src/lib.rs b/crates/storage/provider/src/lib.rs index 838edd620..2b146245e 100644 --- a/crates/storage/provider/src/lib.rs +++ b/crates/storage/provider/src/lib.rs @@ -21,6 +21,7 @@ pub mod providers; pub use providers::{ DatabaseProvider, DatabaseProviderRO, DatabaseProviderRW, HistoricalStateProvider, HistoricalStateProviderRef, LatestStateProvider, LatestStateProviderRef, ProviderFactory, + StaticFileWriter, }; #[cfg(any(test, feature = "test-utils"))] diff --git a/crates/storage/provider/src/providers/database/mod.rs b/crates/storage/provider/src/providers/database/mod.rs index a2bf883d5..c84e9d8ce 100644 --- a/crates/storage/provider/src/providers/database/mod.rs +++ b/crates/storage/provider/src/providers/database/mod.rs @@ -5,7 +5,7 @@ use crate::{ BlockHashReader, BlockNumReader, BlockReader, ChainSpecProvider, DatabaseProviderFactory, EvmEnvProvider, HeaderProvider, HeaderSyncGap, HeaderSyncGapProvider, HeaderSyncMode, ProviderError, PruneCheckpointReader, StageCheckpointReader, StateProviderBox, - TransactionVariant, TransactionsProvider, WithdrawalsProvider, + StaticFileProviderFactory, TransactionVariant, TransactionsProvider, WithdrawalsProvider, }; use reth_db::{database::Database, init_db, models::StoredBlockBodyIndices, DatabaseEnv}; use reth_evm::ConfigureEvmEnv; @@ -34,10 +34,10 @@ use reth_db::mdbx::DatabaseArguments; /// A common provider that fetches data from a database or static file. /// /// This provider implements most provider or provider factory traits. 
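The hunk that follows stores the database behind an `Arc` and hand-writes `Clone`. A minimal sketch of why (illustrative names): `#[derive(Clone)]` on a generic struct adds a `DB: Clone` bound, which a database handle typically cannot satisfy, while `Arc` makes cloning a cheap refcount bump.

```rust
use std::sync::Arc;

struct Factory<DB> {
    db: Arc<DB>,
}

// Hand-written so the impl holds for *any* DB; a derived Clone
// would only exist where DB: Clone.
impl<DB> Clone for Factory<DB> {
    fn clone(&self) -> Self {
        Self { db: Arc::clone(&self.db) } // shares the database, no deep copy
    }
}
```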
-#[derive(Debug, Clone)] +#[derive(Debug)] pub struct ProviderFactory { /// Database - db: DB, + db: Arc, /// Chain spec chain_spec: Arc, /// Static File Provider @@ -52,7 +52,7 @@ impl ProviderFactory { static_files_path: PathBuf, ) -> RethResult> { Ok(Self { - db, + db: Arc::new(db), chain_spec, static_file_provider: StaticFileProvider::new(static_files_path)?, }) @@ -69,14 +69,9 @@ impl ProviderFactory { &self.db } - /// Returns static file provider - pub fn static_file_provider(&self) -> StaticFileProvider { - self.static_file_provider.clone() - } - #[cfg(any(test, feature = "test-utils"))] /// Consumes Self and returns DB - pub fn into_db(self) -> DB { + pub fn into_db(self) -> Arc { self.db } } @@ -91,7 +86,7 @@ impl ProviderFactory { static_files_path: PathBuf, ) -> RethResult { Ok(ProviderFactory:: { - db: init_db(path, args).map_err(|e| RethError::Custom(e.to_string()))?, + db: Arc::new(init_db(path, args).map_err(|e| RethError::Custom(e.to_string()))?), chain_spec, static_file_provider: StaticFileProvider::new(static_files_path)?, }) @@ -161,6 +156,13 @@ impl DatabaseProviderFactory for ProviderFactory { } } +impl StaticFileProviderFactory for ProviderFactory { + /// Returns static file provider + fn static_file_provider(&self) -> StaticFileProvider { + self.static_file_provider.clone() + } +} + impl HeaderSyncGapProvider for ProviderFactory { fn sync_gap( &self, @@ -556,6 +558,15 @@ impl PruneCheckpointReader for ProviderFactory { } } +impl Clone for ProviderFactory { + fn clone(&self) -> Self { + ProviderFactory { + db: Arc::clone(&self.db), + chain_spec: self.chain_spec.clone(), + static_file_provider: self.static_file_provider.clone(), + } + } +} #[cfg(test)] mod tests { use super::ProviderFactory; diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index ba85a4a40..6e07b7c46 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -9,8 +9,8 @@ use crate::{ Chain, EvmEnvProvider, HashingWriter, HeaderProvider, HeaderSyncGap, HeaderSyncGapProvider, HeaderSyncMode, HistoricalStateProvider, HistoryWriter, LatestStateProvider, OriginalValuesKnown, ProviderError, PruneCheckpointReader, PruneCheckpointWriter, - StageCheckpointReader, StateProviderBox, StatsReader, StorageReader, TransactionVariant, - TransactionsProvider, TransactionsProviderExt, WithdrawalsProvider, + StageCheckpointReader, StateProviderBox, StateWriter, StatsReader, StorageReader, + TransactionVariant, TransactionsProvider, TransactionsProviderExt, WithdrawalsProvider, }; use itertools::{izip, Itertools}; use reth_db::{ @@ -354,6 +354,11 @@ impl DatabaseProvider { |_| true, ) } + + /// Returns a reference to the [`ChainSpec`]. + pub fn chain_spec(&self) -> &ChainSpec { + &self.chain_spec + } } impl DatabaseProvider { @@ -387,7 +392,7 @@ impl DatabaseProvider { /// 1. Take the old value from the changeset /// 2. Take the new value from the local state /// 3. 
Set the local state to the value in the changeset - pub fn unwind_or_peek_state( + pub fn unwind_or_peek_state( &self, range: RangeInclusive, ) -> ProviderResult { @@ -408,8 +413,8 @@ impl DatabaseProvider { let storage_range = BlockNumberAddress::range(range.clone()); let storage_changeset = - self.get_or_take::(storage_range)?; - let account_changeset = self.get_or_take::(range)?; + self.get_or_take::(storage_range)?; + let account_changeset = self.get_or_take::(range)?; // iterate previous value and get plain state value to create changeset // Double option around Account represent if Account state is know (first option) and @@ -478,7 +483,7 @@ impl DatabaseProvider { .push(old_storage); } - if UNWIND { + if TAKE { // iterate over local plain state remove all account and all storages. for (address, (old_account, new_account, storage)) in state.iter() { // revert account if needed. @@ -515,7 +520,7 @@ impl DatabaseProvider { // iterate over block body and create ExecutionResult let mut receipt_iter = self - .get_or_take::(from_transaction_num..=to_transaction_num)? + .get_or_take::(from_transaction_num..=to_transaction_num)? .into_iter(); let mut receipts = Vec::new(); @@ -1107,7 +1112,10 @@ impl HeaderSyncGapProvider for DatabaseProvider { Ordering::Greater => { let mut static_file_producer = static_file_provider.latest_writer(StaticFileSegment::Headers)?; - static_file_producer.prune_headers(next_static_file_block_num - next_block)? + static_file_producer.prune_headers(next_static_file_block_num - next_block)?; + // Since this is a database <-> static file inconsistency, we commit the change + // straight away. + static_file_producer.commit()? } Ordering::Less => { // There's either missing or corrupted files. diff --git a/crates/storage/provider/src/providers/mod.rs b/crates/storage/provider/src/providers/mod.rs index f696c86d7..bf94e32cf 100644 --- a/crates/storage/provider/src/providers/mod.rs +++ b/crates/storage/provider/src/providers/mod.rs @@ -4,7 +4,8 @@ use crate::{ CanonStateNotifications, CanonStateSubscriptions, ChainSpecProvider, ChangeSetReader, DatabaseProviderFactory, EvmEnvProvider, HeaderProvider, ProviderError, PruneCheckpointReader, ReceiptProvider, ReceiptProviderIdExt, StageCheckpointReader, StateProviderBox, - StateProviderFactory, TransactionVariant, TransactionsProvider, WithdrawalsProvider, + StateProviderFactory, StaticFileProviderFactory, TransactionVariant, TransactionsProvider, + TreeViewer, WithdrawalsProvider, }; use reth_db::{ database::Database, @@ -17,7 +18,6 @@ use reth_interfaces::{ BlockValidationKind, BlockchainTreeEngine, BlockchainTreeViewer, CanonicalOutcome, InsertPayloadOk, }, - consensus::ForkchoiceState, provider::ProviderResult, RethResult, }; @@ -61,37 +61,50 @@ use chain_info::ChainInfoTracker; mod consistent_view; pub use consistent_view::{ConsistentDbView, ConsistentViewError}; +use reth_rpc_types::engine::ForkchoiceState; /// The main type for interacting with the blockchain. /// /// This type serves as the main entry point for interacting with the blockchain and provides data /// from database storage and from the blockchain tree (pending state etc.) It is a simple wrapper /// type that holds an instance of the database and the blockchain tree. -#[derive(Clone, Debug)] -pub struct BlockchainProvider { +#[derive(Clone)] +#[allow(missing_debug_implementations)] +pub struct BlockchainProvider { /// Provider type used to access the database. database: ProviderFactory, /// The blockchain tree instance. 
- tree: Tree, + tree: Arc, /// Tracks the chain info wrt forkchoice updates chain_info: ChainInfoTracker, } -impl BlockchainProvider { +impl BlockchainProvider { /// Create new provider instance that wraps the database and the blockchain tree, using the /// provided latest header to initialize the chain info tracker. - pub fn with_latest(database: ProviderFactory, tree: Tree, latest: SealedHeader) -> Self { + pub fn with_latest( + database: ProviderFactory, + tree: Arc, + latest: SealedHeader, + ) -> Self { Self { database, tree, chain_info: ChainInfoTracker::new(latest) } } + + /// Sets the treeviewer for the provider. + #[doc(hidden)] + pub fn with_tree(mut self, tree: Arc) -> Self { + self.tree = tree; + self + } } -impl BlockchainProvider +impl BlockchainProvider where DB: Database, { /// Create a new provider using only the database and the tree, fetching the latest header from /// the database to initialize the provider. - pub fn new(database: ProviderFactory, tree: Tree) -> ProviderResult { + pub fn new(database: ProviderFactory, tree: Arc) -> ProviderResult { let provider = database.provider()?; let best: ChainInfo = provider.chain_info()?; match provider.header_by_number(best.best_number)? { @@ -104,10 +117,9 @@ where } } -impl BlockchainProvider +impl BlockchainProvider where DB: Database, - Tree: BlockchainTreeViewer, { /// Ensures that the given block number is canonical (synced) /// @@ -128,7 +140,7 @@ where } } -impl DatabaseProviderFactory for BlockchainProvider +impl DatabaseProviderFactory for BlockchainProvider where DB: Database, { @@ -137,10 +149,15 @@ where } } -impl HeaderProvider for BlockchainProvider +impl StaticFileProviderFactory for BlockchainProvider { + fn static_file_provider(&self) -> StaticFileProvider { + self.database.static_file_provider() + } +} + +impl HeaderProvider for BlockchainProvider where DB: Database, - Tree: Send + Sync, { fn header(&self, block_hash: &BlockHash) -> ProviderResult> { self.database.header(block_hash) @@ -182,10 +199,9 @@ where } } -impl BlockHashReader for BlockchainProvider +impl BlockHashReader for BlockchainProvider where DB: Database, - Tree: Send + Sync, { fn block_hash(&self, number: u64) -> ProviderResult> { self.database.block_hash(number) @@ -200,10 +216,9 @@ where } } -impl BlockNumReader for BlockchainProvider +impl BlockNumReader for BlockchainProvider where DB: Database, - Tree: BlockchainTreeViewer + Send + Sync, { fn chain_info(&self) -> ProviderResult { Ok(self.chain_info.chain_info()) @@ -222,10 +237,9 @@ where } } -impl BlockIdReader for BlockchainProvider +impl BlockIdReader for BlockchainProvider where DB: Database, - Tree: BlockchainTreeViewer + Send + Sync, { fn pending_block_num_hash(&self) -> ProviderResult> { Ok(self.tree.pending_block_num_hash()) @@ -240,10 +254,9 @@ where } } -impl BlockReader for BlockchainProvider +impl BlockReader for BlockchainProvider where DB: Database, - Tree: BlockchainTreeViewer + Send + Sync, { fn find_block_by_hash(&self, hash: B256, source: BlockSource) -> ProviderResult> { let block = match source { @@ -320,10 +333,9 @@ where } } -impl TransactionsProvider for BlockchainProvider +impl TransactionsProvider for BlockchainProvider where DB: Database, - Tree: BlockchainTreeViewer + Send + Sync, { fn transaction_id(&self, tx_hash: TxHash) -> ProviderResult> { self.database.transaction_id(tx_hash) @@ -388,10 +400,9 @@ where } } -impl ReceiptProvider for BlockchainProvider +impl ReceiptProvider for BlockchainProvider where DB: Database, - Tree: Send + Sync, { fn receipt(&self, 
id: TxNumber) -> ProviderResult> { self.database.receipt(id) @@ -412,10 +423,10 @@ where self.database.receipts_by_tx_range(range) } } -impl ReceiptProviderIdExt for BlockchainProvider + +impl ReceiptProviderIdExt for BlockchainProvider where DB: Database, - Tree: BlockchainTreeViewer + Send + Sync, { fn receipts_by_block_id(&self, block: BlockId) -> ProviderResult>> { match block { @@ -440,10 +451,9 @@ where } } -impl WithdrawalsProvider for BlockchainProvider +impl WithdrawalsProvider for BlockchainProvider where DB: Database, - Tree: Send + Sync, { fn withdrawals_by_block( &self, @@ -458,10 +468,9 @@ where } } -impl StageCheckpointReader for BlockchainProvider +impl StageCheckpointReader for BlockchainProvider where DB: Database, - Tree: Send + Sync, { fn get_stage_checkpoint(&self, id: StageId) -> ProviderResult> { self.database.provider()?.get_stage_checkpoint(id) @@ -472,10 +481,9 @@ where } } -impl EvmEnvProvider for BlockchainProvider +impl EvmEnvProvider for BlockchainProvider where DB: Database, - Tree: Send + Sync, { fn fill_env_at( &self, @@ -544,10 +552,9 @@ where } } -impl PruneCheckpointReader for BlockchainProvider +impl PruneCheckpointReader for BlockchainProvider where DB: Database, - Tree: Send + Sync, { fn get_prune_checkpoint( &self, @@ -557,20 +564,18 @@ where } } -impl ChainSpecProvider for BlockchainProvider +impl ChainSpecProvider for BlockchainProvider where DB: Send + Sync, - Tree: Send + Sync, { fn chain_spec(&self) -> Arc { self.database.chain_spec() } } -impl StateProviderFactory for BlockchainProvider +impl StateProviderFactory for BlockchainProvider where DB: Database, - Tree: BlockchainTreePendingStateProvider + BlockchainTreeViewer, { /// Storage provider for latest block fn latest(&self) -> ProviderResult { @@ -644,10 +649,9 @@ where } } -impl BlockchainTreeEngine for BlockchainProvider +impl BlockchainTreeEngine for BlockchainProvider where DB: Send + Sync, - Tree: BlockchainTreeEngine, { fn buffer_block(&self, block: SealedBlockWithSenders) -> Result<(), InsertBlockError> { self.tree.buffer_block(block) @@ -665,6 +669,10 @@ where self.tree.finalize_block(finalized_block) } + fn update_block_hashes_and_clear_buffered(&self) -> RethResult> { + self.tree.update_block_hashes_and_clear_buffered() + } + fn connect_buffered_blocks_to_canonical_hashes_and_finalize( &self, last_finalized_block: BlockNumber, @@ -681,10 +689,9 @@ where } } -impl BlockchainTreeViewer for BlockchainProvider +impl BlockchainTreeViewer for BlockchainProvider where DB: Send + Sync, - Tree: BlockchainTreeViewer, { fn blocks(&self) -> BTreeMap> { self.tree.blocks() @@ -743,10 +750,9 @@ where } } -impl CanonChainTracker for BlockchainProvider +impl CanonChainTracker for BlockchainProvider where DB: Send + Sync, - Tree: Send + Sync, Self: BlockReader, { fn on_forkchoice_update_received(&self, _update: &ForkchoiceState) { @@ -779,10 +785,9 @@ where } } -impl BlockReaderIdExt for BlockchainProvider +impl BlockReaderIdExt for BlockchainProvider where Self: BlockReader + BlockIdReader + ReceiptProviderIdExt, - Tree: BlockchainTreeEngine, { fn block_by_id(&self, id: BlockId) -> ProviderResult> { match id { @@ -859,10 +864,9 @@ where } } -impl BlockchainTreePendingStateProvider for BlockchainProvider +impl BlockchainTreePendingStateProvider for BlockchainProvider where DB: Send + Sync, - Tree: BlockchainTreePendingStateProvider, { fn find_pending_state_provider( &self, @@ -872,20 +876,18 @@ where } } -impl CanonStateSubscriptions for BlockchainProvider +impl CanonStateSubscriptions for 
BlockchainProvider where DB: Send + Sync, - Tree: CanonStateSubscriptions, { fn subscribe_to_canonical_state(&self) -> CanonStateNotifications { self.tree.subscribe_to_canonical_state() } } -impl ChangeSetReader for BlockchainProvider +impl ChangeSetReader for BlockchainProvider where DB: Database, - Tree: Sync + Send, { fn account_block_changeset( &self, @@ -895,10 +897,9 @@ where } } -impl AccountReader for BlockchainProvider +impl AccountReader for BlockchainProvider where DB: Database + Sync + Send, - Tree: Sync + Send, { /// Get basic account information. fn basic_account(&self, address: Address) -> ProviderResult> { diff --git a/crates/storage/provider/src/providers/state/historical.rs b/crates/storage/provider/src/providers/state/historical.rs index a2dba78a0..ed64314aa 100644 --- a/crates/storage/provider/src/providers/state/historical.rs +++ b/crates/storage/provider/src/providers/state/historical.rs @@ -127,7 +127,7 @@ impl<'b, TX: DbTx> HistoricalStateProviderRef<'b, TX> { tracing::warn!( target: "provider::historical_sp", target = self.block_number, - "Attempt to calculate state root for an old block might result in OOM, tread carefully" + "Attempt to calculate state root for an old block might result in OOM, treat carefully" ); } @@ -405,6 +405,7 @@ mod tests { providers::state::historical::{HistoryInfo, LowestAvailableBlocks}, test_utils::create_test_provider_factory, AccountReader, HistoricalStateProvider, HistoricalStateProviderRef, StateProvider, + StaticFileProviderFactory, }; use reth_db::{ models::{storage_sharded_key::StorageShardedKey, AccountBeforeTx, ShardedKey}, diff --git a/crates/storage/provider/src/providers/static_file/writer.rs b/crates/storage/provider/src/providers/static_file/writer.rs index c61736b5e..3a0f2d031 100644 --- a/crates/storage/provider/src/providers/static_file/writer.rs +++ b/crates/storage/provider/src/providers/static_file/writer.rs @@ -30,10 +30,17 @@ pub struct StaticFileProviderRW { /// stored in a [dashmap::DashMap] inside the parent [StaticFileProvider], which is an [Arc]. /// If we were to use an [Arc] here, we would create a reference cycle. reader: Weak, + /// A [`NippyJarWriter`] instance. writer: NippyJarWriter, + /// Path to opened file. data_path: PathBuf, + /// Reusable buffer for encoding appended data. buf: Vec, + /// Metrics. metrics: Option>, + /// On commit, applies the queued pruning: the number of rows to delete and, if applicable, + /// the last block the prune ends at. + prune_on_commit: Option<(u64, Option)>, } impl StaticFileProviderRW { @@ -45,7 +52,14 @@ impl StaticFileProviderRW { metrics: Option>, ) -> ProviderResult { let (writer, data_path) = Self::open(segment, block, reader.clone(), metrics.clone())?; - Ok(Self { writer, data_path, buf: Vec::with_capacity(100), reader, metrics }) + Ok(Self { + writer, + data_path, + buf: Vec::with_capacity(100), + reader, + metrics, + prune_on_commit: None, + }) } fn open( @@ -100,6 +114,18 @@ impl StaticFileProviderRW { pub fn commit(&mut self) -> ProviderResult<()> { let start = Instant::now(); + // Truncates the data file if instructed to. + if let Some((to_delete, last_block_number)) = self.prune_on_commit.take() { + match self.writer.user_header().segment() { + StaticFileSegment::Headers => self.prune_header_data(to_delete)?, + StaticFileSegment::Transactions => self + .prune_transaction_data(to_delete, last_block_number.expect("should exist"))?, + StaticFileSegment::Receipts => { + self.prune_receipt_data(to_delete, last_block_number.expect("should exist"))? 
+ } + } + } + + // Commits offsets and new user_header to disk self.writer.commit().map_err(|e| ProviderError::NippyJar(e.to_string()))?; @@ -225,7 +251,7 @@ impl StaticFileProviderRW { /// Verifies if the incoming block number matches the next expected block number /// for a static file. This ensures data continuity when adding new blocks. fn check_next_block_number( - &mut self, + &self, expected_block_number: u64, segment: StaticFileSegment, ) -> ProviderResult<()> { @@ -372,6 +398,7 @@ impl StaticFileProviderRW { hash: BlockHash, ) -> ProviderResult { let start = Instant::now(); + self.ensure_no_queued_prune()?; debug_assert!(self.writer.user_header().segment() == StaticFileSegment::Headers); @@ -404,6 +431,7 @@ impl StaticFileProviderRW { tx: TransactionSignedNoHash, ) -> ProviderResult { let start = Instant::now(); + self.ensure_no_queued_prune()?; let result = self.append_with_tx_number(StaticFileSegment::Transactions, tx_num, tx)?; @@ -430,6 +458,7 @@ impl StaticFileProviderRW { receipt: Receipt, ) -> ProviderResult { let start = Instant::now(); + self.ensure_no_queued_prune()?; let result = self.append_with_tx_number(StaticFileSegment::Receipts, tx_num, receipt)?; @@ -444,13 +473,64 @@ impl StaticFileProviderRW { Ok(result) } - /// Removes the last `number` of transactions from static files. + /// Adds an instruction to prune `to_delete` transactions during commit. /// - /// # Note - /// Commits to the configuration file at the end. + /// Note: `last_block` refers to the block the unwind ends at. pub fn prune_transactions( &mut self, - number: u64, + to_delete: u64, + last_block: BlockNumber, + ) -> ProviderResult<()> { + debug_assert_eq!(self.writer.user_header().segment(), StaticFileSegment::Transactions); + self.queue_prune(to_delete, Some(last_block)) + } + + /// Adds an instruction to prune `to_delete` receipts during commit. + /// + /// Note: `last_block` refers to the block the unwind ends at. + pub fn prune_receipts( + &mut self, + to_delete: u64, + last_block: BlockNumber, + ) -> ProviderResult<()> { + debug_assert_eq!(self.writer.user_header().segment(), StaticFileSegment::Receipts); + self.queue_prune(to_delete, Some(last_block)) + } + + /// Adds an instruction to prune `to_delete` headers during commit. + pub fn prune_headers(&mut self, to_delete: u64) -> ProviderResult<()> { + debug_assert_eq!(self.writer.user_header().segment(), StaticFileSegment::Headers); + self.queue_prune(to_delete, None) + } + + /// Adds an instruction to prune `to_delete` elements during commit. + /// + /// Note: `last_block` refers to the block the unwind ends at if dealing with transaction-based + /// data. + fn queue_prune( + &mut self, + to_delete: u64, + last_block: Option, + ) -> ProviderResult<()> { + self.ensure_no_queued_prune()?; + self.prune_on_commit = Some((to_delete, last_block)); + Ok(()) + } + + /// Returns an error if there is a pruning instruction that needs to be applied. + fn ensure_no_queued_prune(&self) -> ProviderResult<()> { + if self.prune_on_commit.is_some() { + return Err(ProviderError::NippyJar( + "Pruning should be committed before appending or pruning more data".to_string(), + )); + } + Ok(()) + } + + /// Removes the last `to_delete` transactions from the data file. 
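Before the pruning internals that follow, a compressed sketch of the queue-then-commit flow these methods implement (an in-memory toy, not the real writer):

```rust
struct MiniWriter {
    rows: Vec<u8>,
    prune_on_commit: Option<u64>,
}

impl MiniWriter {
    fn append(&mut self, row: u8) -> Result<(), String> {
        self.ensure_no_queued_prune()?; // appends are refused while a prune is pending
        self.rows.push(row);
        Ok(())
    }

    fn queue_prune(&mut self, to_delete: u64) -> Result<(), String> {
        self.ensure_no_queued_prune()?; // at most one pending instruction
        self.prune_on_commit = Some(to_delete);
        Ok(())
    }

    fn ensure_no_queued_prune(&self) -> Result<(), String> {
        if self.prune_on_commit.is_some() {
            return Err("pruning should be committed before appending or pruning more data".into());
        }
        Ok(())
    }

    fn commit(&mut self) {
        // Deferring the truncation to commit keeps the prune atomic with the
        // offsets/header flush the real writer performs right after.
        if let Some(to_delete) = self.prune_on_commit.take() {
            let keep = self.rows.len().saturating_sub(to_delete as usize);
            self.rows.truncate(keep);
        }
    }
}
```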
+ fn prune_transaction_data( + &mut self, + to_delete: u64, last_block: BlockNumber, ) -> ProviderResult<()> { let start = Instant::now(); @@ -458,7 +538,7 @@ impl StaticFileProviderRW { let segment = StaticFileSegment::Transactions; debug_assert!(self.writer.user_header().segment() == segment); - self.truncate(segment, number, Some(last_block))?; + self.truncate(segment, to_delete, Some(last_block))?; if let Some(metrics) = &self.metrics { metrics.record_segment_operation( @@ -471,11 +551,8 @@ impl StaticFileProviderRW { Ok(()) } - /// Prunes `to_delete` number of receipts from static_files. - /// - /// # Note - /// Commits to the configuration file at the end. - pub fn prune_receipts( + /// Prunes the last `to_delete` receipts from the data file. + fn prune_receipt_data( &mut self, to_delete: u64, last_block: BlockNumber, @@ -498,11 +575,8 @@ impl StaticFileProviderRW { Ok(()) } - /// Prunes `to_delete` number of headers from static_files. - /// - /// # Note - /// Commits to the configuration file at the end. - pub fn prune_headers(&mut self, to_delete: u64) -> ProviderResult<()> { + /// Prunes the last `to_delete` headers from the data file. + fn prune_header_data(&mut self, to_delete: u64) -> ProviderResult<()> { let start = Instant::now(); let segment = StaticFileSegment::Headers; diff --git a/crates/storage/provider/src/test_utils/blocks.rs b/crates/storage/provider/src/test_utils/blocks.rs index 39b6d3535..32ecb4897 100644 --- a/crates/storage/provider/src/test_utils/blocks.rs +++ b/crates/storage/provider/src/test_utils/blocks.rs @@ -62,14 +62,14 @@ const BLOCK_RLP: [u8; 610] = hex!("f9025ff901f7a0c86e8cc0310ae7c531c758678ddbfd1 /// Test chain with genesis, blocks, execution results /// that have valid changesets. #[derive(Debug)] -pub struct BlockChainTestData { +pub struct BlockchainTestData { /// Genesis pub genesis: SealedBlock, /// Blocks with its execution result pub blocks: Vec<(SealedBlockWithSenders, BundleStateWithReceipts)>, } -impl BlockChainTestData { +impl BlockchainTestData { /// Create test data with two blocks that are connected, specifying their block numbers. 
pub fn default_from_number(first: BlockNumber) -> Self { let one = block1(first); @@ -85,7 +85,7 @@ impl BlockChainTestData { } } -impl Default for BlockChainTestData { +impl Default for BlockchainTestData { fn default() -> Self { let one = block1(1); let mut extended_state = one.1.clone(); diff --git a/crates/storage/provider/src/test_utils/events.rs index 34c426661..baa6bc470 100644 --- a/crates/storage/provider/src/test_utils/events.rs +++ b/crates/storage/provider/src/test_utils/events.rs @@ -12,14 +12,14 @@ pub struct TestCanonStateSubscriptions { impl TestCanonStateSubscriptions { /// Adds new block commit to the queue that can be consumed with /// [`TestCanonStateSubscriptions::subscribe_to_canonical_state`] - pub fn add_next_commit(&mut self, new: Arc<Chain>) { + pub fn add_next_commit(&self, new: Arc<Chain>) { let event = CanonStateNotification::Commit { new }; self.canon_notif_tx.lock().as_mut().unwrap().retain(|tx| tx.send(event.clone()).is_ok()) } /// Adds reorg to the queue that can be consumed with /// [`TestCanonStateSubscriptions::subscribe_to_canonical_state`] - pub fn add_next_reorg(&mut self, old: Arc<Chain>, new: Arc<Chain>) { + pub fn add_next_reorg(&self, old: Arc<Chain>, new: Arc<Chain>) { let event = CanonStateNotification::Reorg { old, new }; self.canon_notif_tx.lock().as_mut().unwrap().retain(|tx| tx.send(event.clone()).is_ok()) } diff --git a/crates/storage/provider/src/test_utils/executor.rs deleted file mode 100644 index 8ac963e93..000000000 --- a/crates/storage/provider/src/test_utils/executor.rs +++ /dev/null @@ -1,71 +0,0 @@ -use crate::{ - bundle_state::BundleStateWithReceipts, BlockExecutor, ExecutorFactory, PrunableBlockExecutor, - StateProvider, -}; -use parking_lot::Mutex; -use reth_interfaces::executor::BlockExecutionError; -use reth_primitives::{BlockNumber, BlockWithSenders, PruneModes, Receipt, U256}; -use std::sync::Arc; -/// Test executor with mocked result. -#[derive(Debug)] -pub struct TestExecutor(pub Option<BundleStateWithReceipts>); - -impl BlockExecutor for TestExecutor { - type Error = BlockExecutionError; - - fn execute_and_verify_receipt( - &mut self, - _block: &BlockWithSenders, - _total_difficulty: U256, - ) -> Result<(), BlockExecutionError> { - if self.0.is_none() { - return Err(BlockExecutionError::UnavailableForTest) - } - Ok(()) - } - - fn execute_transactions( - &mut self, - _block: &BlockWithSenders, - _total_difficulty: U256, - ) -> Result<(Vec<Receipt>, u64), BlockExecutionError> { - Err(BlockExecutionError::UnavailableForTest) - } - - fn take_output_state(&mut self) -> BundleStateWithReceipts { - self.0.clone().unwrap_or_default() - } - - fn size_hint(&self) -> Option<usize> { - None - } -} - -impl PrunableBlockExecutor for TestExecutor { - fn set_tip(&mut self, _tip: BlockNumber) {} - - fn set_prune_modes(&mut self, _prune_modes: PruneModes) {} -} - -/// Executor factory with pre-set execution results.
-#[derive(Clone, Debug, Default)] -pub struct TestExecutorFactory { - exec_results: Arc<Mutex<Vec<BundleStateWithReceipts>>>, -} - -impl TestExecutorFactory { - /// Extend the mocked execution results - pub fn extend(&self, results: Vec<BundleStateWithReceipts>) { - self.exec_results.lock().extend(results); - } -} - -impl ExecutorFactory for TestExecutorFactory { - fn with_state<'a, SP: StateProvider + 'a>( - &'a self, - _sp: SP, - ) -> Box<dyn PrunableBlockExecutor<Error = <TestExecutor as BlockExecutor>::Error> + 'a> { - let exec_res = self.exec_results.lock().pop(); - Box::new(TestExecutor(exec_res)) - } -} diff --git a/crates/storage/provider/src/test_utils/mod.rs index f4a5626f6..2f5462309 100644 --- a/crates/storage/provider/src/test_utils/mod.rs +++ b/crates/storage/provider/src/test_utils/mod.rs @@ -8,12 +8,10 @@ use std::sync::Arc; pub mod blocks; mod events; -mod executor; mod mock; mod noop; pub use events::TestCanonStateSubscriptions; -pub use executor::{TestExecutor, TestExecutorFactory}; pub use mock::{ExtendedAccount, MockEthProvider}; pub use noop::NoopProvider; diff --git a/crates/storage/provider/src/traits/chain_info.rs index 82d879df4..5e6379f01 100644 --- a/crates/storage/provider/src/traits/chain_info.rs +++ b/crates/storage/provider/src/traits/chain_info.rs @@ -1,5 +1,5 @@ -use reth_interfaces::consensus::ForkchoiceState; use reth_primitives::SealedHeader; +use reth_rpc_types::engine::ForkchoiceState; use std::time::Instant; /// A type that can track updates related to fork choice updates. diff --git a/crates/storage/provider/src/traits/executor.rs deleted file mode 100644 index f12d64169..000000000 --- a/crates/storage/provider/src/traits/executor.rs +++ /dev/null @@ -1,68 +0,0 @@ -//! Executor Factory - -use crate::{bundle_state::BundleStateWithReceipts, StateProvider}; -use reth_interfaces::executor::BlockExecutionError; -use reth_primitives::{BlockNumber, BlockWithSenders, PruneModes, Receipt, U256}; - -/// A factory capable of creating an executor with the given state provider. -pub trait ExecutorFactory: Send + Sync + 'static { - /// Executor with [`StateProvider`] - fn with_state<'a, SP: StateProvider + 'a>( - &'a self, - sp: SP, - ) -> Box<dyn PrunableBlockExecutor<Error = BlockExecutionError> + 'a>; -} - -/// An executor capable of executing a block. -/// -/// This type is capable of executing (multiple) blocks by applying the state changes made by each -/// block. The final state of the executor can be extracted using -/// [`Self::take_output_state`]. -pub trait BlockExecutor { - /// The error type returned by the executor. - type Error; - - /// Executes the entire block and verifies: - /// - receipts (receipts root) - /// - /// This will update the state of the executor with the changes made by the block. - fn execute_and_verify_receipt( - &mut self, - block: &BlockWithSenders, - total_difficulty: U256, - ) -> Result<(), Self::Error>; - - /// Runs the provided transactions and commits their state to the run-time database. - /// - /// The returned [BundleStateWithReceipts] can be used to persist the changes to disk, and - /// contains the changes made by each transaction. - /// - /// The changes in [BundleStateWithReceipts] have a transition ID associated with them: there is - /// one transition ID for each transaction (with the first executed tx having transition ID - /// 0, and so on). - /// - /// The second returned value represents the total gas used by this block of transactions.
- /// - /// See [execute_and_verify_receipt](BlockExecutor::execute_and_verify_receipt) for more - /// details. - fn execute_transactions( - &mut self, - block: &BlockWithSenders, - total_difficulty: U256, - ) -> Result<(Vec<Receipt>, u64), Self::Error>; - - /// Return bundle state. This is output of executed blocks. - fn take_output_state(&mut self) -> BundleStateWithReceipts; - - /// Returns the size hint of current in-memory changes. - fn size_hint(&self) -> Option<usize>; -} - -/// A [BlockExecutor] capable of in-memory pruning of the data that will be written to the database. -pub trait PrunableBlockExecutor: BlockExecutor { - /// Set tip - highest known block number. - fn set_tip(&mut self, tip: BlockNumber); - - /// Set prune modes. - fn set_prune_modes(&mut self, prune_modes: PruneModes); -} diff --git a/crates/storage/provider/src/traits/full.rs index e73357f4a..9214cc273 100644 --- a/crates/storage/provider/src/traits/full.rs +++ b/crates/storage/provider/src/traits/full.rs @@ -2,13 +2,15 @@ use crate::{ AccountReader, BlockReaderIdExt, CanonStateSubscriptions, ChainSpecProvider, ChangeSetReader, - DatabaseProviderFactory, EvmEnvProvider, StateProviderFactory, + DatabaseProviderFactory, EvmEnvProvider, StageCheckpointReader, StateProviderFactory, + StaticFileProviderFactory, }; use reth_db::database::Database; /// Helper trait to unify all provider traits for simplicity. pub trait FullProvider<DB: Database>: DatabaseProviderFactory<DB> + + StaticFileProviderFactory + BlockReaderIdExt + AccountReader + StateProviderFactory @@ -16,6 +18,7 @@ + ChainSpecProvider + ChangeSetReader + CanonStateSubscriptions + + StageCheckpointReader + Clone + Unpin + 'static @@ -24,6 +27,7 @@ impl<DB: Database, T> FullProvider<DB> for T where T: DatabaseProviderFactory<DB> + + StaticFileProviderFactory + BlockReaderIdExt + AccountReader + StateProviderFactory @@ -31,6 +35,7 @@ impl<DB: Database, T> FullProvider<DB> for T where + ChainSpecProvider + ChangeSetReader + CanonStateSubscriptions + + StageCheckpointReader + Clone + Unpin + 'static diff --git a/crates/storage/provider/src/traits/mod.rs index c9623cb0c..c966cd9ef 100644 --- a/crates/storage/provider/src/traits/mod.rs +++ b/crates/storage/provider/src/traits/mod.rs @@ -36,7 +36,7 @@ pub use receipts::{ReceiptProvider, ReceiptProviderIdExt}; mod state; pub use state::{ BlockchainTreePendingStateProvider, BundleStateDataProvider, StateProvider, StateProviderBox, - StateProviderFactory, + StateProviderFactory, StateWriter, }; mod trie; @@ -48,9 +48,6 @@ pub use transactions::{TransactionsProvider, TransactionsProviderExt}; mod withdrawals; pub use withdrawals::WithdrawalsProvider; -mod executor; -pub use executor::{BlockExecutor, ExecutorFactory, PrunableBlockExecutor}; - mod chain; pub use chain::{ CanonStateNotification, CanonStateNotificationSender, CanonStateNotificationStream, @@ -75,8 +72,14 @@ pub use prune_checkpoint::{PruneCheckpointReader, PruneCheckpointWriter}; mod database_provider; pub use database_provider::DatabaseProviderFactory; +mod static_file_provider; +pub use static_file_provider::StaticFileProviderFactory; + mod stats; pub use stats::StatsReader; mod full; pub use full::FullProvider; + +mod tree_viewer; +pub use tree_viewer::TreeViewer; diff --git a/crates/storage/provider/src/traits/state.rs index b5251ca75..4cb74dec6 100644 --- a/crates/storage/provider/src/traits/state.rs +++ 
b/crates/storage/provider/src/traits/state.rs @@ -1,11 +1,16 @@ use super::AccountReader; -use crate::{BlockHashReader, BlockIdReader, BundleStateWithReceipts, StateRootProvider}; +use crate::{ + providers::StaticFileProviderRWRefMut, BlockHashReader, BlockIdReader, BundleStateWithReceipts, + StateRootProvider, +}; use auto_impl::auto_impl; +use reth_db::transaction::{DbTx, DbTxMut}; use reth_interfaces::provider::{ProviderError, ProviderResult}; use reth_primitives::{ trie::AccountProof, Address, BlockHash, BlockId, BlockNumHash, BlockNumber, BlockNumberOrTag, Bytecode, StorageKey, StorageValue, B256, KECCAK_EMPTY, U256, }; +use revm::db::OriginalValuesKnown; /// Type alias of boxed [StateProvider]. pub type StateProviderBox = Box<dyn StateProvider>; @@ -226,3 +231,17 @@ pub trait BundleStateDataProvider: Send + Sync { /// Needed to create state provider. fn canonical_fork(&self) -> BlockNumHash; } + +/// A helper trait for [BundleStateWithReceipts] to write state and receipts to storage. +pub trait StateWriter { + /// Write the data and receipts to the database or static files if `static_file_producer` is + /// `Some`. It should be `None` if there is any kind of pruning/filtering over the receipts. + fn write_to_storage<TX>( + self, + tx: &TX, + static_file_producer: Option<StaticFileProviderRWRefMut<'_>>, + is_value_known: OriginalValuesKnown, + ) -> ProviderResult<()> + where + TX: DbTxMut + DbTx; +} diff --git a/crates/storage/provider/src/traits/static_file_provider.rs new file mode 100644 index 000000000..24d695692 --- /dev/null +++ b/crates/storage/provider/src/traits/static_file_provider.rs @@ -0,0 +1,7 @@ +use crate::providers::StaticFileProvider; + +/// Static file provider factory. +pub trait StaticFileProviderFactory { + /// Create new instance of static file provider. + fn static_file_provider(&self) -> StaticFileProvider; +} diff --git a/crates/storage/provider/src/traits/transactions.rs index 9041593b5..3e798bb41 100644 --- a/crates/storage/provider/src/traits/transactions.rs +++ b/crates/storage/provider/src/traits/transactions.rs @@ -15,7 +15,7 @@ pub trait TransactionsProvider: BlockNumReader + Send + Sync { /// Returns None if the transaction is not found. fn transaction_id(&self, tx_hash: TxHash) -> ProviderResult<Option<TxNumber>>; - /// Get transaction by id, computes hash everytime so more expensive. + /// Get transaction by id, computes the hash every time, so it is more expensive. fn transaction_by_id(&self, id: TxNumber) -> ProviderResult<Option<TransactionSigned>>; /// Get transaction by id without computing the hash.
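Taken together, the two new traits split responsibilities: `StaticFileProviderFactory` is how a caller obtains the `StaticFileProvider`, while `StateWriter` is what `BundleStateWithReceipts` implements to flush state, and optionally receipts, to storage. A hedged sketch of how a caller might wire them up; the `latest_writer` accessor and the segment choice are assumptions for illustration, not part of this diff:

// Sketch only: persist execution output, routing receipts to static files.
fn persist_state<TX, P>(
    provider: &P,
    tx: &TX,
    state: BundleStateWithReceipts,
) -> ProviderResult<()>
where
    TX: DbTxMut + DbTx,
    P: StaticFileProviderFactory,
{
    // Obtain the static file provider through the new factory trait.
    let static_file_provider = provider.static_file_provider();
    // Assumed accessor returning a StaticFileProviderRWRefMut for the
    // Receipts segment. Passing `None` below would instead write receipts
    // to the database, which is required when receipts are pruned/filtered.
    let receipts_writer = static_file_provider.latest_writer(StaticFileSegment::Receipts)?;
    state.write_to_storage(tx, Some(receipts_writer), OriginalValuesKnown::Yes)
}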
diff --git a/crates/storage/provider/src/traits/tree_viewer.rs new file mode 100644 index 000000000..db3b19c4d --- /dev/null +++ b/crates/storage/provider/src/traits/tree_viewer.rs @@ -0,0 +1,22 @@ +use crate::{BlockchainTreePendingStateProvider, CanonStateSubscriptions}; + +use reth_interfaces::blockchain_tree::{BlockchainTreeEngine, BlockchainTreeViewer}; + +/// Helper trait to combine all the traits we need for the BlockchainProvider +/// +/// This is a temporary solution +pub trait TreeViewer: + BlockchainTreeViewer + + BlockchainTreePendingStateProvider + + CanonStateSubscriptions + + BlockchainTreeEngine +{ +} + +impl<T> TreeViewer for T where + T: BlockchainTreeViewer + + BlockchainTreePendingStateProvider + + CanonStateSubscriptions + + BlockchainTreeEngine +{ +} diff --git a/crates/tasks/src/lib.rs index 0f93e5bc5..3e526a344 100644 --- a/crates/tasks/src/lib.rs +++ b/crates/tasks/src/lib.rs @@ -19,12 +19,12 @@ use crate::{ use dyn_clone::DynClone; use futures_util::{ future::{select, BoxFuture}, - pin_mut, Future, FutureExt, TryFutureExt, + Future, FutureExt, TryFutureExt, }; use std::{ any::Any, fmt::{Display, Formatter}, - pin::Pin, + pin::{pin, Pin}, sync::{ atomic::{AtomicUsize, Ordering}, Arc, @@ -334,7 +334,7 @@ impl TaskExecutor { async move { // Create an instance of IncCounterOnDrop with the counter to increment let _inc_counter_on_drop = IncCounterOnDrop::new(finished_regular_tasks_metrics); - pin_mut!(fut); + let fut = pin!(fut); let _ = select(on_shutdown, fut).await; } } @@ -409,7 +409,7 @@ impl TaskExecutor { let task = async move { // Create an instance of IncCounterOnDrop with the counter to increment let _inc_counter_on_drop = IncCounterOnDrop::new(finished_critical_tasks_metrics); - pin_mut!(task); + let task = pin!(task); let _ = select(on_shutdown, task).await; }; diff --git a/crates/transaction-pool/Cargo.toml index 5b6b85486..ebb6e497f 100644 --- a/crates/transaction-pool/Cargo.toml +++ b/crates/transaction-pool/Cargo.toml @@ -20,6 +20,7 @@ reth-tasks.workspace = true revm.workspace = true alloy-rlp.workspace = true reth-revm = { workspace = true, optional = true } +reth-network-types.workspace = true # async/futures futures-util.workspace = true @@ -35,9 +36,9 @@ metrics.workspace = true aquamarine.workspace = true thiserror.workspace = true tracing.workspace = true +rustc-hash.workspace = true schnellru.workspace = true serde = { workspace = true, features = ["derive", "rc"], optional = true } -fnv = "1.0.7" bitflags.workspace = true auto_impl.workspace = true smallvec.workspace = true
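The tasks change above is a straight migration from `futures_util::pin_mut!` to the std `pin!` macro, stable since Rust 1.68: both pin a future to the stack so `select` can poll it, but `pin!` rebinds the name as an expression and drops the extra macro dependency. A self-contained sketch of the pattern, independent of the `TaskExecutor` internals:

use std::pin::pin;

async fn run_until<F, S>(fut: F, on_shutdown: S)
where
    F: std::future::Future,
    S: std::future::Future,
{
    // `pin!` shadows each binding with a `Pin<&mut _>`, which is exactly
    // what `pin_mut!` produced in place.
    let fut = pin!(fut);
    let on_shutdown = pin!(on_shutdown);
    // `select` requires `Unpin` futures; `Pin<&mut F>` satisfies that.
    let _ = futures_util::future::select(on_shutdown, fut).await;
}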
diff --git a/crates/transaction-pool/src/blobstore/disk.rs index ae6ff97b2..5f44c87f5 100644 --- a/crates/transaction-pool/src/blobstore/disk.rs +++ b/crates/transaction-pool/src/blobstore/disk.rs @@ -445,7 +445,6 @@ pub enum OpenDiskFileBlobStore { #[cfg(test)] mod tests { use super::*; - use proptest::{prelude::*, strategy::ValueTree, test_runner::TestRunner}; use std::sync::atomic::Ordering; fn tmp_store() -> (DiskFileBlobStore, tempfile::TempDir) { @@ -455,11 +454,15 @@ mod tests { } fn rng_blobs(num: usize) -> Vec<(TxHash, BlobTransactionSidecar)> { - let mut runner = TestRunner::new(Default::default()); - prop::collection::vec(any::<(TxHash, BlobTransactionSidecar)>(), num) - .new_tree(&mut runner) - .unwrap() - .current() + let mut rng = rand::thread_rng(); + (0..num) + .map(|_| { + let tx = TxHash::random_with(&mut rng); + let blob = + BlobTransactionSidecar { blobs: vec![], commitments: vec![], proofs: vec![] }; + (tx, blob) + }) + .collect() } #[test] diff --git a/crates/transaction-pool/src/identifier.rs index 6ec1527bd..4e4bec4d1 100644 --- a/crates/transaction-pool/src/identifier.rs +++ b/crates/transaction-pool/src/identifier.rs @@ -1,5 +1,5 @@ -use fnv::FnvHashMap; use reth_primitives::Address; +use rustc_hash::FxHashMap; use std::collections::HashMap; /// An internal mapping of addresses. @@ -13,7 +13,7 @@ pub(crate) struct SenderIdentifiers { /// Assigned `SenderId` for an `Address`. address_to_id: HashMap<Address, SenderId>, /// Reverse mapping of `SenderId` to `Address`. - sender_to_address: FnvHashMap<SenderId, Address>, + sender_to_address: FxHashMap<SenderId, Address>, } impl SenderIdentifiers { diff --git a/crates/transaction-pool/src/noop.rs index 5f2a11048..b550a2bc1 100644 --- a/crates/transaction-pool/src/noop.rs +++ b/crates/transaction-pool/src/noop.rs @@ -11,10 +11,10 @@ use crate::{ TransactionListenerKind, }, validate::ValidTransaction, - AllPoolTransactions, AllTransactionsEvents, BestTransactions, BlockInfo, EthPooledTransaction, - NewTransactionEvent, PoolResult, PoolSize, PoolTransaction, PooledTransactionsElement, - PropagatedTransactions, TransactionEvents, TransactionOrigin, TransactionPool, - TransactionValidationOutcome, TransactionValidator, ValidPoolTransaction, + AllPoolTransactions, AllTransactionsEvents, BestTransactions, BlockInfo, EthPoolTransaction, + EthPooledTransaction, NewTransactionEvent, PoolResult, PoolSize, PoolTransaction, + PooledTransactionsElement, PropagatedTransactions, TransactionEvents, TransactionOrigin, + TransactionPool, TransactionValidationOutcome, TransactionValidator, ValidPoolTransaction, }; use reth_eth_wire::HandleMempoolData; use reth_primitives::{Address, BlobTransactionSidecar, TxHash, U256}; @@ -252,20 +252,21 @@ pub struct MockTransactionValidator<T> { _marker: PhantomData<T>, } -impl<T: PoolTransaction> TransactionValidator for MockTransactionValidator<T> { +impl<T: EthPoolTransaction> TransactionValidator for MockTransactionValidator<T> { type Transaction = T; async fn validate_transaction( &self, origin: TransactionOrigin, - transaction: Self::Transaction, + mut transaction: Self::Transaction, ) -> TransactionValidationOutcome<Self::Transaction> { + let maybe_sidecar = transaction.take_blob().maybe_sidecar().cloned(); // we return `balance: U256::MAX` to simulate a valid transaction which will never go into // overdraft TransactionValidationOutcome::Valid { balance: U256::MAX, state_nonce: 0, - transaction: ValidTransaction::Valid(transaction), + transaction: ValidTransaction::new(transaction, maybe_sidecar), propagate: match origin { TransactionOrigin::External => true, TransactionOrigin::Local => self.propagate_local, @@ -285,7 +286,7 @@ impl<T> MockTransactionValidator<T> { impl<T> Default for MockTransactionValidator<T> { fn default() -> Self { - MockTransactionValidator { propagate_local: true, _marker: Default::default() } + Self { propagate_local: true, _marker: Default::default() } } }
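The `fnv` to `rustc-hash` swap here and in the pool internals below is mechanical: `FxHashMap` is, like `FnvHashMap`, a `std::collections::HashMap` behind a faster non-cryptographic hasher, a good fit for small integer keys such as `SenderId`. A minimal sketch of the drop-in usage, with key and value types simplified for illustration:

use rustc_hash::FxHashMap;

fn main() {
    // Same API as std's HashMap; only the hasher differs, so call sites in
    // identifier.rs, parked.rs, and txpool.rs are unchanged. Construction
    // goes through `default()` since the hasher is not `RandomState`.
    let mut sender_to_address: FxHashMap<u64, [u8; 20]> = FxHashMap::default();
    sender_to_address.insert(1, [0u8; 20]);
    assert_eq!(sender_to_address.get(&1), Some(&[0u8; 20]));
}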
diff --git a/crates/transaction-pool/src/pool/parked.rs index 2815deaee..ef0766bed 100644 --- a/crates/transaction-pool/src/pool/parked.rs +++ b/crates/transaction-pool/src/pool/parked.rs @@ -3,7 +3,7 @@ use crate::{ pool::size::SizeTracker, PoolTransaction, SubPoolLimit, ValidPoolTransaction, TXPOOL_MAX_ACCOUNT_SLOTS_PER_SENDER, }; -use fnv::FnvHashMap; +use rustc_hash::FxHashMap; use smallvec::SmallVec; use std::{ cmp::Ordering, @@ -40,7 +40,7 @@ pub struct ParkedPool<T: ParkedOrd> { last_sender_submission: BTreeSet<SubmissionSenderId>, /// Keeps track of the number of transactions in the pool by the sender and the last submission /// id. - sender_transaction_count: FnvHashMap<SenderId, SenderTransactionCount>, + sender_transaction_count: FxHashMap<SenderId, SenderTransactionCount>, /// Keeps track of the size of this pool. /// /// See also [`PoolTransaction::size`]. diff --git a/crates/transaction-pool/src/pool/pending.rs index 970321323..7e733a659 100644 --- a/crates/transaction-pool/src/pool/pending.rs +++ b/crates/transaction-pool/src/pool/pending.rs @@ -399,7 +399,7 @@ impl<T: TransactionOrdering> PendingPool<T> { unique_senders = self.highest_nonces.len(); non_local_senders -= unique_removed; - // we can re-use the temp array + // we can reuse the temp array removed.clear(); // loop through the highest nonces set, removing transactions until we reach the limit diff --git a/crates/transaction-pool/src/pool/txpool.rs index dfc63c921..bcad71edb 100644 --- a/crates/transaction-pool/src/pool/txpool.rs +++ b/crates/transaction-pool/src/pool/txpool.rs @@ -18,7 +18,6 @@ use crate::{ PoolConfig, PoolResult, PoolTransaction, PriceBumpConfig, TransactionOrdering, ValidPoolTransaction, U256, }; -use fnv::FnvHashMap; use itertools::Itertools; use reth_primitives::{ constants::{ @@ -26,6 +25,7 @@ }, Address, TxHash, B256, }; +use rustc_hash::FxHashMap; use smallvec::SmallVec; use std::{ cmp::Ordering, @@ -44,7 +44,7 @@ use tracing::trace; /// include_mmd!("docs/mermaid/txpool.mmd") pub struct TxPool<T: TransactionOrdering> { /// Contains the currently known information about the senders. - sender_info: FnvHashMap<SenderId, SenderInfo>, + sender_info: FxHashMap<SenderId, SenderInfo>, /// pending subpool /// /// Holds transactions that are ready to be executed on the current state. @@ -428,7 +428,7 @@ impl<T: TransactionOrdering> TxPool<T> { } /// Update sub-pools size metrics. - pub(crate) fn update_size_metrics(&mut self) { + pub(crate) fn update_size_metrics(&self) { let stats = self.size(); self.metrics.pending_pool_transactions.set(stats.pending as f64); self.metrics.pending_pool_size_bytes.set(stats.pending_size as f64); @@ -903,7 +903,7 @@ pub(crate) struct AllTransactions<T: PoolTransaction> { /// _All_ transaction in the pool sorted by their sender and nonce pair. txs: BTreeMap<TransactionId, PoolInternalTransaction<T>>, /// Tracks the number of transactions by sender that are currently in the pool. - tx_counter: FnvHashMap<SenderId, usize>, + tx_counter: FxHashMap<SenderId, usize>, /// The current block number the pool keeps track of. last_seen_block_number: u64, /// The current block hash the pool keeps track of. @@ -990,7 +990,7 @@ impl<T: PoolTransaction> AllTransactions<T> { } /// Updates the size metrics - pub(crate) fn update_size_metrics(&mut self) { + pub(crate) fn update_size_metrics(&self) { self.metrics.all_transactions_by_hash.set(self.by_hash.len() as f64); self.metrics.all_transactions_by_id.set(self.txs.len() as f64); } @@ -1766,8 +1766,8 @@ pub(crate) struct PoolInternalTransaction<T: PoolTransaction> { pub(crate) transaction: Arc<ValidPoolTransaction<T>>, /// The `SubPool` that currently contains this transaction. pub(crate) subpool: SubPool, - /// Keeps track of the current state of the transaction and therefor in which subpool it should - /// reside + /// Keeps track of the current state of the transaction and therefore in which subpool it + /// should reside pub(crate) state: TxState, /// The total cost of all transactions before this transaction.
/// diff --git a/crates/transaction-pool/src/test_utils/gen.rs index 52a3127c7..5c335e5d6 100644 --- a/crates/transaction-pool/src/test_utils/gen.rs +++ b/crates/transaction-pool/src/test_utils/gen.rs @@ -2,8 +2,8 @@ use crate::EthPooledTransaction; use rand::Rng; use reth_primitives::{ constants::MIN_PROTOCOL_BASE_FEE, sign_message, AccessList, Address, Bytes, Transaction, - TransactionKind, TransactionSigned, TryFromRecoveredTransaction, TxEip1559, TxEip4844, - TxLegacy, B256, MAINNET, U256, + TransactionSigned, TryFromRecoveredTransaction, TxEip1559, TxEip4844, TxKind, TxLegacy, B256, + MAINNET, U256, }; /// A generator for transactions for testing purposes. @@ -129,7 +129,7 @@ pub struct TransactionBuilder { /// processing. pub max_priority_fee_per_gas: u128, /// The recipient or contract address of the transaction. - pub to: TransactionKind, + pub to: TxKind, /// The value to be transferred in the transaction. pub value: U256, /// The list of addresses and storage keys that the transaction can access. @@ -246,7 +246,7 @@ impl TransactionBuilder { /// Sets the recipient or contract address for the transaction builder. pub const fn to(mut self, to: Address) -> Self { - self.to = TransactionKind::Call(to); + self.to = TxKind::Call(to); self } @@ -306,7 +306,7 @@ impl TransactionBuilder { /// Sets the recipient or contract address for the transaction, mutable reference version. pub fn set_to(&mut self, to: Address) -> &mut Self { - self.to = TransactionKind::Call(to); + self.to = to.into(); self } diff --git a/crates/transaction-pool/src/test_utils/mock.rs index d250b6c10..948c47109 100644 --- a/crates/transaction-pool/src/test_utils/mock.rs +++ b/crates/transaction-pool/src/test_utils/mock.rs @@ -4,7 +4,8 @@ use crate::{ identifier::{SenderIdentifiers, TransactionId}, pool::txpool::TxPool, traits::TransactionOrigin, - CoinbaseTipOrdering, PoolTransaction, ValidPoolTransaction, + CoinbaseTipOrdering, EthBlobTransactionSidecar, EthPoolTransaction, PoolTransaction, + ValidPoolTransaction, }; use paste::paste; use rand::{ @@ -14,11 +15,11 @@ use rand::{ use reth_primitives::{ constants::{eip4844::DATA_GAS_PER_BLOB, MIN_PROTOCOL_BASE_FEE}, transaction::TryFromRecoveredTransactionError, - AccessList, Address, BlobTransactionSidecar, Bytes, FromRecoveredPooledTransaction, - IntoRecoveredTransaction, PooledTransactionsElementEcRecovered, Signature, Transaction, - TransactionKind, TransactionSigned, TransactionSignedEcRecovered, TryFromRecoveredTransaction, - TxEip1559, TxEip2930, TxEip4844, TxHash, TxLegacy, TxType, B256, EIP1559_TX_TYPE_ID, - EIP2930_TX_TYPE_ID, EIP4844_TX_TYPE_ID, LEGACY_TX_TYPE_ID, U256, + AccessList, Address, BlobTransactionSidecar, BlobTransactionValidationError, Bytes, ChainId, + FromRecoveredPooledTransaction, IntoRecoveredTransaction, PooledTransactionsElementEcRecovered, + Signature, Transaction, TransactionSigned, TransactionSignedEcRecovered, + TryFromRecoveredTransaction, TxEip1559, TxEip2930, TxEip4844, TxHash, TxKind, TxLegacy, TxType, + B256, EIP1559_TX_TYPE_ID, EIP2930_TX_TYPE_ID, EIP4844_TX_TYPE_ID, LEGACY_TX_TYPE_ID, U256, }; use std::{ops::Range, sync::Arc, time::Instant, vec::IntoIter}; @@ -94,6 +95,8 @@ macro_rules! make_setters_getters { pub enum MockTransaction { /// Legacy transaction type. Legacy { + /// The chain id of the transaction. + chain_id: Option<ChainId>, /// The hash of the transaction. hash: B256, /// The sender's address.
@@ -105,7 +108,7 @@ pub enum MockTransaction { /// The gas limit for the transaction. gas_limit: u64, /// The transaction's destination. - to: TransactionKind, + to: TxKind, /// The value of the transaction. value: U256, /// The transaction input data. @@ -113,8 +116,35 @@ pub enum MockTransaction { /// The size of the transaction, returned in the implementation of [PoolTransaction]. size: usize, }, + /// EIP-2930 transaction type. + Eip2930 { + /// The chain id of the transaction. + chain_id: ChainId, + /// The hash of the transaction. + hash: B256, + /// The sender's address. + sender: Address, + /// The transaction nonce. + nonce: u64, + /// The transaction's destination. + to: TxKind, + /// The gas limit for the transaction. + gas_limit: u64, + /// The transaction input data. + input: Bytes, + /// The value of the transaction. + value: U256, + /// The gas price for the transaction. + gas_price: u128, + /// The access list associated with the transaction. + access_list: AccessList, + /// The size of the transaction, returned in the implementation of [PoolTransaction]. + size: usize, + }, /// EIP-1559 transaction type. Eip1559 { + /// The chain id of the transaction. + chain_id: ChainId, /// The hash of the transaction. hash: B256, /// The sender's address. @@ -128,11 +158,11 @@ pub enum MockTransaction { /// The gas limit for the transaction. gas_limit: u64, /// The transaction's destination. - to: TransactionKind, + to: TxKind, /// The value of the transaction. value: U256, /// The access list associated with the transaction. - accesslist: AccessList, + access_list: AccessList, /// The transaction input data. input: Bytes, /// The size of the transaction, returned in the implementation of [PoolTransaction]. @@ -140,6 +170,8 @@ pub enum MockTransaction { }, /// EIP-4844 transaction type. Eip4844 { + /// The chain id of the transaction. + chain_id: ChainId, /// The hash of the transaction. hash: B256, /// The sender's address. @@ -155,11 +187,11 @@ pub enum MockTransaction { /// The gas limit for the transaction. gas_limit: u64, /// The transaction's destination. - to: TransactionKind, + to: TxKind, /// The value of the transaction. value: U256, /// The access list associated with the transaction. - accesslist: AccessList, + access_list: AccessList, /// The transaction input data. input: Bytes, /// The sidecar information for the transaction. @@ -167,29 +199,6 @@ pub enum MockTransaction { /// The size of the transaction, returned in the implementation of [PoolTransaction]. size: usize, }, - /// EIP-2930 transaction type. - Eip2930 { - /// The hash of the transaction. - hash: B256, - /// The sender's address. - sender: Address, - /// The transaction nonce. - nonce: u64, - /// The transaction's destination. - to: TransactionKind, - /// The gas limit for the transaction. - gas_limit: u64, - /// The transaction input data. - input: Bytes, - /// The value of the transaction. - value: U256, - /// The gas price for the transaction. - gas_price: u128, - /// The access list associated with the transaction. - accesslist: AccessList, - /// The size of the transaction, returned in the implementation of [PoolTransaction]. 
- size: usize, - }, } // === impl MockTransaction === @@ -208,31 +217,50 @@ impl MockTransaction { /// Returns a new legacy transaction with random address and hash and empty values pub fn legacy() -> Self { MockTransaction::Legacy { + chain_id: Some(1), hash: B256::random(), sender: Address::random(), nonce: 0, gas_price: 0, gas_limit: 0, - to: TransactionKind::Call(Address::random()), + to: Address::random().into(), value: Default::default(), input: Default::default(), size: Default::default(), } } + /// Returns a new EIP2930 transaction with random address and hash and empty values + pub fn eip2930() -> Self { + MockTransaction::Eip2930 { + chain_id: 1, + hash: B256::random(), + sender: Address::random(), + nonce: 0, + to: Address::random().into(), + gas_limit: 0, + input: Bytes::new(), + value: Default::default(), + gas_price: 0, + access_list: Default::default(), + size: Default::default(), + } + } + /// Returns a new EIP1559 transaction with random address and hash and empty values pub fn eip1559() -> Self { MockTransaction::Eip1559 { + chain_id: 1, hash: B256::random(), sender: Address::random(), nonce: 0, max_fee_per_gas: MIN_PROTOCOL_BASE_FEE as u128, max_priority_fee_per_gas: MIN_PROTOCOL_BASE_FEE as u128, gas_limit: 0, - to: TransactionKind::Call(Address::random()), + to: Address::random().into(), value: Default::default(), input: Bytes::new(), - accesslist: Default::default(), + access_list: Default::default(), size: Default::default(), } } @@ -240,6 +268,7 @@ impl MockTransaction { /// Returns a new EIP4844 transaction with random address and hash and empty values pub fn eip4844() -> Self { MockTransaction::Eip4844 { + chain_id: 1, hash: B256::random(), sender: Address::random(), nonce: 0, @@ -247,10 +276,10 @@ impl MockTransaction { max_priority_fee_per_gas: MIN_PROTOCOL_BASE_FEE as u128, max_fee_per_blob_gas: DATA_GAS_PER_BLOB as u128, gas_limit: 0, - to: TransactionKind::Call(Address::random()), + to: Address::random().into(), value: Default::default(), input: Bytes::new(), - accesslist: Default::default(), + access_list: Default::default(), sidecar: Default::default(), size: Default::default(), } @@ -266,22 +295,6 @@ impl MockTransaction { transaction } - /// Returns a new EIP2930 transaction with random address and hash and empty values - pub fn eip2930() -> Self { - MockTransaction::Eip2930 { - hash: B256::random(), - sender: Address::random(), - nonce: 0, - to: TransactionKind::Call(Address::random()), - gas_limit: 0, - input: Bytes::new(), - value: Default::default(), - gas_price: 0, - accesslist: Default::default(), - size: Default::default(), - } - } - /// Creates a new transaction with the given [TxType]. /// /// See the default constructors for each of the transaction types: @@ -372,9 +385,9 @@ impl MockTransaction { pub fn set_accesslist(&mut self, list: AccessList) -> &mut Self { match self { MockTransaction::Legacy { .. } => {} - MockTransaction::Eip1559 { accesslist, .. } | - MockTransaction::Eip4844 { accesslist, .. } | - MockTransaction::Eip2930 { accesslist, .. } => { + MockTransaction::Eip1559 { access_list: accesslist, .. } | + MockTransaction::Eip4844 { access_list: accesslist, .. } | + MockTransaction::Eip2930 { access_list: accesslist, .. } => { *accesslist = list; } } @@ -611,9 +624,9 @@ impl PoolTransaction for MockTransaction { fn access_list(&self) -> Option<&AccessList> { match self { MockTransaction::Legacy { .. } => None, - MockTransaction::Eip1559 { accesslist, .. } | - MockTransaction::Eip4844 { accesslist, .. 
} | - MockTransaction::Eip2930 { accesslist, .. } => Some(accesslist), + MockTransaction::Eip1559 { access_list: accesslist, .. } | + MockTransaction::Eip4844 { access_list: accesslist, .. } | + MockTransaction::Eip2930 { access_list: accesslist, .. } => Some(accesslist), } } @@ -671,7 +684,7 @@ impl PoolTransaction for MockTransaction { } /// Returns the transaction kind associated with the transaction. - fn kind(&self) -> &TransactionKind { + fn kind(&self) -> &TxKind { match self { MockTransaction::Legacy { to, .. } | MockTransaction::Eip1559 { to, .. } | @@ -717,7 +730,39 @@ impl PoolTransaction for MockTransaction { /// Returns the chain ID associated with the transaction. fn chain_id(&self) -> Option<ChainId> { - Some(1) + match self { + MockTransaction::Legacy { chain_id, .. } => *chain_id, + MockTransaction::Eip1559 { chain_id, .. } | + MockTransaction::Eip4844 { chain_id, .. } | + MockTransaction::Eip2930 { chain_id, .. } => Some(*chain_id), + } + } +} + +impl EthPoolTransaction for MockTransaction { + fn take_blob(&mut self) -> EthBlobTransactionSidecar { + match self { + Self::Eip4844 { sidecar, .. } => EthBlobTransactionSidecar::Present(sidecar.clone()), + _ => EthBlobTransactionSidecar::None, + } + } + + fn blob_count(&self) -> usize { + match self { + Self::Eip4844 { sidecar, .. } => sidecar.blobs.len(), + _ => 0, + } + } + + fn validate_blob( + &self, + _blob: &BlobTransactionSidecar, + _settings: &revm::primitives::KzgSettings, + ) -> Result<(), reth_primitives::BlobTransactionValidationError> { + match &self { + Self::Eip4844 { .. } => Ok(()), + _ => Err(BlobTransactionValidationError::NotBlobTransaction(self.tx_type())), + } } } @@ -735,7 +780,7 @@ impl TryFromRecoveredTransaction for MockTransaction { #[allow(unreachable_patterns)] match transaction.transaction { Transaction::Legacy(TxLegacy { - chain_id: _, + chain_id, nonce, gas_price, gas_limit, @@ -743,6 +788,7 @@ value, input, }) => Ok(MockTransaction::Legacy { + chain_id, hash, sender, nonce, @@ -753,31 +799,30 @@ impl TryFromRecoveredTransaction for MockTransaction { value, input, size, }), - Transaction::Eip1559(TxEip1559 { - chain_id: _, + Transaction::Eip2930(TxEip2930 { + chain_id, nonce, + gas_price, gas_limit, - max_fee_per_gas, - max_priority_fee_per_gas, to, value, input, access_list, - }) => Ok(MockTransaction::Eip1559 { + }) => Ok(MockTransaction::Eip2930 { + chain_id, hash, sender, nonce, - max_fee_per_gas, - max_priority_fee_per_gas, + gas_price, gas_limit, to, value, input, - accesslist: access_list, + access_list, size, }), - Transaction::Eip4844(TxEip4844 { - chain_id: _, + Transaction::Eip1559(TxEip1559 { + chain_id, nonce, gas_limit, max_fee_per_gas, @@ -786,42 +831,46 @@ max_priority_fee_per_gas, to, value, input, access_list, - blob_versioned_hashes: _, - max_fee_per_blob_gas, - }) => Ok(MockTransaction::Eip4844 { + }) => Ok(MockTransaction::Eip1559 { + chain_id, hash, sender, nonce, max_fee_per_gas, max_priority_fee_per_gas, - max_fee_per_blob_gas, gas_limit, to, value, input, - accesslist: access_list, - sidecar: BlobTransactionSidecar::default(), + access_list, size, }), - Transaction::Eip2930(TxEip2930 { - chain_id: _, + Transaction::Eip4844(TxEip4844 { + chain_id, nonce, - gas_price, gas_limit, + max_fee_per_gas, + max_priority_fee_per_gas, to, value, input, access_list, - }) => Ok(MockTransaction::Eip2930 { + blob_versioned_hashes: _, + max_fee_per_blob_gas, + }) => Ok(MockTransaction::Eip4844 { + chain_id, hash, sender,
nonce, - gas_price, + max_fee_per_gas, + max_priority_fee_per_gas, + max_fee_per_blob_gas, gas_limit, to, value, input, - accesslist: access_list, + access_list, + sidecar: BlobTransactionSidecar::default(), size, }), _ => unreachable!("Invalid transaction type"), @@ -856,6 +905,7 @@ impl From<MockTransaction> for Transaction { fn from(mock: MockTransaction) -> Self { match mock { MockTransaction::Legacy { + chain_id, hash: _, sender: _, nonce, @@ -865,16 +915,31 @@ impl From<MockTransaction> for Transaction { value, input, size: _, - } => Self::Legacy(TxLegacy { - chain_id: Some(1), + } => Self::Legacy(TxLegacy { chain_id, nonce, gas_price, gas_limit, to, value, input }), + MockTransaction::Eip2930 { + chain_id, + hash: _, + sender: _, + nonce, + to, + gas_limit, + input, + value, + gas_price, + access_list, + size: _, + } => Self::Eip2930(TxEip2930 { + chain_id, nonce, gas_price, gas_limit, to, value, + access_list, input, }), MockTransaction::Eip1559 { + chain_id, hash: _, sender: _, nonce, @@ -883,22 +948,23 @@ impl From<MockTransaction> for Transaction { gas_limit, to, value, - accesslist, + access_list, input, size: _, } => Self::Eip1559(TxEip1559 { - chain_id: 1, + chain_id, nonce, gas_limit, max_fee_per_gas, max_priority_fee_per_gas, to, value, - access_list: accesslist, + access_list, input, }), MockTransaction::Eip4844 { - hash, + chain_id, + hash: _, sender: _, nonce, max_fee_per_gas, @@ -907,44 +973,23 @@ impl From<MockTransaction> for Transaction { gas_limit, to, value, - accesslist, + access_list, input, - sidecar: _, + sidecar, size: _, } => Self::Eip4844(TxEip4844 { - chain_id: 1, + chain_id, nonce, gas_limit, max_fee_per_gas, max_priority_fee_per_gas, to, value, - access_list: accesslist, - blob_versioned_hashes: vec![hash], + access_list, + blob_versioned_hashes: sidecar.versioned_hashes().collect(), max_fee_per_blob_gas, input, }), - MockTransaction::Eip2930 { - hash: _, - sender: _, - nonce, - to, - gas_limit, - input, - value, - gas_price, - accesslist, - size: _, - } => Self::Eip2930(TxEip2930 { - chain_id: 1, - nonce, - gas_price, - gas_limit, - to, - value, - access_list: accesslist, - input, - }), } } } @@ -958,23 +1003,37 @@ impl proptest::arbitrary::Arbitrary for MockTransaction { any::<(Transaction, Address, B256)>() .prop_map(|(tx, sender, tx_hash)| match &tx { Transaction::Legacy(TxLegacy { + chain_id, nonce, gas_price, gas_limit, to, value, input, - .. - }) | + }) => MockTransaction::Legacy { + chain_id: *chain_id, + sender, + hash: tx_hash, + nonce: *nonce, + gas_price: *gas_price, + gas_limit: *gas_limit, + to: *to, + value: *value, + input: input.clone(), + size: tx.size(), + }, + Transaction::Eip2930(TxEip2930 { + chain_id, nonce, gas_price, gas_limit, to, value, + access_list, input, - .. - }) => MockTransaction::Legacy { + }) => MockTransaction::Eip2930 { + chain_id: *chain_id, sender, hash: tx_hash, nonce: *nonce, @@ -982,10 +1041,12 @@ impl proptest::arbitrary::Arbitrary for MockTransaction { gas_limit: *gas_limit, to: *to, value: *value, - input: (*input).clone(), + input: input.clone(), + access_list: access_list.clone(), size: tx.size(), }, Transaction::Eip1559(TxEip1559 { + chain_id, nonce, gas_limit, max_fee_per_gas, @@ -994,8 +1055,8 @@ impl proptest::arbitrary::Arbitrary for MockTransaction { value, input, access_list, - ..
}) => MockTransaction::Eip1559 { + chain_id: *chain_id, sender, hash: tx_hash, nonce: *nonce, @@ -1004,11 +1065,12 @@ impl proptest::arbitrary::Arbitrary for MockTransaction { gas_limit: *gas_limit, to: *to, value: *value, - input: (*input).clone(), - accesslist: (*access_list).clone(), + input: input.clone(), + access_list: access_list.clone(), size: tx.size(), }, Transaction::Eip4844(TxEip4844 { + chain_id, nonce, gas_limit, max_fee_per_gas, @@ -1018,8 +1080,9 @@ impl proptest::arbitrary::Arbitrary for MockTransaction { input, max_fee_per_blob_gas, access_list, - .. + blob_versioned_hashes: _, }) => MockTransaction::Eip4844 { + chain_id: *chain_id, sender, hash: tx_hash, nonce: *nonce, @@ -1029,8 +1092,8 @@ impl proptest::arbitrary::Arbitrary for MockTransaction { gas_limit: *gas_limit, to: *to, value: *value, - input: (*input).clone(), - accesslist: (*access_list).clone(), + input: input.clone(), + access_list: access_list.clone(), // only generate a sidecar if it is a 4844 tx - also for the sake of // performance just use a default sidecar sidecar: BlobTransactionSidecar::default(), diff --git a/crates/transaction-pool/src/traits.rs index 459c0bf10..ca91b00da 100644 --- a/crates/transaction-pool/src/traits.rs +++ b/crates/transaction-pool/src/traits.rs @@ -9,13 +9,13 @@ use crate::{ }; use futures_util::{ready, Stream}; use reth_eth_wire::HandleMempoolData; +use reth_network_types::PeerId; use reth_primitives::{ kzg::KzgSettings, transaction::TryFromRecoveredTransactionError, AccessList, Address, BlobTransactionSidecar, BlobTransactionValidationError, FromRecoveredPooledTransaction, - IntoRecoveredTransaction, PeerId, PooledTransactionsElement, - PooledTransactionsElementEcRecovered, SealedBlock, Transaction, TransactionKind, - TransactionSignedEcRecovered, TryFromRecoveredTransaction, TxEip4844, TxHash, B256, - EIP1559_TX_TYPE_ID, EIP4844_TX_TYPE_ID, U256, + IntoRecoveredTransaction, PooledTransactionsElement, PooledTransactionsElementEcRecovered, + SealedBlock, Transaction, TransactionSignedEcRecovered, TryFromRecoveredTransaction, TxHash, + TxKind, B256, EIP1559_TX_TYPE_ID, EIP4844_TX_TYPE_ID, U256, }; #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; @@ -811,14 +811,14 @@ pub trait PoolTransaction: /// otherwise returns the gas price. fn priority_fee_or_price(&self) -> u128; - /// Returns the transaction's [`TransactionKind`], which is the address of the recipient or - /// [`TransactionKind::Create`] if the transaction is a contract creation. - fn kind(&self) -> &TransactionKind; + /// Returns the transaction's [`TxKind`], which is the address of the recipient or + /// [`TxKind::Create`] if the transaction is a contract creation. + fn kind(&self) -> &TxKind; - /// Returns the recipient of the transaction if it is not a [TransactionKind::Create] + /// Returns the recipient of the transaction if it is not a [TxKind::Create] /// transaction. fn to(&self) -> Option<Address>
{ - (*self.kind()).to() + (*self.kind()).to().copied() } /// Returns the input data of this transaction. @@ -856,12 +856,7 @@ pub trait EthPoolTransaction: PoolTransaction { fn take_blob(&mut self) -> EthBlobTransactionSidecar; /// Returns the number of blobs this transaction has. - fn blob_count(&self) -> usize { - self.as_eip4844().map(|tx| tx.blob_versioned_hashes.len()).unwrap_or_default() - } - - /// Returns the transaction as EIP-4844 transaction if it is one. - fn as_eip4844(&self) -> Option<&TxEip4844>; + fn blob_count(&self) -> usize; /// Validates the blob sidecar of the transaction with the given settings. fn validate_blob( @@ -908,6 +903,16 @@ pub enum EthBlobTransactionSidecar { Present(BlobTransactionSidecar), } +impl EthBlobTransactionSidecar { + /// Returns the blob sidecar if it is present + pub const fn maybe_sidecar(&self) -> Option<&BlobTransactionSidecar> { + match self { + EthBlobTransactionSidecar::Present(sidecar) => Some(sidecar), + _ => None, + } + } +} + impl EthPooledTransaction { /// Create new instance of [Self]. /// @@ -1056,9 +1061,9 @@ impl PoolTransaction for EthPooledTransaction { self.transaction.priority_fee_or_price() } - /// Returns the transaction's [`TransactionKind`], which is the address of the recipient or - /// [`TransactionKind::Create`] if the transaction is a contract creation. - fn kind(&self) -> &TransactionKind { + /// Returns the transaction's [`TxKind`], which is the address of the recipient or + /// [`TxKind::Create`] if the transaction is a contract creation. + fn kind(&self) -> &TxKind { self.transaction.kind() } @@ -1096,8 +1101,11 @@ impl EthPoolTransaction for EthPooledTransaction { } } - fn as_eip4844(&self) -> Option<&TxEip4844> { - self.transaction.as_eip4844() + fn blob_count(&self) -> usize { + match &self.transaction.transaction { + Transaction::Eip4844(tx) => tx.blob_versioned_hashes.len(), + _ => 0, + } } fn validate_blob( @@ -1125,13 +1133,13 @@ impl TryFromRecoveredTransaction for EthPooledTransaction { } EIP4844_TX_TYPE_ID => { // doesn't have a blob sidecar - return Err(TryFromRecoveredTransactionError::BlobSidecarMissing); + return Err(TryFromRecoveredTransactionError::BlobSidecarMissing) } unsupported => { // unsupported transaction type return Err(TryFromRecoveredTransactionError::UnsupportedTransactionType( unsupported, - )); + )) } }; diff --git a/crates/transaction-pool/src/validate/eth.rs b/crates/transaction-pool/src/validate/eth.rs index a07e6fc97..b31a3af48 100644 --- a/crates/transaction-pool/src/validate/eth.rs +++ b/crates/transaction-pool/src/validate/eth.rs @@ -361,25 +361,17 @@ where } } EthBlobTransactionSidecar::Present(blob) => { - if let Some(eip4844) = transaction.as_eip4844() { - // validate the blob - if let Err(err) = eip4844.validate_blob(&blob, &self.kzg_settings) { - return TransactionValidationOutcome::Invalid( - transaction, - InvalidPoolTransactionError::Eip4844( - Eip4844PoolTransactionError::InvalidEip4844Blob(err), - ), - ) - } - // store the extracted blob - maybe_blob_sidecar = Some(blob); - } else { - // this should not happen + // validate the blob + if let Err(err) = transaction.validate_blob(&blob, &self.kzg_settings) { return TransactionValidationOutcome::Invalid( transaction, - InvalidTransactionError::TxTypeNotSupported.into(), + InvalidPoolTransactionError::Eip4844( + Eip4844PoolTransactionError::InvalidEip4844Blob(err), + ), ) } + // store the extracted blob + maybe_blob_sidecar = Some(blob); } } } diff --git a/deny.toml b/deny.toml index 347b60965..38994d197 100644 --- 
a/deny.toml +++ b/deny.toml @@ -58,6 +58,7 @@ exceptions = [ { allow = ["CC0-1.0"], name = "secp256k1-sys" }, { allow = ["CC0-1.0"], name = "tiny-keccak" }, { allow = ["CC0-1.0"], name = "more-asserts" }, + { allow = ["CC0-1.0"], name = "to_method" }, { allow = ["CC0-1.0"], name = "aurora-engine-modexp" }, # TODO: decide on MPL-2.0 handling # These dependencies are grandfathered in in https://github.com/paradigmxyz/reth/pull/6980 @@ -89,6 +90,7 @@ unknown-git = "deny" allow-git = [ # TODO: remove, see ./Cargo.toml "https://github.com/alloy-rs/alloy", + "https://github.com/foundry-rs/block-explorers", "https://github.com/paradigmxyz/evm-inspectors", "https://github.com/sigp/discv5", ] diff --git a/etc/grafana/dashboards/overview.json b/etc/grafana/dashboards/overview.json index e9b322f1b..eacc3a25c 100644 --- a/etc/grafana/dashboards/overview.json +++ b/etc/grafana/dashboards/overview.json @@ -7942,6 +7942,491 @@ ], "title": "RPC Throughput", "type": "timeseries" + }, + { + "collapsed": false, + "gridPos": { + "h": 9, + "w": 24, + "x": 0, + "y": 273 + }, + "id": 214, + "panels": [], + "title": "Execution Extensions", + "type": "row" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "The total number of canonical state notifications sent to an ExEx.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unitScale": true + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 282 + }, + "id": 215, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "builder", + "expr": "reth_exex_notifications_sent_total{instance=~\"$instance\"}", + "hide": false, + "legendFormat": "Total Notifications Sent", + "range": true, + "refId": "B" + } + ], + "title": "Total Notifications Sent", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "The total number of events an ExEx has sent to the manager.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + 
"spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unitScale": true + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 282 + }, + "id": 216, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "builder", + "expr": "reth_exex_events_sent_total{instance=~\"$instance\"}", + "hide": false, + "legendFormat": "Total Events Sent", + "range": true, + "refId": "B" + } + ], + "title": "Total Events Sent", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "Current and Max capacity of the internal state notifications buffer.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unitScale": true + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 290 + }, + "id": 218, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "builder", + "expr": "reth_exex_manager_current_capacity{instance=~\"$instance\"}", + "hide": false, + "legendFormat": "Current size", + "range": true, + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "builder", + "expr": "max_over_time(reth_exex_manager_max_capacity{instance=~\"$instance\"}[1h])", + "hide": false, + "legendFormat": "Max size", + "range": true, + "refId": "C" + } + ], + "title": "Current and Max Capacity", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "Current size of the internal state notifications buffer.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": 
"linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unitScale": true + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 290 + }, + "id": 219, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "builder", + "expr": "reth_exex_manager_buffer_size{instance=~\"$instance\"}", + "hide": false, + "legendFormat": "Max size", + "range": true, + "refId": "B" + } + ], + "title": "Buffer Size", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "Number of ExExs on the node", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "align": "auto", + "filterable": false + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 298 + }, + "id": 220, + "options": { + "colorMode": "value", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "textMode": "auto" + }, + "pluginVersion": "8.0.6", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "builder", + "expr": "reth_exex_manager_num_exexs{instance=~\"$instance\"}", + "hide": false, + "legendFormat": "Number of ExExs", + "range": true, + "refId": "A" + } + ], + "title": "Number of ExExs", + "type": "stat" } ], "refresh": "30s", @@ -8026,7 +8511,7 @@ }, "timepicker": {}, "timezone": "", - "title": "reth", + "title": "Reth", "uid": "2k8BXz24x", "version": 1, "weekStart": "" diff --git a/etc/grafana/dashboards/reth-discovery.json b/etc/grafana/dashboards/reth-discovery.json index 53d71cd76..4a1ef344c 100644 --- a/etc/grafana/dashboards/reth-discovery.json +++ b/etc/grafana/dashboards/reth-discovery.json @@ -1,976 +1,1194 @@ { - "__inputs": [ + "__inputs": [ + { + "name": "DS_PROMETHEUS", + "label": "Prometheus", + "description": "", + "type": "datasource", + "pluginId": "prometheus", + "pluginName": "Prometheus" + } + ], + "__elements": {}, + "__requires": [ + { + "type": "grafana", + "id": "grafana", + "name": "Grafana", + "version": "10.3.3" + }, + { + "type": "datasource", + "id": "prometheus", + "name": "Prometheus", + "version": "1.0.0" + }, + { + "type": "panel", + "id": "stat", + "name": "Stat", + "version": "" + }, + { + "type": "panel", + "id": "timeseries", + "name": "Time series", + "version": "" + } + ], + "annotations": { + "list": [ { - "name": "DS_PROMETHEUS", - "label": "Prometheus", - "description": "", - "type": "datasource", - "pluginId": "prometheus", - "pluginName": "Prometheus" + "builtIn": 1, + "datasource": { + "type": "grafana", + "uid": "-- Grafana --" + }, + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "type": "dashboard" } - ], - 
"__elements": {}, - "__requires": [ - { - "type": "grafana", - "id": "grafana", - "name": "Grafana", - "version": "10.3.3" + ] + }, + "description": "Devp2p peer discovery protocols", + "editable": true, + "fiscalYearStartMonth": 0, + "graphTooltip": 0, + "id": null, + "links": [], + "liveNow": false, + "panels": [ + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 0 }, - { - "type": "datasource", - "id": "prometheus", - "name": "Prometheus", - "version": "1.0.0" + "id": 96, + "panels": [], + "repeat": "instance", + "repeatDirection": "h", + "title": "Overview", + "type": "row" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, - { - "type": "panel", - "id": "stat", - "name": "Stat", - "version": "" + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "light-purple", + "value": null + } + ] + }, + "unitScale": true + }, + "overrides": [] }, - { - "type": "panel", - "id": "timeseries", - "name": "Time series", - "version": "" - } - ], - "annotations": { - "list": [ + "gridPos": { + "h": 3, + "w": 3, + "x": 0, + "y": 1 + }, + "id": 22, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showPercentChange": false, + "text": { + "valueSize": 20 + }, + "textMode": "name", + "wideLayout": true + }, + "pluginVersion": "10.3.3", + "targets": [ { - "builtIn": 1, "datasource": { - "type": "grafana", - "uid": "-- Grafana --" - }, - "enable": true, - "hide": true, - "iconColor": "rgba(0, 211, 255, 1)", - "name": "Annotations & Alerts", - "type": "dashboard" + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "builder", + "exemplar": false, + "expr": "reth_info{instance=~\"$instance\"}", + "instant": true, + "legendFormat": "{{version}}", + "range": false, + "refId": "A" } - ] + ], + "title": "Version", + "transparent": true, + "type": "stat" }, - "editable": true, - "fiscalYearStartMonth": 0, - "graphTooltip": 0, - "id": null, - "links": [], - "liveNow": false, - "panels": [ - { - "collapsed": false, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 0 - }, - "id": 96, - "panels": [], - "repeat": "instance", - "repeatDirection": "h", - "title": "Overview", - "type": "row" + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "light-purple", + "value": null + } + ] + }, + "unitScale": true }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "thresholds" - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - } - ] - }, - "unitScale": true - }, - "overrides": [] - }, - "gridPos": { - "h": 3, - "w": 3, - "x": 0, - "y": 1 - }, - "id": 22, - "options": { - "colorMode": "value", - "graphMode": "area", - "justifyMode": "auto", - "orientation": "auto", - "reduceOptions": { - "calcs": [ - "lastNotNull" - ], - "fields": "", - "values": false - }, - "showPercentChange": false, - "text": { - "valueSize": 20 - }, - "textMode": "name", - "wideLayout": true - }, - "pluginVersion": "10.3.3", - "targets": [ - { - "datasource": { - "type": "prometheus", 
- "uid": "${DS_PROMETHEUS}" - }, - "editorMode": "builder", - "exemplar": false, - "expr": "reth_info{instance=~\"$instance\"}", - "instant": true, - "legendFormat": "{{version}}", - "range": false, - "refId": "A" - } - ], - "title": "Version", - "transparent": true, - "type": "stat" + "overrides": [] }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "gridPos": { + "h": 3, + "w": 6, + "x": 3, + "y": 1 + }, + "id": 192, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "thresholds" - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - } - ] - }, - "unitScale": true - }, - "overrides": [] - }, - "gridPos": { - "h": 3, - "w": 6, - "x": 3, - "y": 1 - }, - "id": 192, - "options": { - "colorMode": "value", - "graphMode": "area", - "justifyMode": "auto", - "orientation": "auto", - "reduceOptions": { - "calcs": [ - "lastNotNull" - ], - "fields": "", - "values": false - }, - "showPercentChange": false, - "text": { - "valueSize": 20 - }, - "textMode": "name", - "wideLayout": true - }, - "pluginVersion": "10.3.3", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "editorMode": "builder", - "exemplar": false, - "expr": "reth_info{instance=~\"$instance\"}", - "instant": true, - "legendFormat": "{{build_timestamp}}", - "range": false, - "refId": "A" - } - ], - "title": "Build Timestamp", - "transparent": true, - "type": "stat" + "showPercentChange": false, + "text": { + "valueSize": 20 + }, + "textMode": "name", + "wideLayout": true }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "pluginVersion": "10.3.3", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "builder", + "exemplar": false, + "expr": "reth_info{instance=~\"$instance\"}", + "instant": true, + "legendFormat": "{{build_timestamp}}", + "range": false, + "refId": "A" + } + ], + "title": "Build Timestamp", + "transparent": true, + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "light-purple", + "value": null + } + ] + }, + "unitScale": true }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "thresholds" - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - } - ] - }, - "unitScale": true - }, - "overrides": [] - }, - "gridPos": { - "h": 3, - "w": 3, - "x": 9, - "y": 1 - }, - "id": 193, - "options": { - "colorMode": "value", - "graphMode": "area", - "justifyMode": "auto", - "orientation": "auto", - "reduceOptions": { - "calcs": [ - "lastNotNull" - ], - "fields": "", - "values": false - }, - "showPercentChange": false, - "text": { - "valueSize": 20 - }, - "textMode": "name", - "wideLayout": true - }, - "pluginVersion": "10.3.3", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "editorMode": "builder", - "exemplar": false, - "expr": "reth_info{instance=~\"$instance\"}", - "instant": true, - "legendFormat": "{{git_sha}}", - "range": false, - "refId": "A" - } - ], - "title": "Git SHA", - 
"transparent": true, - "type": "stat" + "overrides": [] }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "gridPos": { + "h": 3, + "w": 3, + "x": 9, + "y": 1 + }, + "id": 193, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "thresholds" - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - } - ] - }, - "unitScale": true - }, - "overrides": [] - }, - "gridPos": { - "h": 3, - "w": 2, - "x": 12, - "y": 1 - }, - "id": 195, - "options": { - "colorMode": "value", - "graphMode": "area", - "justifyMode": "auto", - "orientation": "auto", - "reduceOptions": { - "calcs": [ - "lastNotNull" - ], - "fields": "", - "values": false - }, - "showPercentChange": false, - "text": { - "valueSize": 20 - }, - "textMode": "name", - "wideLayout": true - }, - "pluginVersion": "10.3.3", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "editorMode": "builder", - "exemplar": false, - "expr": "reth_info{instance=~\"$instance\"}", - "instant": true, - "legendFormat": "{{build_profile}}", - "range": false, - "refId": "A" - } - ], - "title": "Build Profile", - "transparent": true, - "type": "stat" + "showPercentChange": false, + "text": { + "valueSize": 20 + }, + "textMode": "name", + "wideLayout": true }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "pluginVersion": "10.3.3", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "builder", + "exemplar": false, + "expr": "reth_info{instance=~\"$instance\"}", + "instant": true, + "legendFormat": "{{git_sha}}", + "range": false, + "refId": "A" + } + ], + "title": "Git SHA", + "transparent": true, + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "light-purple", + "value": null + } + ] + }, + "unitScale": true }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "thresholds" - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - } - ] - }, - "unitScale": true - }, - "overrides": [] - }, - "gridPos": { - "h": 3, - "w": 5, - "x": 14, - "y": 1 - }, - "id": 196, - "options": { - "colorMode": "value", - "graphMode": "area", - "justifyMode": "auto", - "orientation": "auto", - "reduceOptions": { - "calcs": [ - "lastNotNull" - ], - "fields": "", - "values": false - }, - "showPercentChange": false, - "text": { - "valueSize": 20 - }, - "textMode": "name", - "wideLayout": true - }, - "pluginVersion": "10.3.3", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "editorMode": "builder", - "exemplar": false, - "expr": "reth_info{instance=~\"$instance\"}", - "instant": true, - "legendFormat": "{{target_triple}}", - "range": false, - "refId": "A" - } - ], - "title": "Target Triple", - "transparent": true, - "type": "stat" + "overrides": [] }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "gridPos": { + "h": 3, + "w": 2, + "x": 12, + "y": 1 + }, + "id": 195, + "options": { + "colorMode": "value", + 
"graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "thresholds" - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - } - ] - }, - "unitScale": true - }, - "overrides": [] - }, - "gridPos": { - "h": 3, - "w": 5, - "x": 19, - "y": 1 - }, - "id": 197, - "options": { - "colorMode": "value", - "graphMode": "area", - "justifyMode": "auto", - "orientation": "auto", - "reduceOptions": { - "calcs": [ - "lastNotNull" - ], - "fields": "", - "values": false - }, - "showPercentChange": false, - "text": { - "valueSize": 20 - }, - "textMode": "name", - "wideLayout": true - }, - "pluginVersion": "10.3.3", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "editorMode": "builder", - "exemplar": false, - "expr": "reth_info{instance=~\"$instance\"}", - "instant": true, - "legendFormat": "{{cargo_features}}", - "range": false, - "refId": "A" - } - ], - "title": "Cargo Features", - "transparent": true, - "type": "stat" + "showPercentChange": false, + "text": { + "valueSize": 20 + }, + "textMode": "name", + "wideLayout": true }, - { - "collapsed": false, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 4 - }, - "id": 89, - "panels": [], - "repeat": "instance", - "repeatDirection": "h", - "title": "Discv5", - "type": "row" + "pluginVersion": "10.3.3", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "builder", + "exemplar": false, + "expr": "reth_info{instance=~\"$instance\"}", + "instant": true, + "legendFormat": "{{build_profile}}", + "range": false, + "refId": "A" + } + ], + "title": "Build Profile", + "transparent": true, + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "light-purple", + "value": null + } + ] + }, + "unitScale": true }, - "description": "Peers managed by underlying sigp/discv5 node. \n\nOnly peers in the kbuckets are queried in FINDNODE lookups, and included in NODES responses to other peers.\n\nNot all peers with an established session will make it into the kbuckets, due to e.g. 
reachability issues (NAT) and capacity of kbuckets furthest log2distance away from local node (XOR metrics).", - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" + "overrides": [] + }, + "gridPos": { + "h": 3, + "w": 5, + "x": 14, + "y": 1 + }, + "id": 196, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showPercentChange": false, + "text": { + "valueSize": 20 + }, + "textMode": "name", + "wideLayout": true + }, + "pluginVersion": "10.3.3", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "builder", + "exemplar": false, + "expr": "reth_info{instance=~\"$instance\"}", + "instant": true, + "legendFormat": "{{target_triple}}", + "range": false, + "refId": "A" + } + ], + "title": "Target Triple", + "transparent": true, + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "light-purple", + "value": null } + ] + }, + "unitScale": true + }, + "overrides": [] + }, + "gridPos": { + "h": 3, + "w": 5, + "x": 19, + "y": 1 + }, + "id": 197, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showPercentChange": false, + "text": { + "valueSize": 20 + }, + "textMode": "name", + "wideLayout": true + }, + "pluginVersion": "10.3.3", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "builder", + "exemplar": false, + "expr": "reth_info{instance=~\"$instance\"}", + "instant": true, + "legendFormat": "{{cargo_features}}", + "range": false, + "refId": "A" + } + ], + "title": "Cargo Features", + "transparent": true, + "type": "stat" + }, + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 4 + }, + "id": 89, + "panels": [], + "repeat": "instance", + "repeatDirection": "h", + "title": "Discv5", + "type": "row" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "Peers managed by underlying sigp/discv5 node. \n\nOnly peers in the kbuckets are queried in FINDNODE lookups, and included in NODES responses to other peers.\n\nNot all peers with an established session will make it into the kbuckets, due to e.g. 
reachability issues (NAT) and capacity of kbuckets furthest log2distance away from local node (XOR metrics).", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" }, - "unitScale": true - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 12, - "x": 0, - "y": 5 - }, - "id": 198, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "mode": "single", - "sort": "none" - } + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unitScale": true }, - "targets": [ + "overrides": [ { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "matcher": { + "id": "byName", + "options": "Total peers kbuckets" }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_discv5_total_kbucket_peers_raw{instance=\"$instance\"}", - "fullMetaSearch": false, - "includeNullMetadata": true, - "instant": false, - "legendFormat": "Total peers kbuckets", - "range": true, - "refId": "A", - "useBackend": false + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "#9b73d6", + "mode": "fixed" + } + } + ] }, { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "matcher": { + "id": "byName", + "options": "Total connected sessions" }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_discv5_total_sessions_raw{instance=\"$instance\"}", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "instant": false, - "legendFormat": "Total connected sessions", - "range": true, - "refId": "B", - "useBackend": false + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "super-light-red", + "mode": "fixed" + } + } + ] } - ], - "title": "Peers", - "type": "timeseries" + ] }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 5 + }, + "id": 198, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_discv5_kbucket_peers_raw_total{instance=\"$instance\"}", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "Total peers kbuckets", + "range": true, + "refId": "A", + "useBackend": false }, - "description": "Frequency of session establishment and kbuckets insertions.\n\nSince discv5 favours long-lived connections, kbuckets insertions are expected to 
be less frequent the longer the node stays online.\n\nSome incoming connections may be from peers with unreachable ENRs, i.e. ENRs that don't advertise a UDP socket. These peers are not useful for the discv5 node, nor for RLPx.\n\nDiscovered peers are filtered w.r.t. what they advertise in their ENR. By default peers advertising 'eth2' are filtered out. Unreachable ENRs are also filtered out. Only peers that pass the filter are useful. These peers get passed up to the node to attempt an RLPx connection.\n\nSessions can still be established with peers that advertise no UDP socket in their ENR. This allows those peers to discover their reachable socket. On the other hand, for DoS protection, peers that advertise a different socket than the one they connect from are denied a sigp/discv5 session. These peers have an unverifiable ENR. They are nonetheless passed to RLPx (some EL implementations of discv5 are more lax about matching the ENR against the source socket).
", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null }, - "thresholdsStyle": { - "mode": "off" + { + "color": "red", + "value": 80 } + ] + }, + "unit": "cps", + "unitScale": true + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "Total Session Establishments" }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "light-green", + "mode": "fixed" } - ] + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Total KBucket Insertions" }, - "unit": "cps", - "unitScale": true - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 12, - "x": 12, - "y": 5 - }, - "id": 199, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "mode": "single", - "sort": "none" - } - }, - "targets": [ + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "#9958f4", + "mode": "fixed" + } + } + ] + }, { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "matcher": { + "id": "byName", + "options": "Session Establishments (pass filter)" }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "rate(reth_discv5_total_inserted_kbucket_peers_raw{instance=\"$instance\"}[$__rate_interval])", - "fullMetaSearch": false, - "includeNullMetadata": false, - "instant": false, - "legendFormat": "Total KBucket Insertions", - "range": true, - "refId": "A", - "useBackend": false + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "#ff0ada", + "mode": "fixed" + } + } + ] }, { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "matcher": { + "id": "byName", + "options": "Session Establishments (unreachable ENR)" }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "rate(reth_discv5_total_established_sessions_raw{instance=\"$instance\"}[$__rate_interval])", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": false, - "instant": false, - "legendFormat": "Total Session Establishments", - "range": true, - "refId": "B", - "useBackend": false + "properties": [ + { + "id": "color", + 
"value": { + "fixedColor": "light-orange", + "mode": "fixed" + } + } + ] }, { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "matcher": { + "id": "byName", + "options": "Session Establishment Failed (unverifiable ENR)" }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "rate(reth_discv5_total_established_sessions_unreachable_enr{instance=\"$instance\"}[$__rate_interval])", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": false, - "instant": false, - "legendFormat": "Session Establishments (unreachable ENR)", - "range": true, - "refId": "C", - "useBackend": false + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "#8ab8ff", + "mode": "fixed" + } + } + ] }, { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "matcher": { + "id": "byName", + "options": "Failed Session Establishments (unverifiable ENR)" }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "rate(reth_discv5_total_established_sessions_raw{instance=\"$instance\"}[$__rate_interval]) - rate(reth_discv5_total_established_sessions_custom_filtered{instance=\"$instance\"}[$__rate_interval])", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": false, - "instant": false, - "legendFormat": "Session Establishments (pass filter)", - "range": true, - "refId": "D", - "useBackend": false + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "#8ab8ff", + "mode": "fixed" + } + } + ] } - ], - "title": "Peer Churn", - "type": "timeseries" + ] }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 5 + }, + "id": 199, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "rate(reth_discv5_inserted_kbucket_peers_raw_total{instance=\"$instance\"}[$__rate_interval])", + "fullMetaSearch": false, + "includeNullMetadata": false, + "instant": false, + "legendFormat": "Total KBucket Insertions", + "range": true, + "refId": "A", + "useBackend": false }, - "description": "Frequency of discovering peers from some popular networks.\n\nSome nodes miss advertising a fork ID kv-pair in their ENR. 
They will be counted as 'unknown', but may belong to a popular network.", - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "rate(reth_discv5_established_sessions_raw_total{instance=\"$instance\"}[$__rate_interval])", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": false, + "instant": false, + "legendFormat": "Total Session Establishments", + "range": true, + "refId": "B", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "rate(reth_discv5_established_sessions_unreachable_enr_total{instance=\"$instance\"}[$__rate_interval])", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": false, + "instant": false, + "legendFormat": "Session Establishments (unreachable ENR)", + "range": true, + "refId": "C", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "rate(reth_discv5_established_sessions_raw_total{instance=\"$instance\"}[$__rate_interval]) - rate(reth_discv5_established_sessions_custom_filtered_total{instance=\"$instance\"}[$__rate_interval])", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": false, + "instant": false, + "legendFormat": "Session Establishments (pass filter)", + "range": true, + "refId": "D", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "rate(reth_discv5_unverifiable_enrs_raw_total{instance=\"$instance\"}[$__rate_interval])", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "Failed Session Establishments (unverifiable ENR)", + "range": true, + "refId": "E", + "useBackend": false + } + ], + "title": "Peer Churn", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "Frequency of discovering peers from some popular network stacks.\n\nSome nodes miss advertising a fork ID kv-pair in their ENR. 
They will be counted as 'unknown', but may belong to a popular network.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" }, - "unit": "cps", - "unitScale": true - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 12, - "x": 0, - "y": 13 - }, - "id": 200, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "mode": "single", - "sort": "none" - } + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "cps", + "unitScale": true }, - "targets": [ + "overrides": [ { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "matcher": { + "id": "byName", + "options": "Eth" }, - "disableTextWrap": false, - "editorMode": "code", - "expr": "rate(reth_discv5_eth{instance=\"$instance\"}[$__rate_interval])", - "fullMetaSearch": false, - "includeNullMetadata": false, - "instant": false, - "legendFormat": "Eth", - "range": true, - "refId": "A", - "useBackend": false + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "#b677d9", + "mode": "fixed" + } + } + ] }, { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "matcher": { + "id": "byName", + "options": "Eth2" }, - "disableTextWrap": false, - "editorMode": "code", - "expr": "rate(reth_discv5_eth2{instance=\"$instance\"}[$__rate_interval])", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": false, - "instant": false, - "legendFormat": "Eth2", - "range": true, - "refId": "B", - "useBackend": false + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "light-green", + "mode": "fixed" + } + } + ] }, { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "matcher": { + "id": "byName", + "options": "Unknown" }, - "disableTextWrap": false, - "editorMode": "code", - "expr": "rate(reth_discv5_opstack{instance=\"$instance\"}[$__rate_interval])", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": false, - "instant": false, - "legendFormat": "OP", - "range": true, - "refId": "C", - 
"useBackend": false + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "#ff0ae5", + "mode": "fixed" + } + } + ] }, { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "matcher": { + "id": "byName", + "options": "OP EL" }, - "disableTextWrap": false, - "editorMode": "code", - "expr": "rate(reth_discv5_total_established_sessions_raw{instance=\"$instance\"}[$__rate_interval]) - (rate(reth_discv5_eth{instance=\"$instance\"}[$__rate_interval]) + rate(reth_discv5_eth2{instance=\"$instance\"}[$__rate_interval]) + rate(reth_discv5_opstack{instance=\"$instance\"}[$__rate_interval]))", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": false, - "instant": false, - "legendFormat": "Unknown", - "range": true, - "refId": "D", - "useBackend": false + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "#8AB8FF", + "mode": "fixed" + } + } + ] } - ], - "title": "Advertised Networks", - "type": "timeseries" - } - ], - "refresh": "30s", - "schemaVersion": 39, - "tags": [], - "templating": { - "list": [ + ] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 13 + }, + "id": 200, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "code", + "expr": "rate(reth_discv5_eth{instance=\"$instance\"}[$__rate_interval])", + "fullMetaSearch": false, + "includeNullMetadata": false, + "instant": false, + "legendFormat": "Eth", + "range": true, + "refId": "A", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "code", + "expr": "rate(reth_discv5_eth2{instance=\"$instance\"}[$__rate_interval])", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": false, + "instant": false, + "legendFormat": "Eth2", + "range": true, + "refId": "B", + "useBackend": false + }, { - "current": {}, "datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" }, - "definition": "query_result(reth_info)", - "hide": 0, - "includeAll": false, - "multi": false, - "name": "instance", - "options": [], - "query": { - "query": "query_result(reth_info)", - "refId": "PrometheusVariableQueryEditor-VariableQuery" - }, - "refresh": 1, - "regex": "/.*instance=\\\"([^\\\"]*).*/", - "skipUrlSync": false, - "sort": 0, - "type": "query" + "disableTextWrap": false, + "editorMode": "code", + "expr": "rate(reth_discv5_opel{instance=\"$instance\"}[$__rate_interval])", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": false, + "instant": false, + "legendFormat": "OP EL", + "range": true, + "refId": "E", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "code", + "expr": "rate(reth_discv5_opstack{instance=\"$instance\"}[$__rate_interval])", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": false, + "instant": false, + "legendFormat": "OP CL", + "range": true, + "refId": "C", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "code", + "expr": "(rate(reth_discv5_established_sessions_raw_total{instance=\"$instance\"}[$__rate_interval]) + 
rate(reth_discv5_unverifiable_enrs_raw_total{instance=\"$instance\"}[$__rate_interval])) - (rate(reth_discv5_eth{instance=\"$instance\"}[$__rate_interval]) + rate(reth_discv5_eth2{instance=\"$instance\"}[$__rate_interval]) + rate(reth_discv5_opstack{instance=\"$instance\"}[$__rate_interval]) + rate(reth_discv5_opel{instance=\"$instance\"}[$__rate_interval]))", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": false, + "instant": false, + "legendFormat": "Unknown", + "range": true, + "refId": "D", + "useBackend": false } - ] - }, - "time": { - "from": "now-1h", - "to": "now" - }, - "timepicker": {}, - "timezone": "", - "title": "reth - discovery", - "uid": "de6e87b2-7630-40b2-b2c4-a500476e799d", - "version": 11, - "weekStart": "" - } \ No newline at end of file + ], + "title": "Advertised Network Stacks", + "type": "timeseries" + } + ], + "refresh": "30s", + "schemaVersion": 39, + "tags": [], + "templating": { + "list": [ + { + "current": {}, + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "definition": "query_result(reth_info)", + "hide": 0, + "includeAll": false, + "multi": false, + "name": "instance", + "options": [], + "query": { + "query": "query_result(reth_info)", + "refId": "PrometheusVariableQueryEditor-VariableQuery" + }, + "refresh": 1, + "regex": "/.*instance=\\\"([^\\\"]*).*/", + "skipUrlSync": false, + "sort": 0, + "type": "query" + } + ] + }, + "time": { + "from": "now-1h", + "to": "now" + }, + "timepicker": {}, + "timezone": "", + "title": "Reth - Peer Discovery", + "uid": "fd2d69b5-ca32-45d0-946e-c00ddcd7052c", + "version": 1, + "weekStart": "" +} \ No newline at end of file diff --git a/etc/grafana/dashboards/reth-mempool.json b/etc/grafana/dashboards/reth-mempool.json index 07212ac3b..3ba499a9a 100644 --- a/etc/grafana/dashboards/reth-mempool.json +++ b/etc/grafana/dashboards/reth-mempool.json @@ -573,7 +573,7 @@ } ] }, - "unit": "decbytes", + "unit": "bytes", "unitScale": true }, "overrides": [] @@ -726,7 +726,7 @@ } ] }, - "unit": "decbytes", + "unit": "bytes", "unitScale": true }, "overrides": [] @@ -858,7 +858,7 @@ } ] }, - "unit": "decbytes", + "unit": "bytes", "unitScale": true }, "overrides": [] @@ -1638,8 +1638,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -1773,8 +1772,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -1904,8 +1902,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -2024,8 +2021,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -2144,8 +2140,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -2367,8 +2362,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -2487,8 +2481,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -2624,8 +2617,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -2793,8 +2785,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -2889,8 +2880,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -3032,8 +3022,7 @@ "mode": 
"absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -3340,8 +3329,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -3425,8 +3413,8 @@ }, "timepicker": {}, "timezone": "", - "title": "reth - mempool", + "title": "Reth - Transaction Pool", "uid": "bee34f59-c79c-4669-a000-198057b3703d", - "version": 1, + "version": 3, "weekStart": "" } \ No newline at end of file diff --git a/etc/grafana/dashboards/reth-state-growth.json b/etc/grafana/dashboards/reth-state-growth.json new file mode 100644 index 000000000..35077706e --- /dev/null +++ b/etc/grafana/dashboards/reth-state-growth.json @@ -0,0 +1,1757 @@ +{ + "__inputs": [ + { + "name": "DS_PROMETHEUS", + "label": "Prometheus", + "description": "", + "type": "datasource", + "pluginId": "prometheus", + "pluginName": "Prometheus" + } + ], + "__elements": {}, + "__requires": [ + { + "type": "grafana", + "id": "grafana", + "name": "Grafana", + "version": "10.1.0" + }, + { + "type": "datasource", + "id": "prometheus", + "name": "Prometheus", + "version": "1.0.0" + }, + { + "type": "panel", + "id": "stat", + "name": "Stat", + "version": "" + }, + { + "type": "panel", + "id": "timeseries", + "name": "Time series", + "version": "" + } + ], + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": { + "type": "grafana", + "uid": "-- Grafana --" + }, + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "type": "dashboard" + } + ] + }, + "description": "Ethereum state growth", + "editable": true, + "fiscalYearStartMonth": 0, + "graphTooltip": 0, + "id": null, + "links": [], + "liveNow": false, + "panels": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 3, + "w": 3, + "x": 0, + "y": 0 + }, + "id": 22, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showPercentChange": false, + "text": { + "valueSize": 20 + }, + "textMode": "name", + "wideLayout": true + }, + "pluginVersion": "10.1.0", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "builder", + "exemplar": false, + "expr": "reth_info{instance=~\"$instance\"}", + "instant": true, + "legendFormat": "{{version}}", + "range": false, + "refId": "A" + } + ], + "title": "Version", + "transparent": true, + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 3, + "w": 6, + "x": 3, + "y": 0 + }, + "id": 192, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showPercentChange": false, + "text": { + "valueSize": 20 + }, + "textMode": "name", + "wideLayout": true + }, + "pluginVersion": "10.1.0", + 
"targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "builder", + "exemplar": false, + "expr": "reth_info{instance=~\"$instance\"}", + "instant": true, + "legendFormat": "{{build_timestamp}}", + "range": false, + "refId": "A" + } + ], + "title": "Build Timestamp", + "transparent": true, + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 3, + "w": 3, + "x": 9, + "y": 0 + }, + "id": 193, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showPercentChange": false, + "text": { + "valueSize": 20 + }, + "textMode": "name", + "wideLayout": true + }, + "pluginVersion": "10.1.0", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "builder", + "exemplar": false, + "expr": "reth_info{instance=~\"$instance\"}", + "instant": true, + "legendFormat": "{{git_sha}}", + "range": false, + "refId": "A" + } + ], + "title": "Git SHA", + "transparent": true, + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 3, + "w": 2, + "x": 12, + "y": 0 + }, + "id": 195, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showPercentChange": false, + "text": { + "valueSize": 20 + }, + "textMode": "name", + "wideLayout": true + }, + "pluginVersion": "10.1.0", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "builder", + "exemplar": false, + "expr": "reth_info{instance=~\"$instance\"}", + "instant": true, + "legendFormat": "{{build_profile}}", + "range": false, + "refId": "A" + } + ], + "title": "Build Profile", + "transparent": true, + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 3, + "w": 5, + "x": 14, + "y": 0 + }, + "id": 196, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showPercentChange": false, + "text": { + "valueSize": 20 + }, + "textMode": "name", + "wideLayout": true + }, + "pluginVersion": "10.1.0", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "builder", + "exemplar": false, + "expr": "reth_info{instance=~\"$instance\"}", + "instant": true, + "legendFormat": "{{target_triple}}", + "range": false, + "refId": "A" + } + ], + 
"title": "Target Triple", + "transparent": true, + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 3, + "w": 5, + "x": 19, + "y": 0 + }, + "id": 197, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showPercentChange": false, + "text": { + "valueSize": 20 + }, + "textMode": "name", + "wideLayout": true + }, + "pluginVersion": "10.1.0", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "builder", + "exemplar": false, + "expr": "reth_info{instance=~\"$instance\"}", + "instant": true, + "legendFormat": "{{cargo_features}}", + "range": false, + "refId": "A" + } + ], + "title": "Cargo Features", + "transparent": true, + "type": "stat" + }, + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 3 + }, + "id": 7, + "panels": [], + "title": "State", + "type": "row" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "bytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 10, + "w": 12, + "x": 0, + "y": 4 + }, + "id": 6, + "options": { + "legend": { + "calcs": [ + "last" + ], + "displayMode": "table", + "placement": "right", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "avg(delta(reth_db_table_size{instance=~\"$instance\", table=\"PlainAccountState\"}[$interval]))", + "instant": false, + "interval": "$interval", + "legendFormat": "Account", + "range": true, + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "avg(delta(reth_db_table_size{instance=~\"$instance\", table=\"PlainStorageState\"}[$interval]))", + "hide": false, + "instant": false, + "interval": "$interval", + "legendFormat": "Storage", + "range": true, + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "avg(delta(reth_db_table_size{instance=~\"$instance\", table=\"Bytecodes\"}[$interval]))", + "hide": false, + "instant": false, + "interval": "$interval", + "legendFormat": "Bytecodes", + "range": true, + "refId": "C" + }, + { + 
"datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "avg(delta(reth_db_table_size{instance=~\"$instance\", table=\"PlainAccountState\"}[$interval])) + avg(delta(reth_db_table_size{instance=~\"$instance\", table=\"PlainStorageState\"}[$interval])) + avg(delta(reth_db_table_size{instance=~\"$instance\", table=\"Bytecodes\"}[$interval]))", + "hide": false, + "instant": false, + "interval": "$interval", + "legendFormat": "Total", + "range": true, + "refId": "D" + } + ], + "title": "State Growth (interval = ${interval})", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "bytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 10, + "w": 12, + "x": 12, + "y": 4 + }, + "id": 13, + "options": { + "legend": { + "calcs": [ + "last" + ], + "displayMode": "table", + "placement": "right", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "sum(reth_db_table_size{instance=~\"$instance\", table=\"PlainAccountState\"})", + "instant": false, + "interval": "$interval", + "legendFormat": "Account", + "range": true, + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "sum(reth_db_table_size{instance=~\"$instance\", table=\"PlainStorageState\"})", + "hide": false, + "instant": false, + "interval": "$interval", + "legendFormat": "Storage", + "range": true, + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "sum(reth_db_table_size{instance=~\"$instance\", table=\"Bytecodes\"})", + "hide": false, + "instant": false, + "interval": "$interval", + "legendFormat": "Bytecodes", + "range": true, + "refId": "C" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "sum(reth_db_table_size{instance=~\"$instance\", table=~\"PlainAccountState|PlainStorageState|Bytecodes\"})", + "hide": false, + "instant": false, + "interval": "$interval", + "legendFormat": "Total", + "range": true, + "refId": "D" + } + ], + "title": "State Size", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + 
"hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "bytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 10, + "w": 12, + "x": 0, + "y": 14 + }, + "id": 1, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": false + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "avg(delta(reth_db_table_size{instance=~\"$instance\", table=\"PlainAccountState\"}[$interval]))", + "instant": false, + "interval": "$interval", + "legendFormat": "__auto", + "range": true, + "refId": "A" + } + ], + "title": "Account State Growth (interval = ${interval})", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "bytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 10, + "w": 12, + "x": 12, + "y": 14 + }, + "id": 2, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": false + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "avg(delta(reth_db_table_size{instance=~\"$instance\", table=\"PlainStorageState\"}[$interval]))", + "instant": false, + "interval": "$interval", + "legendFormat": "__auto", + "range": true, + "refId": "A" + } + ], + "title": "Storage State Growth (interval = ${interval})", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + 
"mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "bytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 10, + "w": 12, + "x": 0, + "y": 24 + }, + "id": 9, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": false + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "avg(delta(reth_db_table_size{instance=~\"$instance\", table=\"Bytecodes\"}[$interval]))", + "instant": false, + "interval": "$interval", + "legendFormat": "__auto", + "range": true, + "refId": "A" + } + ], + "title": "Bytecodes Growth (interval = ${interval})", + "type": "timeseries" + }, + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 34 + }, + "id": 8, + "panels": [], + "title": "History", + "type": "row" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "bytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 10, + "w": 12, + "x": 0, + "y": 35 + }, + "id": 12, + "options": { + "legend": { + "calcs": [ + "last" + ], + "displayMode": "table", + "placement": "right", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "avg(delta(reth_db_table_size{instance=~\"$instance\", table=\"Headers\"}[$interval])) + avg(delta(reth_static_files_segment_size{instance=~\"$instance\", segment=\"headers\"}[$interval]))", + "instant": false, + "interval": "$interval", + "legendFormat": "Headers", + "range": true, + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "avg(delta(reth_db_table_size{instance=~\"$instance\", table=\"Receipts\"}[$interval])) + avg(delta(reth_static_files_segment_size{instance=~\"$instance\", segment=\"receipts\"}[$interval]))", + "hide": false, + "instant": false, + "interval": "$interval", + "legendFormat": "Receipts", + "range": true, + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "avg(delta(reth_db_table_size{instance=~\"$instance\", table=\"Transactions\"}[$interval])) + avg(delta(reth_static_files_segment_size{instance=~\"$instance\", segment=\"transactions\"}[$interval]))", + "hide": false, + "instant": false, + "interval": 
"$interval", + "legendFormat": "Transactions", + "range": true, + "refId": "C" + } + ], + "title": "History Growth (interval = ${interval})", + "transformations": [ + { + "id": "calculateField", + "options": { + "binary": { + "left": "Headers", + "reducer": "sum", + "right": "Receipts" + }, + "mode": "reduceRow", + "reduce": { + "include": [ + "Headers", + "Receipts", + "Transactions" + ], + "reducer": "sum" + } + } + } + ], + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "bytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 10, + "w": 12, + "x": 12, + "y": 35 + }, + "id": 14, + "options": { + "legend": { + "calcs": [ + "last" + ], + "displayMode": "table", + "placement": "right", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "sum(reth_db_table_size{instance=~\"$instance\", table=\"Headers\"}) + sum(reth_static_files_segment_size{instance=~\"$instance\", segment=\"headers\"})", + "instant": false, + "interval": "$interval", + "legendFormat": "Headers", + "range": true, + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "sum(reth_db_table_size{instance=~\"$instance\", table=\"Receipts\"}) + sum(reth_static_files_segment_size{instance=~\"$instance\", segment=\"receipts\"})", + "hide": false, + "instant": false, + "interval": "$interval", + "legendFormat": "Receipts", + "range": true, + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "sum(reth_db_table_size{instance=~\"$instance\", table=\"Transactions\"}) + sum(reth_static_files_segment_size{instance=~\"$instance\", segment=\"transactions\"})", + "hide": false, + "instant": false, + "interval": "$interval", + "legendFormat": "Transactions", + "range": true, + "refId": "C" + } + ], + "title": "History Size", + "transformations": [ + { + "id": "calculateField", + "options": { + "mode": "reduceRow", + "reduce": { + "include": [ + "Headers", + "Receipts", + "Transactions" + ], + "reducer": "sum" + } + } + } + ], + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": 
false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "bytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 10, + "w": 12, + "x": 0, + "y": 45 + }, + "id": 3, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": false + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "avg(delta(reth_db_table_size{instance=~\"$instance\", table=\"Headers\"}[$interval])) + avg(delta(reth_static_files_segment_size{instance=~\"$instance\", segment=\"headers\"}[$interval]))", + "instant": false, + "interval": "$interval", + "legendFormat": "__auto", + "range": true, + "refId": "A" + } + ], + "title": "Headers Growth (interval = ${interval})", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "bytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 10, + "w": 12, + "x": 12, + "y": 45 + }, + "id": 5, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": false + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "avg(delta(reth_db_table_size{instance=~\"$instance\", table=\"Receipts\"}[$interval])) + avg(delta(reth_static_files_segment_size{instance=~\"$instance\", segment=\"receipts\"}[$interval]))", + "instant": false, + "interval": "$interval", + "legendFormat": "__auto", + "range": true, + "refId": "A" + } + ], + "title": "Receipts Growth (interval = ${interval})", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + 
"scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "bytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 10, + "w": 12, + "x": 0, + "y": 55 + }, + "id": 4, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": false + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "avg(delta(reth_db_table_size{instance=~\"$instance\", table=\"Transactions\"}[$interval])) + avg(delta(reth_static_files_segment_size{instance=~\"$instance\", segment=\"transactions\"}[$interval]))", + "instant": false, + "interval": "$interval", + "legendFormat": "__auto", + "range": true, + "refId": "A" + } + ], + "title": "Transactions Growth (interval = ${interval})", + "type": "timeseries" + } + ], + "refresh": "", + "schemaVersion": 38, + "style": "dark", + "tags": [], + "templating": { + "list": [ + { + "current": {}, + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "definition": "query_result(reth_info)", + "hide": 0, + "includeAll": false, + "multi": false, + "name": "instance", + "options": [], + "query": { + "query": "query_result(reth_info)", + "refId": "PrometheusVariableQueryEditor-VariableQuery" + }, + "refresh": 1, + "regex": "/.*instance=\\\"([^\\\"]*).*/", + "skipUrlSync": false, + "sort": 0, + "type": "query" + }, + { + "current": { + "selected": true, + "text": "10m", + "value": "10m" + }, + "hide": 0, + "includeAll": false, + "label": "Interval", + "multi": false, + "name": "interval", + "options": [ + { + "selected": false, + "text": "5m", + "value": "5m" + }, + { + "selected": true, + "text": "10m", + "value": "10m" + }, + { + "selected": false, + "text": "30m", + "value": "30m" + }, + { + "selected": false, + "text": "1h", + "value": "1h" + }, + { + "selected": false, + "text": "6h", + "value": "6h" + }, + { + "selected": false, + "text": "12h", + "value": "12h" + }, + { + "selected": false, + "text": "1d", + "value": "1d" + }, + { + "selected": false, + "text": "7d", + "value": "7d" + }, + { + "selected": false, + "text": "14d", + "value": "14d" + }, + { + "selected": false, + "text": "30d", + "value": "30d" + } + ], + "query": "5m,10m,30m,1h,6h,12h,1d,7d,14d,30d", + "queryValue": "", + "skipUrlSync": false, + "type": "custom" + } + ] + }, + "time": { + "from": "now-24h", + "to": "now" + }, + "timepicker": {}, + "timezone": "", + "title": "Reth - State & History", + "uid": "cab0fcc6-1c33-478c-9675-38bc1af5de82", + "version": 1, + "weekStart": "" + } \ No newline at end of file diff --git a/examples/Cargo.toml b/examples/Cargo.toml deleted file mode 100644 index 82b6be45a..000000000 --- a/examples/Cargo.toml +++ /dev/null @@ -1,39 +0,0 @@ -[package] -name = "examples" -version = "0.0.0" -publish = false -edition.workspace = true -license.workspace = true - -[dev-dependencies] -reth-primitives.workspace = true -reth-db.workspace = true -reth-provider.workspace = true -reth-rpc-builder.workspace = true -reth-rpc-types.workspace = true -reth-rpc-types-compat.workspace = true -reth-revm.workspace = true -reth-blockchain-tree.workspace = true 
-reth-beacon-consensus.workspace = true -reth-network-api.workspace = true -reth-network.workspace = true -reth-transaction-pool.workspace = true -reth-tasks.workspace = true - -eyre.workspace = true -futures.workspace = true -async-trait.workspace = true -tokio.workspace = true - -[[example]] -name = "db-access" -path = "db-access.rs" - -[[example]] -name = "network" -path = "network.rs" - -[[example]] -name = "network-txpool" -path = "network-txpool.rs" - diff --git a/examples/README.md b/examples/README.md index 847325f93..4c135f880 100644 --- a/examples/README.md +++ b/examples/README.md @@ -1,17 +1,69 @@ -## Examples of how to use the Reth SDK +# Examples -This directory contains a number of examples showcasing various capabilities of -the `reth-*` crates. +These examples demonstrate the main features of some of Reth's crates and how to use them. -All examples can be executed with: - -``` -cargo run --example $name -``` - -A good starting point for the examples would be [`db-access`](db-access.rs) -and [`rpc-db`](rpc-db). +To run an example, use the command `cargo run -p <example>`. If you've got an example you'd like to see here, please feel free to open an issue. Otherwise, if you've got an example you'd like to add, please feel free to make a PR! + +## Node Builder + +| Example | Description | | -------------------------------------------------- | ------------------------------------------------------------------------------------------------ | +| [Additional RPC namespace](./node-custom-rpc) | Illustrates how to add custom CLI parameters and set up a custom RPC namespace | +| [Custom event hooks](./node-event-hooks) | Illustrates how to hook into various node lifecycle events | +| [Custom dev node](./custom-dev-node) | Illustrates how to run a custom dev node programmatically and submit a transaction to it via RPC | +| [Custom EVM](./custom-evm) | Illustrates how to implement a node with a custom EVM | +| [Custom inspector](./custom-inspector) | Illustrates how to use a custom EVM inspector to trace new transactions | +| [Custom engine types](./custom-engine-types) | Illustrates how to create a node with custom engine types | +| [Custom node components](./custom-node-components) | Illustrates how to configure custom node components | +| [Custom payload builder](./custom-payload-builder) | Illustrates how to use a custom payload builder | + +## ExEx + +| Example | Description | |-------------------------------------------|-----------------------------------------------------------------------------------| +| [Minimal ExEx](./exex/minimal) | Illustrates how to build a simple ExEx | +| [OP Bridge ExEx](./exex/op-bridge) | Illustrates an ExEx that decodes Optimism deposit and withdrawal receipts from L1 | +| [Rollup](./exex/rollup) | Illustrates a rollup ExEx that derives the state from L1 | +| [In Memory State](./exex/in-memory-state) | Illustrates an ExEx that tracks the plain state in memory | + +## RPC + +| Example | Description | | ----------------------- | ---------------------------------------------------------------------------- | +| [DB over RPC](./rpc-db) | Illustrates how to run a standalone RPC server over a Reth database instance | + +## Database + +| Example | Description | | ------------------------ | --------------------------------------------------------------- | +| [DB access](./db-access) | Illustrates how to access Reth's database in a separate process | + +## Network + +| Example | Description | | ------------------------------- |
------------------------------------------------------------ | +| [Standalone network](./network) | Illustrates how to use the network as a standalone component | + +## Mempool + +| Example | Description | | ---------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------- | +| [Trace pending transactions](./txpool-tracing) | Illustrates how to trace pending transactions as they arrive in the mempool | +| [Standalone txpool](./network-txpool) | Illustrates how to use the network as a standalone component together with a transaction pool that has a custom pool validator | + +## P2P + +| Example | Description | | --------------------------- | ----------------------------------------------------------------- | +| [Manual P2P](./manual-p2p) | Illustrates how to connect and communicate with a peer | +| [Polygon P2P](./polygon-p2p) | Illustrates how to connect and communicate with a peer on Polygon | + +## Misc + +| Example | Description | | ---------------------------------- | ----------------------------------------------------------- | +| [Beacon API SSE](./beacon-api-sse) | Illustrates how to subscribe to beacon chain events via SSE | diff --git a/examples/beacon-api-sse/Cargo.toml b/examples/beacon-api-sse/Cargo.toml index 87a882c6c..4582f2598 100644 --- a/examples/beacon-api-sse/Cargo.toml +++ b/examples/beacon-api-sse/Cargo.toml @@ -8,6 +8,7 @@ license.workspace = true [dependencies] reth.workspace = true reth-node-ethereum.workspace = true +alloy-rpc-types-beacon.workspace = true clap.workspace = true tracing.workspace = true diff --git a/examples/beacon-api-sse/src/main.rs b/examples/beacon-api-sse/src/main.rs index 38dada132..0cd4d4e78 100644 --- a/examples/beacon-api-sse/src/main.rs +++ b/examples/beacon-api-sse/src/main.rs @@ -17,10 +17,11 @@ #![cfg_attr(not(test), warn(unused_crate_dependencies))] +use alloy_rpc_types_beacon::beacon::events::PayloadAttributesEvent; use clap::Parser; use futures_util::stream::StreamExt; use mev_share_sse::{client::EventStream, EventClient}; -use reth::{cli::Cli, rpc::types::beacon::events::PayloadAttributesEvent}; +use reth::cli::Cli; use reth_node_ethereum::EthereumNode; use std::net::{IpAddr, Ipv4Addr}; use tracing::{info, warn}; diff --git a/examples/custom-node/Cargo.toml b/examples/custom-engine-types/Cargo.toml similarity index 95% rename from examples/custom-node/Cargo.toml rename to examples/custom-engine-types/Cargo.toml index 9d41edafd..738631306 100644 --- a/examples/custom-node/Cargo.toml +++ b/examples/custom-engine-types/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "custom-node" +name = "custom-engine-types" version = "0.0.0" publish = false edition.workspace = true diff --git a/examples/custom-node/src/main.rs b/examples/custom-engine-types/src/main.rs similarity index 94% rename from examples/custom-node/src/main.rs rename to examples/custom-engine-types/src/main.rs index a2ade9cc1..d16146420 100644 --- a/examples/custom-node/src/main.rs +++ b/examples/custom-engine-types/src/main.rs @@ -37,9 +37,8 @@ use reth_node_api::{ EngineTypes, PayloadAttributes, PayloadBuilderAttributes, PayloadOrAttributes, }; use reth_node_core::{args::RpcServerArgs, node_config::NodeConfig}; -use reth_node_ethereum::{ - node::{EthereumNetworkBuilder, EthereumPoolBuilder}, - EthEvmConfig, +use reth_node_ethereum::node::{ + EthereumExecutorBuilder, EthereumNetworkBuilder, EthereumPoolBuilder, }; use reth_payload_builder::{ error::PayloadBuilderError,
EthBuiltPayload, EthPayloadBuilderAttributes, PayloadBuilderHandle, @@ -187,12 +186,6 @@ impl NodeTypes for MyCustomNode { type Primitives = (); // use the custom engine types type Engine = CustomEngineTypes; - // use the default ethereum EVM config - type Evm = EthEvmConfig; - - fn evm_config(&self) -> Self::Evm { - Self::Evm::default() - } } /// Implement the Node trait for the custom node @@ -202,18 +195,21 @@ impl<N> Node<N> for MyCustomNode where N: FullNodeTypes, { - type PoolBuilder = EthereumPoolBuilder; - type NetworkBuilder = EthereumNetworkBuilder; - type PayloadBuilder = CustomPayloadServiceBuilder; - - fn components( - self, - ) -> ComponentsBuilder<N, Self::PoolBuilder, Self::PayloadBuilder, Self::NetworkBuilder> { + type ComponentsBuilder = ComponentsBuilder< + N, + EthereumPoolBuilder, + CustomPayloadServiceBuilder, + EthereumNetworkBuilder, + EthereumExecutorBuilder, + >; + + fn components_builder(self) -> Self::ComponentsBuilder { ComponentsBuilder::default() .node_types::<N>() .pool(EthereumPoolBuilder::default()) .payload(CustomPayloadServiceBuilder::default()) .network(EthereumNetworkBuilder::default()) + .executor(EthereumExecutorBuilder::default()) } } @@ -239,8 +235,7 @@ where .interval(conf.interval()) .deadline(conf.deadline()) .max_payload_tasks(conf.max_payload_tasks()) - .extradata(conf.extradata_bytes()) - .max_gas_limit(conf.max_gas_limit()); + .extradata(conf.extradata_bytes()); let payload_generator = BasicPayloadJobGenerator::with_builder( ctx.provider().clone(), diff --git a/examples/custom-evm/src/main.rs b/examples/custom-evm/src/main.rs index 6c80c9a74..9572e38be 100644 --- a/examples/custom-evm/src/main.rs +++ b/examples/custom-evm/src/main.rs @@ -3,7 +3,7 @@ #![cfg_attr(not(test), warn(unused_crate_dependencies))] use reth::{ - builder::{node::NodeTypes, NodeBuilder}, + builder::{components::ExecutorBuilder, BuilderContext, NodeBuilder}, primitives::{ address, revm_primitives::{CfgEnvWithHandlerCfg, Env, PrecompileResult, TxEnv}, @@ -17,10 +17,10 @@ }, tasks::TaskManager, }; -use reth_node_api::{ConfigureEvm, ConfigureEvmEnv}; +use reth_node_api::{ConfigureEvm, ConfigureEvmEnv, FullNodeTypes}; use reth_node_core::{args::RpcServerArgs, node_config::NodeConfig}; -use reth_node_ethereum::{EthEngineTypes, EthEvmConfig, EthereumNode}; -use reth_primitives::{Chain, ChainSpec, Genesis, Header, Transaction}; +use reth_node_ethereum::{EthEvmConfig, EthExecutorProvider, EthereumNode}; +use reth_primitives::{Chain, ChainSpec, Genesis, Header, TransactionSigned}; use reth_tracing::{RethTracer, Tracer}; use std::sync::Arc; @@ -61,13 +61,8 @@ impl MyEvmConfig { } impl ConfigureEvmEnv for MyEvmConfig { - type TxMeta = (); - - fn fill_tx_env<T>(tx_env: &mut TxEnv, transaction: T, sender: Address, meta: Self::TxMeta) - where - T: AsRef<Transaction>, - { - EthEvmConfig::fill_tx_env(tx_env, transaction, sender, meta) + fn fill_tx_env(tx_env: &mut TxEnv, transaction: &TransactionSigned, sender: Address) { + EthEvmConfig::fill_tx_env(tx_env, transaction, sender) } fn fill_cfg_env( @@ -81,7 +76,9 @@ impl ConfigureEvmEnv for MyEvmConfig { } impl ConfigureEvm for MyEvmConfig { - fn evm<'a, DB: Database + 'a>(&self, db: DB) -> Evm<'a, (), DB> { + type DefaultExternalContext<'a> = (); + + fn evm<'a, DB: Database + 'a>(&self, db: DB) -> Evm<'a, Self::DefaultExternalContext<'a>, DB> { EvmBuilder::default() .with_db(db) // add additional precompiles @@ -104,18 +101,26 @@ } } -#[derive(Debug, Clone, Default)] +/// Builds a regular ethereum block executor that uses the custom EVM.
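+/// +/// A minimal usage sketch: this builder is wired in exactly as in `main` at the end of this example, replacing the default executor of the standard ethereum components. +/// ```ignore +/// // Swap the custom EVM into the default ethereum components. +/// let components = EthereumNode::components().executor(MyExecutorBuilder::default()); +/// ```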
+#[derive(Debug, Default, Clone, Copy)] #[non_exhaustive] -struct MyCustomNode; - -/// Configure the node types -impl NodeTypes for MyCustomNode { - type Primitives = (); - type Engine = EthEngineTypes; - type Evm = MyEvmConfig; - - fn evm_config(&self) -> Self::Evm { - Self::Evm::default() +pub struct MyExecutorBuilder; + +impl<Node> ExecutorBuilder<Node> for MyExecutorBuilder +where + Node: FullNodeTypes, +{ + type EVM = MyEvmConfig; + type Executor = EthExecutorProvider<Self::EVM>; + + async fn build_evm( + self, + ctx: &BuilderContext<Node>, + ) -> eyre::Result<(Self::EVM, Self::Executor)> { + Ok(( + MyEvmConfig::default(), + EthExecutorProvider::new(ctx.chain_spec(), MyEvmConfig::default()), + )) } } @@ -140,8 +145,8 @@ async fn main() -> eyre::Result<()> { let handle = NodeBuilder::new(node_config) .testing_node(tasks.executor()) - .with_types(MyCustomNode::default()) - .with_components(EthereumNode::components()) + .with_types::<EthereumNode>() + .with_components(EthereumNode::components().executor(MyExecutorBuilder::default())) .launch() .await .unwrap(); diff --git a/examples/custom-node-components/src/main.rs b/examples/custom-node-components/src/main.rs index 96672807d..ac98de7af 100644 --- a/examples/custom-node-components/src/main.rs +++ b/examples/custom-node-components/src/main.rs @@ -19,7 +19,7 @@ fn main() { .run(|builder, _| async move { let handle = builder // use the default ethereum node types - .with_types(EthereumNode::default()) + .with_types::<EthereumNode>() // Configure the components of the node // use default ethereum components but use our custom pool .with_components(EthereumNode::components().pool(CustomPoolBuilder::default())) @@ -64,7 +64,7 @@ where let transaction_pool = reth_transaction_pool::Pool::eth_pool(validator, blob_store, self.pool_config); info!(target: "reth::cli", "Transaction pool initialized"); - let transactions_path = data_dir.txpool_transactions_path(); + let transactions_path = data_dir.txpool_transactions(); // spawn txpool maintenance task { diff --git a/examples/custom-payload-builder/src/main.rs b/examples/custom-payload-builder/src/main.rs index 8e028771b..b2bc6af36 100644 --- a/examples/custom-payload-builder/src/main.rs +++ b/examples/custom-payload-builder/src/main.rs @@ -47,8 +47,7 @@ where .interval(conf.interval()) .deadline(conf.deadline()) .max_payload_tasks(conf.max_payload_tasks()) - .extradata(conf.extradata_bytes()) - .max_gas_limit(conf.max_gas_limit()); + .extradata(conf.extradata_bytes()); let payload_generator = EmptyBlockPayloadJobGenerator::with_builder( ctx.provider().clone(), @@ -73,7 +72,7 @@ fn main() { Cli::parse_args() .run(|builder, _| async move { let handle = builder - .with_types(EthereumNode::default()) + .with_types::<EthereumNode>() // Configure the components of the node // use default ethereum components but use our custom payload builder .with_components( diff --git a/examples/db-access/Cargo.toml b/examples/db-access/Cargo.toml new file mode 100644 index 000000000..e447493c2 --- /dev/null +++ b/examples/db-access/Cargo.toml @@ -0,0 +1,16 @@ +[package] +name = "db-access" +version = "0.0.0" +publish = false +edition.workspace = true +license.workspace = true + + +[dependencies] +reth-db.workspace = true +reth-primitives.workspace = true +reth-provider.workspace = true +reth-rpc-types.workspace = true + + +eyre.workspace = true diff --git a/examples/db-access.rs b/examples/db-access/src/main.rs similarity index 100% rename from examples/db-access.rs rename to examples/db-access/src/main.rs diff --git a/examples/exex/in-memory-state/Cargo.toml
b/examples/exex/in-memory-state/Cargo.toml new file mode 100644 index 000000000..c7fd34ea5 --- /dev/null +++ b/examples/exex/in-memory-state/Cargo.toml @@ -0,0 +1,15 @@ +[package] +name = "exex-in-memory-state" +version = "0.0.0" +publish = false +edition.workspace = true +license.workspace = true + +[dependencies] +reth.workspace = true +reth-exex.workspace = true +reth-node-api.workspace = true +reth-node-ethereum.workspace = true +reth-tracing.workspace = true + +eyre.workspace = true diff --git a/examples/exex/in-memory-state/src/main.rs b/examples/exex/in-memory-state/src/main.rs new file mode 100644 index 000000000..451bb9c42 --- /dev/null +++ b/examples/exex/in-memory-state/src/main.rs @@ -0,0 +1,49 @@ +#![warn(unused_crate_dependencies)] + +use reth::providers::BundleStateWithReceipts; +use reth_exex::{ExExContext, ExExEvent, ExExNotification}; +use reth_node_api::FullNodeComponents; +use reth_node_ethereum::EthereumNode; +use reth_tracing::tracing::info; + +/// An ExEx that keeps track of the entire state in memory +async fn track_state<Node: FullNodeComponents>(mut ctx: ExExContext<Node>) -> eyre::Result<()> { + // keeps the entire plain state of the chain in memory + let mut state = BundleStateWithReceipts::default(); + + while let Some(notification) = ctx.notifications.recv().await { + match &notification { + ExExNotification::ChainCommitted { new } => { + info!(committed_chain = ?new.range(), "Received commit"); + } + ExExNotification::ChainReorged { old, new } => { + // revert to block before the reorg + state.revert_to(new.first().number - 1); + info!(from_chain = ?old.range(), to_chain = ?new.range(), "Received reorg"); + } + ExExNotification::ChainReverted { old } => { + state.revert_to(old.first().number - 1); + info!(reverted_chain = ?old.range(), "Received revert"); + } + }; + + if let Some(committed_chain) = notification.committed_chain() { + // extend the state with the new chain + state.extend(committed_chain.state().clone()); + ctx.events.send(ExExEvent::FinishedHeight(committed_chain.tip().number))?; + } + } + Ok(()) +} + +fn main() -> eyre::Result<()> { + reth::cli::Cli::parse_args().run(|builder, _| async move { + let handle = builder + .node(EthereumNode::default()) + .install_exex("in-memory-state", |ctx| async move { Ok(track_state(ctx)) }) + .launch() + .await?; + + handle.wait_for_node_exit().await + }) +} diff --git a/examples/exex/minimal/Cargo.toml b/examples/exex/minimal/Cargo.toml index c1c586fd5..a7bcc327a 100644 --- a/examples/exex/minimal/Cargo.toml +++ b/examples/exex/minimal/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "minimal" +name = "exex-minimal" version = "0.0.0" publish = false edition.workspace = true @@ -12,7 +12,7 @@ reth-node-api.workspace = true reth-node-core.workspace = true reth-node-ethereum.workspace = true reth-primitives.workspace = true -reth-provider.workspace = true +reth-tracing.workspace = true eyre.workspace = true tokio.workspace = true diff --git a/examples/exex/minimal/src/main.rs b/examples/exex/minimal/src/main.rs index 1c2463cda..18d3acd2c 100644 --- a/examples/exex/minimal/src/main.rs +++ b/examples/exex/minimal/src/main.rs @@ -1,8 +1,8 @@ use futures::Future; -use reth_exex::{ExExContext, ExExEvent}; +use reth_exex::{ExExContext, ExExEvent, ExExNotification}; use reth_node_api::FullNodeComponents; use reth_node_ethereum::EthereumNode; -use reth_provider::CanonStateNotification; +use reth_tracing::tracing::info; /// The initialization logic of the ExEx is just an async function.
/// @@ -21,19 +21,20 @@ async fn exex<Node: FullNodeComponents>(mut ctx: ExExContext<Node>) -> eyre::Result<()> { while let Some(notification) = ctx.notifications.recv().await { match &notification { - CanonStateNotification::Commit { new } => { - println!("Received commit: {:?}", new.first().number..=new.tip().number); + ExExNotification::ChainCommitted { new } => { + info!(committed_chain = ?new.range(), "Received commit"); } - CanonStateNotification::Reorg { old, new } => { - println!( - "Received reorg: {:?} -> {:?}", - old.first().number..=old.tip().number, - new.first().number..=new.tip().number - ); + ExExNotification::ChainReorged { old, new } => { + info!(from_chain = ?old.range(), to_chain = ?new.range(), "Received reorg"); + } + ExExNotification::ChainReverted { old } => { + info!(reverted_chain = ?old.range(), "Received revert"); } }; - ctx.events.send(ExExEvent::FinishedHeight(notification.tip().number))?; + if let Some(committed_chain) = notification.committed_chain() { + ctx.events.send(ExExEvent::FinishedHeight(committed_chain.tip().number))?; + } } Ok(()) } diff --git a/examples/exex/op-bridge/Cargo.toml b/examples/exex/op-bridge/Cargo.toml index 3d87b2801..d8669e914 100644 --- a/examples/exex/op-bridge/Cargo.toml +++ b/examples/exex/op-bridge/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "op-bridge" +name = "exex-op-bridge" version = "0.0.0" publish = false edition.workspace = true diff --git a/examples/exex/op-bridge/src/main.rs b/examples/exex/op-bridge/src/main.rs index 92e6ef106..02c87ba15 100644 --- a/examples/exex/op-bridge/src/main.rs +++ b/examples/exex/op-bridge/src/main.rs @@ -3,7 +3,7 @@ use futures::Future; use reth_exex::{ExExContext, ExExEvent}; use reth_node_api::FullNodeComponents; use reth_node_ethereum::EthereumNode; -use reth_primitives::{Log, SealedBlockWithSenders, TransactionSigned}; +use reth_primitives::{address, Address, Log, SealedBlockWithSenders, TransactionSigned}; use reth_provider::Chain; use reth_tracing::tracing::info; use rusqlite::Connection; @@ -11,6 +11,15 @@ use rusqlite::Connection; sol!(L1StandardBridge, "l1_standard_bridge_abi.json"); use crate::L1StandardBridge::{ETHBridgeFinalized, ETHBridgeInitiated, L1StandardBridgeEvents}; +const OP_BRIDGES: [Address; 6] = [ + address!("3154Cf16ccdb4C6d922629664174b904d80F2C35"), + address!("3a05E5d33d7Ab3864D53aaEc93c8301C1Fa49115"), + address!("697402166Fbf2F22E970df8a6486Ef171dbfc524"), + address!("99C9fc46f92E8a1c0deC1b1747d010903E884bE1"), + address!("735aDBbE72226BD52e818E7181953f42E3b0FF21"), + address!("3B95bC951EE0f553ba487327278cAc44f29715E5"), +]; + /// Initializes the ExEx. /// /// Opens up a SQLite database and creates the tables (if they don't exist). @@ -94,7 +103,8 @@ async fn op_bridge_exex<Node: FullNodeComponents>( ) -> eyre::Result<()> { // Process all new chain state notifications while let Some(notification) = ctx.notifications.recv().await { - if let Some(reverted_chain) = notification.reverted() { + // Revert all deposits and withdrawals + if let Some(reverted_chain) = notification.reverted_chain() { let events = decode_chain_into_events(&reverted_chain); let mut deposits = 0; @@ -126,22 +136,22 @@ } // Insert all new deposits and withdrawals - let committed_chain = notification.committed(); - let events = decode_chain_into_events(&committed_chain); - - let mut deposits = 0; - let mut withdrawals = 0; - - for (block, tx, log, event) in events { - match event { - // L1 -> L2 deposit - L1StandardBridgeEvents::ETHBridgeInitiated(ETHBridgeInitiated { - amount, - from, - to, - ..
- }) => { - let inserted = connection.execute( + if let Some(committed_chain) = notification.committed_chain() { + let events = decode_chain_into_events(&committed_chain); + + let mut deposits = 0; + let mut withdrawals = 0; + + for (block, tx, log, event) in events { + match event { + // L1 -> L2 deposit + L1StandardBridgeEvents::ETHBridgeInitiated(ETHBridgeInitiated { + amount, + from, + to, + .. + }) => { + let inserted = connection.execute( r#" INSERT INTO deposits (block_number, tx_hash, contract_address, "from", "to", amount) VALUES (?, ?, ?, ?, ?, ?) @@ -155,16 +165,16 @@ async fn op_bridge_exex( amount.to_string(), ), )?; - deposits += inserted; - } - // L2 -> L1 withdrawal - L1StandardBridgeEvents::ETHBridgeFinalized(ETHBridgeFinalized { - amount, - from, - to, - .. - }) => { - let inserted = connection.execute( + deposits += inserted; + } + // L2 -> L1 withdrawal + L1StandardBridgeEvents::ETHBridgeFinalized(ETHBridgeFinalized { + amount, + from, + to, + .. + }) => { + let inserted = connection.execute( r#" INSERT INTO withdrawals (block_number, tx_hash, contract_address, "from", "to", amount) VALUES (?, ?, ?, ?, ?, ?) @@ -178,17 +188,18 @@ async fn op_bridge_exex( amount.to_string(), ), )?; - withdrawals += inserted; - } - _ => continue, - }; - } + withdrawals += inserted; + } + _ => continue, + }; + } - info!(block_range = ?committed_chain.range(), %deposits, %withdrawals, "Committed chain events"); + info!(block_range = ?committed_chain.range(), %deposits, %withdrawals, "Committed chain events"); - // Send a finished height event, signaling the node that we don't need any blocks below - // this height anymore - ctx.events.send(ExExEvent::FinishedHeight(notification.tip().number))?; + // Send a finished height event, signaling the node that we don't need any blocks below + // this height anymore + ctx.events.send(ExExEvent::FinishedHeight(committed_chain.tip().number))?; + } } Ok(()) @@ -211,8 +222,14 @@ fn decode_chain_into_events( .zip(receipts.iter().flatten()) .map(move |(tx, receipt)| (block, tx, receipt)) }) - // Get all logs - .flat_map(|(block, tx, receipt)| receipt.logs.iter().map(move |log| (block, tx, log))) + // Get all logs from expected bridge contracts + .flat_map(|(block, tx, receipt)| { + receipt + .logs + .iter() + .filter(|log| OP_BRIDGES.contains(&log.address)) + .map(move |log| (block, tx, log)) + }) // Decode and filter bridge events .filter_map(|(block, tx, log)| { L1StandardBridgeEvents::decode_raw_log(log.topics(), &log.data.data, true) diff --git a/examples/exex/rollup/Cargo.toml b/examples/exex/rollup/Cargo.toml new file mode 100644 index 000000000..f32a77629 --- /dev/null +++ b/examples/exex/rollup/Cargo.toml @@ -0,0 +1,40 @@ +[package] +name = "exex-rollup" +version = "0.0.0" +publish = false +edition.workspace = true +license.workspace = true + +[dependencies] +# reth +reth.workspace = true +reth-cli-runner.workspace = true +reth-exex.workspace = true +reth-interfaces.workspace = true +reth-node-api.workspace = true +reth-node-core.workspace = true +reth-node-ethereum.workspace = true +reth-primitives.workspace = true +reth-provider.workspace = true +reth-revm.workspace = true +reth-tracing.workspace = true +reth-trie.workspace = true + +# async +futures.workspace = true +tokio.workspace = true + +# misc +alloy-consensus = { workspace = true, features = ["kzg"] } +alloy-rlp.workspace = true +alloy-sol-types = { workspace = true, features = ["json"] } +eyre.workspace = true +foundry-blob-explorers = { git = 
"https://github.com/foundry-rs/block-explorers" } +once_cell.workspace = true +rusqlite = { version = "0.31.0", features = ["bundled"] } +serde_json.workspace = true + +[dev-dependencies] +reth-interfaces = { workspace = true, features = ["test-utils"] } +secp256k1.workspace = true + diff --git a/examples/exex/rollup/rollup_abi.json b/examples/exex/rollup/rollup_abi.json new file mode 100644 index 000000000..d7278e9f6 --- /dev/null +++ b/examples/exex/rollup/rollup_abi.json @@ -0,0 +1 @@ +{"abi":[{"type":"constructor","inputs":[{"name":"defaultRollupChainId","type":"uint256","internalType":"uint256"},{"name":"admin","type":"address","internalType":"address"}],"stateMutability":"nonpayable"},{"type":"fallback","stateMutability":"payable"},{"type":"receive","stateMutability":"payable"},{"type":"function","name":"DEFAULT_ADMIN_ROLE","inputs":[],"outputs":[{"name":"","type":"bytes32","internalType":"bytes32"}],"stateMutability":"view"},{"type":"function","name":"SEQUENCER_ROLE","inputs":[],"outputs":[{"name":"","type":"bytes32","internalType":"bytes32"}],"stateMutability":"view"},{"type":"function","name":"acceptDefaultAdminTransfer","inputs":[],"outputs":[],"stateMutability":"nonpayable"},{"type":"function","name":"beginDefaultAdminTransfer","inputs":[{"name":"newAdmin","type":"address","internalType":"address"}],"outputs":[],"stateMutability":"nonpayable"},{"type":"function","name":"blockCommitment","inputs":[{"name":"header","type":"tuple","internalType":"struct Zenith.BlockHeader","components":[{"name":"rollupChainId","type":"uint256","internalType":"uint256"},{"name":"sequence","type":"uint256","internalType":"uint256"},{"name":"confirmBy","type":"uint256","internalType":"uint256"},{"name":"gasLimit","type":"uint256","internalType":"uint256"},{"name":"rewardAddress","type":"address","internalType":"address"}]},{"name":"blockDataHash","type":"bytes32","internalType":"bytes32"}],"outputs":[{"name":"commit","type":"bytes32","internalType":"bytes32"}],"stateMutability":"view"},{"type":"function","name":"cancelDefaultAdminTransfer","inputs":[],"outputs":[],"stateMutability":"nonpayable"},{"type":"function","name":"changeDefaultAdminDelay","inputs":[{"name":"newDelay","type":"uint48","internalType":"uint48"}],"outputs":[],"stateMutability":"nonpayable"},{"type":"function","name":"defaultAdmin","inputs":[],"outputs":[{"name":"","type":"address","internalType":"address"}],"stateMutability":"view"},{"type":"function","name":"defaultAdminDelay","inputs":[],"outputs":[{"name":"","type":"uint48","internalType":"uint48"}],"stateMutability":"view"},{"type":"function","name":"defaultAdminDelayIncreaseWait","inputs":[],"outputs":[{"name":"","type":"uint48","internalType":"uint48"}],"stateMutability":"view"},{"type":"function","name":"enter","inputs":[{"name":"rollupChainId","type":"uint256","internalType":"uint256"},{"name":"rollupRecipient","type":"address","internalType":"address"},{"name":"token","type":"address","internalType":"address"},{"name":"amount","type":"uint256","internalType":"uint256"}],"outputs":[],"stateMutability":"payable"},{"type":"function","name":"enter","inputs":[{"name":"rollupChainId","type":"uint256","internalType":"uint256"},{"name":"rollupRecipient","type":"address","internalType":"address"}],"outputs":[],"stateMutability":"payable"},{"type":"function","name":"fulfillExits","inputs":[{"name":"orders","type":"tuple[]","internalType":"struct 
Passage.ExitOrder[]","components":[{"name":"rollupChainId","type":"uint256","internalType":"uint256"},{"name":"token","type":"address","internalType":"address"},{"name":"recipient","type":"address","internalType":"address"},{"name":"amount","type":"uint256","internalType":"uint256"}]}],"outputs":[],"stateMutability":"payable"},{"type":"function","name":"getRoleAdmin","inputs":[{"name":"role","type":"bytes32","internalType":"bytes32"}],"outputs":[{"name":"","type":"bytes32","internalType":"bytes32"}],"stateMutability":"view"},{"type":"function","name":"grantRole","inputs":[{"name":"role","type":"bytes32","internalType":"bytes32"},{"name":"account","type":"address","internalType":"address"}],"outputs":[],"stateMutability":"nonpayable"},{"type":"function","name":"hasRole","inputs":[{"name":"role","type":"bytes32","internalType":"bytes32"},{"name":"account","type":"address","internalType":"address"}],"outputs":[{"name":"","type":"bool","internalType":"bool"}],"stateMutability":"view"},{"type":"function","name":"lastSubmittedAtBlock","inputs":[{"name":"","type":"uint256","internalType":"uint256"}],"outputs":[{"name":"","type":"uint256","internalType":"uint256"}],"stateMutability":"view"},{"type":"function","name":"nextSequence","inputs":[{"name":"","type":"uint256","internalType":"uint256"}],"outputs":[{"name":"","type":"uint256","internalType":"uint256"}],"stateMutability":"view"},{"type":"function","name":"owner","inputs":[],"outputs":[{"name":"","type":"address","internalType":"address"}],"stateMutability":"view"},{"type":"function","name":"pendingDefaultAdmin","inputs":[],"outputs":[{"name":"newAdmin","type":"address","internalType":"address"},{"name":"schedule","type":"uint48","internalType":"uint48"}],"stateMutability":"view"},{"type":"function","name":"pendingDefaultAdminDelay","inputs":[],"outputs":[{"name":"newDelay","type":"uint48","internalType":"uint48"},{"name":"schedule","type":"uint48","internalType":"uint48"}],"stateMutability":"view"},{"type":"function","name":"renounceRole","inputs":[{"name":"role","type":"bytes32","internalType":"bytes32"},{"name":"account","type":"address","internalType":"address"}],"outputs":[],"stateMutability":"nonpayable"},{"type":"function","name":"revokeRole","inputs":[{"name":"role","type":"bytes32","internalType":"bytes32"},{"name":"account","type":"address","internalType":"address"}],"outputs":[],"stateMutability":"nonpayable"},{"type":"function","name":"rollbackDefaultAdminDelay","inputs":[],"outputs":[],"stateMutability":"nonpayable"},{"type":"function","name":"submitBlock","inputs":[{"name":"header","type":"tuple","internalType":"struct 
Zenith.BlockHeader","components":[{"name":"rollupChainId","type":"uint256","internalType":"uint256"},{"name":"sequence","type":"uint256","internalType":"uint256"},{"name":"confirmBy","type":"uint256","internalType":"uint256"},{"name":"gasLimit","type":"uint256","internalType":"uint256"},{"name":"rewardAddress","type":"address","internalType":"address"}]},{"name":"blockDataHash","type":"bytes32","internalType":"bytes32"},{"name":"v","type":"uint8","internalType":"uint8"},{"name":"r","type":"bytes32","internalType":"bytes32"},{"name":"s","type":"bytes32","internalType":"bytes32"},{"name":"blockData","type":"bytes","internalType":"bytes"}],"outputs":[],"stateMutability":"nonpayable"},{"type":"function","name":"supportsInterface","inputs":[{"name":"interfaceId","type":"bytes4","internalType":"bytes4"}],"outputs":[{"name":"","type":"bool","internalType":"bool"}],"stateMutability":"view"},{"type":"event","name":"BlockData","inputs":[{"name":"blockData","type":"bytes","indexed":false,"internalType":"bytes"}],"anonymous":false},{"type":"event","name":"BlockSubmitted","inputs":[{"name":"sequencer","type":"address","indexed":true,"internalType":"address"},{"name":"header","type":"tuple","indexed":true,"internalType":"struct Zenith.BlockHeader","components":[{"name":"rollupChainId","type":"uint256","internalType":"uint256"},{"name":"sequence","type":"uint256","internalType":"uint256"},{"name":"confirmBy","type":"uint256","internalType":"uint256"},{"name":"gasLimit","type":"uint256","internalType":"uint256"},{"name":"rewardAddress","type":"address","internalType":"address"}]},{"name":"blockDataHash","type":"bytes32","indexed":false,"internalType":"bytes32"}],"anonymous":false},{"type":"event","name":"DefaultAdminDelayChangeCanceled","inputs":[],"anonymous":false},{"type":"event","name":"DefaultAdminDelayChangeScheduled","inputs":[{"name":"newDelay","type":"uint48","indexed":false,"internalType":"uint48"},{"name":"effectSchedule","type":"uint48","indexed":false,"internalType":"uint48"}],"anonymous":false},{"type":"event","name":"DefaultAdminTransferCanceled","inputs":[],"anonymous":false},{"type":"event","name":"DefaultAdminTransferScheduled","inputs":[{"name":"newAdmin","type":"address","indexed":true,"internalType":"address"},{"name":"acceptSchedule","type":"uint48","indexed":false,"internalType":"uint48"}],"anonymous":false},{"type":"event","name":"Enter","inputs":[{"name":"rollupChainId","type":"uint256","indexed":false,"internalType":"uint256"},{"name":"token","type":"address","indexed":true,"internalType":"address"},{"name":"rollupRecipient","type":"address","indexed":true,"internalType":"address"},{"name":"amount","type":"uint256","indexed":false,"internalType":"uint256"}],"anonymous":false},{"type":"event","name":"ExitFilled","inputs":[{"name":"rollupChainId","type":"uint256","indexed":false,"internalType":"uint256"},{"name":"token","type":"address","indexed":true,"internalType":"address"},{"name":"hostRecipient","type":"address","indexed":true,"internalType":"address"},{"name":"amount","type":"uint256","indexed":false,"internalType":"uint256"}],"anonymous":false},{"type":"event","name":"RoleAdminChanged","inputs":[{"name":"role","type":"bytes32","indexed":true,"internalType":"bytes32"},{"name":"previousAdminRole","type":"bytes32","indexed":true,"internalType":"bytes32"},{"name":"newAdminRole","type":"bytes32","indexed":true,"internalType":"bytes32"}],"anonymous":false},{"type":"event","name":"RoleGranted","inputs":[{"name":"role","type":"bytes32","indexed":true,"internalType":"bytes32"},{"name"
:"account","type":"address","indexed":true,"internalType":"address"},{"name":"sender","type":"address","indexed":true,"internalType":"address"}],"anonymous":false},{"type":"event","name":"RoleRevoked","inputs":[{"name":"role","type":"bytes32","indexed":true,"internalType":"bytes32"},{"name":"account","type":"address","indexed":true,"internalType":"address"},{"name":"sender","type":"address","indexed":true,"internalType":"address"}],"anonymous":false},{"type":"error","name":"AccessControlBadConfirmation","inputs":[]},{"type":"error","name":"AccessControlEnforcedDefaultAdminDelay","inputs":[{"name":"schedule","type":"uint48","internalType":"uint48"}]},{"type":"error","name":"AccessControlEnforcedDefaultAdminRules","inputs":[]},{"type":"error","name":"AccessControlInvalidDefaultAdmin","inputs":[{"name":"defaultAdmin","type":"address","internalType":"address"}]},{"type":"error","name":"AccessControlUnauthorizedAccount","inputs":[{"name":"account","type":"address","internalType":"address"},{"name":"neededRole","type":"bytes32","internalType":"bytes32"}]},{"type":"error","name":"BadSequence","inputs":[{"name":"expected","type":"uint256","internalType":"uint256"}]},{"type":"error","name":"BadSignature","inputs":[{"name":"derivedSequencer","type":"address","internalType":"address"}]},{"type":"error","name":"BlockExpired","inputs":[]},{"type":"error","name":"OneRollupBlockPerHostBlock","inputs":[]},{"type":"error","name":"OrderExpired","inputs":[]},{"type":"error","name":"SafeCastOverflowedUintDowncast","inputs":[{"name":"bits","type":"uint8","internalType":"uint8"},{"name":"value","type":"uint256","internalType":"uint256"}]}],"bytecode":{"object":"0x60a060405234801561000f575f80fd5b50604051611a98380380611a9883398101604081905261002e916101ae565b608082905262015180816001600160a01b03811661006557604051636116401160e11b81525f600482015260240160405180910390fd5b600180546001600160d01b0316600160d01b65ffffffffffff85160217905561008e5f82610098565b50505050506101e8565b5f826100f4575f6100b16002546001600160a01b031690565b6001600160a01b0316146100d857604051631fe1e13d60e11b815260040160405180910390fd5b600280546001600160a01b0319166001600160a01b0384161790555b6100fe8383610107565b90505b92915050565b5f828152602081815260408083206001600160a01b038516845290915281205460ff166101a7575f838152602081815260408083206001600160a01b03861684529091529020805460ff1916600117905561015f3390565b6001600160a01b0316826001600160a01b0316847f2f8788117e7eff1d82e926ec794901d17c78024a50270940304540a733656f0d60405160405180910390a4506001610101565b505f610101565b5f80604083850312156101bf575f80fd5b825160208401519092506001600160a01b03811681146101dd575f80fd5b809150509250929050565b6080516118916102075f395f818161018e01526101ba01526118915ff3fe608060405260043610610184575f3560e01c80637e82bb01116100d0578063c7bc4a6211610089578063cf6eefb711610063578063cf6eefb7146104b7578063d547741f146104f1578063d602b9fd14610510578063ea3b9ba114610524576101b5565b8063c7bc4a6214610470578063cc8463c81461048f578063cefc1429146104a3576101b5565b80637e82bb011461039b57806384ef8ffc146103c65780638da5cb5b146103f757806391d148541461040b578063a1eda53c1461042a578063a217fddf1461045d576101b5565b806336568abe1161013d5780634842855c116101175780634842855c1461031a578063634e93da1461033e578063649a5ec71461035d5780637e5692741461037c576101b5565b806336568abe146102d557806336702119146102f45780633805c6bd14610307576101b5565b806301ffc9a7146101df578063022d63fb146102135780630aa6220b1461023b5780631e6637201461024f578063248a9ca3146102885780632f2ff15d146102b6576101b5565b366101b5576101b37f000000000000000000000000000000000000000000000000000
000000000000033610532565b005b6101b37f000000000000000000000000000000000000000000000000000000000000000033610532565b3480156101ea575f80fd5b506101fe6101f93660046114a8565b61057b565b60405190151581526020015b60405180910390f35b34801561021e575f80fd5b50620697805b60405165ffffffffffff909116815260200161020a565b348015610246575f80fd5b506101b36105a5565b34801561025a575f80fd5b5061027a6102693660046114cf565b60036020525f908152604090205481565b60405190815260200161020a565b348015610293575f80fd5b5061027a6102a23660046114cf565b5f9081526020819052604090206001015490565b3480156102c1575f80fd5b506101b36102d0366004611501565b6105ba565b3480156102e0575f80fd5b506101b36102ef366004611501565b6105e6565b6101b361030236600461152b565b610691565b6101b361031536600461159a565b610960565b348015610325575f80fd5b5061027a6d53455155454e4345525f524f4c4560901b81565b348015610349575f80fd5b506101b36103583660046115db565b610a31565b348015610368575f80fd5b506101b36103773660046115f4565b610a44565b348015610387575f80fd5b506101b361039636600461169a565b610a57565b3480156103a6575f80fd5b5061027a6103b53660046114cf565b60046020525f908152604090205481565b3480156103d1575f80fd5b506002546001600160a01b03165b6040516001600160a01b03909116815260200161020a565b348015610402575f80fd5b506103df610aa6565b348015610416575f80fd5b506101fe610425366004611501565b610abe565b348015610435575f80fd5b5061043e610ae6565b6040805165ffffffffffff93841681529290911660208301520161020a565b348015610468575f80fd5b5061027a5f81565b34801561047b575f80fd5b5061027a61048a36600461174d565b610b38565b34801561049a575f80fd5b50610224610bcd565b3480156104ae575f80fd5b506101b3610c2b565b3480156104c2575f80fd5b506104cb610c6a565b604080516001600160a01b03909316835265ffffffffffff90911660208301520161020a565b3480156104fc575f80fd5b506101b361050b366004611501565b610c8b565b34801561051b575f80fd5b506101b3610cb3565b6101b3610532366004611501565b604080518381523460208201526001600160a01b038316915f917fe8a7ca8155e14d9cc8faeecec58a97268da95a2327cc892593748ce37cc6953f910160405180910390a35050565b5f6001600160e01b031982166318a4c3c360e11b148061059f575061059f82610cc5565b92915050565b5f6105af81610cf9565b6105b7610d03565b50565b816105d857604051631fe1e13d60e11b815260040160405180910390fd5b6105e28282610d0f565b5050565b8115801561060157506002546001600160a01b038281169116145b15610687575f80610610610c6a565b90925090506001600160a01b038216151580610632575065ffffffffffff8116155b8061064557504265ffffffffffff821610155b15610672576040516319ca5ebb60e01b815265ffffffffffff821660048201526024015b60405180910390fd5b50506001805465ffffffffffff60a01b191690555b6105e28282610d33565b345f5b8281101561095a575f8484838181106106af576106af611776565b90506080020160200160208101906106c791906115db565b6001600160a01b03160361077e578383828181106106e7576106e7611776565b90506080020160400160208101906106ff91906115db565b6001600160a01b03166108fc85858481811061071d5761071d611776565b9050608002016060013590811502906040515f60405180830381858888f1935050505015801561074f573d5f803e3d5ffd5b5083838281811061076257610762611776565b9050608002016060013582610777919061179e565b9150610875565b83838281811061079057610790611776565b90506080020160200160208101906107a891906115db565b6001600160a01b03166323b872dd338686858181106107c9576107c9611776565b90506080020160400160208101906107e191906115db565b8787868181106107f3576107f3611776565b6040516001600160e01b031960e088901b1681526001600160a01b039586166004820152949093166024850152506060608090920201013560448201526064016020604051808303815f875af115801561084f573d5f803e3d5ffd5b505050506040513d601f19601f8201168201806040525081019061087391906117b1565b505b83838281811061088757610887611776565b905060800201604001602081019061089f
91906115db565b6001600160a01b03168484838181106108ba576108ba611776565b90506080020160200160208101906108d291906115db565b6001600160a01b03167fe93d7a771f81dc20f1d474f6868677269fdfa09830508e48edb0aa4d6569983386868581811061090e5761090e611776565b9050608002015f013587878681811061092957610929611776565b9050608002016060013560405161094a929190918252602082015260400190565b60405180910390a3600101610694565b50505050565b6040516323b872dd60e01b8152336004820152306024820152604481018290526001600160a01b038316906323b872dd906064016020604051808303815f875af11580156109b0573d5f803e3d5ffd5b505050506040513d601f19601f820116820180604052508101906109d491906117b1565b50826001600160a01b0316826001600160a01b03167fe8a7ca8155e14d9cc8faeecec58a97268da95a2327cc892593748ce37cc6953f8684604051610a23929190918252602082015260400190565b60405180910390a350505050565b5f610a3b81610cf9565b6105e282610d6b565b5f610a4e81610cf9565b6105e282610ddd565b610a648787878787610e4c565b7fc030727dea5440ebb1789967645e2595e4e67cf55821175a3f9f8b33aff41fa58282604051610a959291906117d0565b60405180910390a150505050505050565b5f610ab96002546001600160a01b031690565b905090565b5f918252602082815260408084206001600160a01b0393909316845291905290205460ff1690565b6002545f90600160d01b900465ffffffffffff168015158015610b1157504265ffffffffffff821610155b610b1c575f80610b30565b600254600160a01b900465ffffffffffff16815b915091509091565b81516020808401516060808601516040808801516080909801518151710696e6974342e73657175656e6365722e76360741b8188015246603282015260528101979097526072870194909452609286019190915260b28501969096521b6bffffffffffffffffffffffff191660d283015260e68083019390935283518083039093018352610106909101909252805191012090565b6002545f90600160d01b900465ffffffffffff168015158015610bf757504265ffffffffffff8216105b610c1257600154600160d01b900465ffffffffffff16610c25565b600254600160a01b900465ffffffffffff165b91505090565b5f610c34610c6a565b509050336001600160a01b03821614610c6257604051636116401160e11b8152336004820152602401610669565b6105b7611047565b6001546001600160a01b03811691600160a01b90910465ffffffffffff1690565b81610ca957604051631fe1e13d60e11b815260040160405180910390fd5b6105e282826110dd565b5f610cbd81610cf9565b6105b7611101565b5f6001600160e01b03198216637965db0b60e01b148061059f57506301ffc9a760e01b6001600160e01b031983161461059f565b6105b7813361110b565b610d0d5f80611144565b565b5f82815260208190526040902060010154610d2981610cf9565b61095a8383611203565b6001600160a01b0381163314610d5c5760405163334bd91960e11b815260040160405180910390fd5b610d668282611270565b505050565b5f610d74610bcd565b610d7d426112ac565b610d8791906117fe565b9050610d9382826112e2565b60405165ffffffffffff821681526001600160a01b038316907f3377dc44241e779dd06afab5b788a35ca5f3b778836e2990bdb26a2a4b2e5ed69060200160405180910390a25050565b5f610de78261135f565b610df0426112ac565b610dfa91906117fe565b9050610e068282611144565b6040805165ffffffffffff8085168252831660208201527ff1038c18cf84a56e432fdbfaf746924b7ea511dfe03a6506a0ceba4888788d9b910160405180910390a15050565b84515f90815260036020526040812080549082610e6883611824565b91905055905085602001518114610e9557604051635f64988d60e11b815260048101829052602401610669565b8560400151421115610eba576040516378fd448d60e01b815260040160405180910390fd5b5f610ec58787610b38565b604080515f8082526020820180845284905260ff89169282019290925260608101879052608081018690529192509060019060a0016020604051602081039080840390855afa158015610f1a573d5f803e3d5ffd5b505050602060405103519050610f416d53455155454e4345525f524f4c4560901b82610abe565b610f6957604051639a7d38d960e01b81526001600160a01b0382166004820152602401610669565b87515f90815260046020526040902054439003610f9957604051632ce0494b60e
01b815260040160405180910390fd5b87515f908152600460205260409081902043905551610ff2908990815181526020808301519082015260408083015190820152606080830151908201526080918201516001600160a01b03169181019190915260a00190565b6040518091039020816001600160a01b03167f9c5702b5639f451bda4f9dba7fdf9d125a675ccddd315b81ce962d3ddd986a238960405161103591815260200190565b60405180910390a35050505050505050565b5f80611051610c6a565b915091506110668165ffffffffffff16151590565b158061107a57504265ffffffffffff821610155b156110a2576040516319ca5ebb60e01b815265ffffffffffff82166004820152602401610669565b6110bd5f6110b86002546001600160a01b031690565b611270565b506110c85f83611203565b5050600180546001600160d01b031916905550565b5f828152602081905260409020600101546110f781610cf9565b61095a8383611270565b610d0d5f806112e2565b6111158282610abe565b6105e25760405163e2517d3f60e01b81526001600160a01b038216600482015260248101839052604401610669565b600254600160d01b900465ffffffffffff1680156111c6574265ffffffffffff8216101561119d57600254600180546001600160d01b0316600160a01b90920465ffffffffffff16600160d01b029190911790556111c6565b6040517f2b1fa2edafe6f7b9e97c1a9e0c3660e645beb2dcaa2d45bdbf9beaf5472e1ec5905f90a15b50600280546001600160a01b0316600160a01b65ffffffffffff948516026001600160d01b031617600160d01b9290931691909102919091179055565b5f8261125f575f61121c6002546001600160a01b031690565b6001600160a01b03161461124357604051631fe1e13d60e11b815260040160405180910390fd5b600280546001600160a01b0319166001600160a01b0384161790555b61126983836113b0565b9392505050565b5f8215801561128c57506002546001600160a01b038381169116145b156112a257600280546001600160a01b03191690555b611269838361143f565b5f65ffffffffffff8211156112de576040516306dfcc6560e41b81526030600482015260248101839052604401610669565b5090565b5f6112eb610c6a565b6001805465ffffffffffff8616600160a01b026001600160d01b03199091166001600160a01b03881617179055915061132d90508165ffffffffffff16151590565b15610d66576040517f8886ebfc4259abdbc16601dd8fb5678e54878f47b3c34836cfc51154a9605109905f90a1505050565b5f80611369610bcd565b90508065ffffffffffff168365ffffffffffff16116113915761138c838261183c565b611269565b61126965ffffffffffff8416620697805f828218828410028218611269565b5f6113bb8383610abe565b611438575f838152602081815260408083206001600160a01b03861684529091529020805460ff191660011790556113f03390565b6001600160a01b0316826001600160a01b0316847f2f8788117e7eff1d82e926ec794901d17c78024a50270940304540a733656f0d60405160405180910390a450600161059f565b505f61059f565b5f61144a8383610abe565b15611438575f838152602081815260408083206001600160a01b0386168085529252808320805460ff1916905551339286917ff6391f5c32d9c69d2a47ea670b442974b53935d1edc7fd64eb21e047a839171b9190a450600161059f565b5f602082840312156114b8575f80fd5b81356001600160e01b031981168114611269575f80fd5b5f602082840312156114df575f80fd5b5035919050565b80356001600160a01b03811681146114fc575f80fd5b919050565b5f8060408385031215611512575f80fd5b82359150611522602084016114e6565b90509250929050565b5f806020838503121561153c575f80fd5b823567ffffffffffffffff80821115611553575f80fd5b818501915085601f830112611566575f80fd5b813581811115611574575f80fd5b8660208260071b8501011115611588575f80fd5b60209290920196919550909350505050565b5f805f80608085870312156115ad575f80fd5b843593506115bd602086016114e6565b92506115cb604086016114e6565b9396929550929360600135925050565b5f602082840312156115eb575f80fd5b611269826114e6565b5f60208284031215611604575f80fd5b813565ffffffffffff81168114611269575f80fd5b5f60a08284031215611629575f80fd5b60405160a0810181811067ffffffffffffffff8211171561165857634e487b7160e01b5f52604160045260245ffd5b8060405250809150823581526020830135602082015260408301356040820152606083
0135606082015261168e608084016114e6565b60808201525092915050565b5f805f805f805f610140888a0312156116b1575f80fd5b6116bb8989611619565b965060a0880135955060c088013560ff811681146116d7575f80fd5b945060e08801359350610100880135925061012088013567ffffffffffffffff80821115611703575f80fd5b818a0191508a601f830112611716575f80fd5b813581811115611724575f80fd5b8b6020828501011115611735575f80fd5b60208301945080935050505092959891949750929550565b5f8060c0838503121561175e575f80fd5b6117688484611619565b9460a0939093013593505050565b634e487b7160e01b5f52603260045260245ffd5b634e487b7160e01b5f52601160045260245ffd5b8181038181111561059f5761059f61178a565b5f602082840312156117c1575f80fd5b81518015158114611269575f80fd5b60208152816020820152818360408301375f818301604090810191909152601f909201601f19160101919050565b65ffffffffffff81811683821601908082111561181d5761181d61178a565b5092915050565b5f600182016118355761183561178a565b5060010190565b65ffffffffffff82811682821603908082111561181d5761181d61178a56fea2646970667358221220111de8e40c8e2761ed9ab04f385dfef1dffcd646c5a270f4fc3dc0858a0d605764736f6c63430008190033","sourceMap":"281:7248:35:-:0;;;3619:155;;;;;;;;;;;;;;;;;;;;;;;;;;;;:::i;:::-;2256:44:34;;;;3753:6:35;3761:5;-1:-1:-1;;;;;2384:33:23;;2380:115;;2440:44;;-1:-1:-1;;;2440:44:23;;2481:1;2440:44;;;516:51:38;489:18;;2440:44:23;;;;;;;2380:115;2504:13;:28;;-1:-1:-1;;;;;2504:28:23;-1:-1:-1;;;2504:28:23;;;;;;;2542:51;-1:-1:-1;2573:19:23;2542:10;:51::i;:::-;;2308:292;;3619:155:35;;281:7248;;5509:370:23;5595:4;5615;5611:214;;5687:1;5661:14;6786:20;;-1:-1:-1;;;;;6786:20:23;;6707:106;5661:14;-1:-1:-1;;;;;5661:28:23;;5657:114;;5716:40;;-1:-1:-1;;;5716:40:23;;;;;;;;;;;5657:114;5784:20;:30;;-1:-1:-1;;;;;;5784:30:23;-1:-1:-1;;;;;5784:30:23;;;;;5611:214;5841:31;5858:4;5864:7;5841:16;:31::i;:::-;5834:38;;5509:370;;;;;:::o;6179:316:21:-;6256:4;2954:12;;;;;;;;;;;-1:-1:-1;;;;;2954:29:21;;;;;;;;;;;;6272:217;;6315:6;:12;;;;;;;;;;;-1:-1:-1;;;;;6315:29:21;;;;;;;;;:36;;-1:-1:-1;;6315:36:21;6347:4;6315:36;;;6397:12;735:10:27;;656:96;6397:12:21;-1:-1:-1;;;;;6370:40:21;6388:7;-1:-1:-1;;;;;6370:40:21;6382:4;6370:40;;;;;;;;;;-1:-1:-1;6431:4:21;6424:11;;6272:217;-1:-1:-1;6473:5:21;6466:12;;14:351:38;93:6;101;154:2;142:9;133:7;129:23;125:32;122:52;;;170:1;167;160:12;122:52;193:16;;252:2;237:18;;231:25;193:16;;-1:-1:-1;;;;;;285:31:38;;275:42;;265:70;;331:1;328;321:12;265:70;354:5;344:15;;;14:351;;;;;:::o;370:203::-;281:7248:35;;;;;;;;;;;;;;;;;","linkReferences":{}},"deployedBytecode":{"object":"0x608060405260043610610184575f3560e01c80637e82bb01116100d0578063c7bc4a6211610089578063cf6eefb711610063578063cf6eefb7146104b7578063d547741f146104f1578063d602b9fd14610510578063ea3b9ba114610524576101b5565b8063c7bc4a6214610470578063cc8463c81461048f578063cefc1429146104a3576101b5565b80637e82bb011461039b57806384ef8ffc146103c65780638da5cb5b146103f757806391d148541461040b578063a1eda53c1461042a578063a217fddf1461045d576101b5565b806336568abe1161013d5780634842855c116101175780634842855c1461031a578063634e93da1461033e578063649a5ec71461035d5780637e5692741461037c576101b5565b806336568abe146102d557806336702119146102f45780633805c6bd14610307576101b5565b806301ffc9a7146101df578063022d63fb146102135780630aa6220b1461023b5780631e6637201461024f578063248a9ca3146102885780632f2ff15d146102b6576101b5565b366101b5576101b37f000000000000000000000000000000000000000000000000000000000000000033610532565b005b6101b37f000000000000000000000000000000000000000000000000000000000000000033610532565b3480156101ea575f80fd5b506101fe6101f93660046114a8565b61057b565b60405190151581526020015b60405180910390f35b34801561021e575f80fd5b50620697805b60405
165ffffffffffff909116815260200161020a565b348015610246575f80fd5b506101b36105a5565b34801561025a575f80fd5b5061027a6102693660046114cf565b60036020525f908152604090205481565b60405190815260200161020a565b348015610293575f80fd5b5061027a6102a23660046114cf565b5f9081526020819052604090206001015490565b3480156102c1575f80fd5b506101b36102d0366004611501565b6105ba565b3480156102e0575f80fd5b506101b36102ef366004611501565b6105e6565b6101b361030236600461152b565b610691565b6101b361031536600461159a565b610960565b348015610325575f80fd5b5061027a6d53455155454e4345525f524f4c4560901b81565b348015610349575f80fd5b506101b36103583660046115db565b610a31565b348015610368575f80fd5b506101b36103773660046115f4565b610a44565b348015610387575f80fd5b506101b361039636600461169a565b610a57565b3480156103a6575f80fd5b5061027a6103b53660046114cf565b60046020525f908152604090205481565b3480156103d1575f80fd5b506002546001600160a01b03165b6040516001600160a01b03909116815260200161020a565b348015610402575f80fd5b506103df610aa6565b348015610416575f80fd5b506101fe610425366004611501565b610abe565b348015610435575f80fd5b5061043e610ae6565b6040805165ffffffffffff93841681529290911660208301520161020a565b348015610468575f80fd5b5061027a5f81565b34801561047b575f80fd5b5061027a61048a36600461174d565b610b38565b34801561049a575f80fd5b50610224610bcd565b3480156104ae575f80fd5b506101b3610c2b565b3480156104c2575f80fd5b506104cb610c6a565b604080516001600160a01b03909316835265ffffffffffff90911660208301520161020a565b3480156104fc575f80fd5b506101b361050b366004611501565b610c8b565b34801561051b575f80fd5b506101b3610cb3565b6101b3610532366004611501565b604080518381523460208201526001600160a01b038316915f917fe8a7ca8155e14d9cc8faeecec58a97268da95a2327cc892593748ce37cc6953f910160405180910390a35050565b5f6001600160e01b031982166318a4c3c360e11b148061059f575061059f82610cc5565b92915050565b5f6105af81610cf9565b6105b7610d03565b50565b816105d857604051631fe1e13d60e11b815260040160405180910390fd5b6105e28282610d0f565b5050565b8115801561060157506002546001600160a01b038281169116145b15610687575f80610610610c6a565b90925090506001600160a01b038216151580610632575065ffffffffffff8116155b8061064557504265ffffffffffff821610155b15610672576040516319ca5ebb60e01b815265ffffffffffff821660048201526024015b60405180910390fd5b50506001805465ffffffffffff60a01b191690555b6105e28282610d33565b345f5b8281101561095a575f8484838181106106af576106af611776565b90506080020160200160208101906106c791906115db565b6001600160a01b03160361077e578383828181106106e7576106e7611776565b90506080020160400160208101906106ff91906115db565b6001600160a01b03166108fc85858481811061071d5761071d611776565b9050608002016060013590811502906040515f60405180830381858888f1935050505015801561074f573d5f803e3d5ffd5b5083838281811061076257610762611776565b9050608002016060013582610777919061179e565b9150610875565b83838281811061079057610790611776565b90506080020160200160208101906107a891906115db565b6001600160a01b03166323b872dd338686858181106107c9576107c9611776565b90506080020160400160208101906107e191906115db565b8787868181106107f3576107f3611776565b6040516001600160e01b031960e088901b1681526001600160a01b039586166004820152949093166024850152506060608090920201013560448201526064016020604051808303815f875af115801561084f573d5f803e3d5ffd5b505050506040513d601f19601f8201168201806040525081019061087391906117b1565b505b83838281811061088757610887611776565b905060800201604001602081019061089f91906115db565b6001600160a01b03168484838181106108ba576108ba611776565b90506080020160200160208101906108d291906115db565b6001600160a01b03167fe93d7a771f81dc20f1d474f6868677269fdfa09830508e48edb0aa4d6569983386868581811061090e5761090e611776565b9050608002015f013587
878681811061092957610929611776565b9050608002016060013560405161094a929190918252602082015260400190565b60405180910390a3600101610694565b50505050565b6040516323b872dd60e01b8152336004820152306024820152604481018290526001600160a01b038316906323b872dd906064016020604051808303815f875af11580156109b0573d5f803e3d5ffd5b505050506040513d601f19601f820116820180604052508101906109d491906117b1565b50826001600160a01b0316826001600160a01b03167fe8a7ca8155e14d9cc8faeecec58a97268da95a2327cc892593748ce37cc6953f8684604051610a23929190918252602082015260400190565b60405180910390a350505050565b5f610a3b81610cf9565b6105e282610d6b565b5f610a4e81610cf9565b6105e282610ddd565b610a648787878787610e4c565b7fc030727dea5440ebb1789967645e2595e4e67cf55821175a3f9f8b33aff41fa58282604051610a959291906117d0565b60405180910390a150505050505050565b5f610ab96002546001600160a01b031690565b905090565b5f918252602082815260408084206001600160a01b0393909316845291905290205460ff1690565b6002545f90600160d01b900465ffffffffffff168015158015610b1157504265ffffffffffff821610155b610b1c575f80610b30565b600254600160a01b900465ffffffffffff16815b915091509091565b81516020808401516060808601516040808801516080909801518151710696e6974342e73657175656e6365722e76360741b8188015246603282015260528101979097526072870194909452609286019190915260b28501969096521b6bffffffffffffffffffffffff191660d283015260e68083019390935283518083039093018352610106909101909252805191012090565b6002545f90600160d01b900465ffffffffffff168015158015610bf757504265ffffffffffff8216105b610c1257600154600160d01b900465ffffffffffff16610c25565b600254600160a01b900465ffffffffffff165b91505090565b5f610c34610c6a565b509050336001600160a01b03821614610c6257604051636116401160e11b8152336004820152602401610669565b6105b7611047565b6001546001600160a01b03811691600160a01b90910465ffffffffffff1690565b81610ca957604051631fe1e13d60e11b815260040160405180910390fd5b6105e282826110dd565b5f610cbd81610cf9565b6105b7611101565b5f6001600160e01b03198216637965db0b60e01b148061059f57506301ffc9a760e01b6001600160e01b031983161461059f565b6105b7813361110b565b610d0d5f80611144565b565b5f82815260208190526040902060010154610d2981610cf9565b61095a8383611203565b6001600160a01b0381163314610d5c5760405163334bd91960e11b815260040160405180910390fd5b610d668282611270565b505050565b5f610d74610bcd565b610d7d426112ac565b610d8791906117fe565b9050610d9382826112e2565b60405165ffffffffffff821681526001600160a01b038316907f3377dc44241e779dd06afab5b788a35ca5f3b778836e2990bdb26a2a4b2e5ed69060200160405180910390a25050565b5f610de78261135f565b610df0426112ac565b610dfa91906117fe565b9050610e068282611144565b6040805165ffffffffffff8085168252831660208201527ff1038c18cf84a56e432fdbfaf746924b7ea511dfe03a6506a0ceba4888788d9b910160405180910390a15050565b84515f90815260036020526040812080549082610e6883611824565b91905055905085602001518114610e9557604051635f64988d60e11b815260048101829052602401610669565b8560400151421115610eba576040516378fd448d60e01b815260040160405180910390fd5b5f610ec58787610b38565b604080515f8082526020820180845284905260ff89169282019290925260608101879052608081018690529192509060019060a0016020604051602081039080840390855afa158015610f1a573d5f803e3d5ffd5b505050602060405103519050610f416d53455155454e4345525f524f4c4560901b82610abe565b610f6957604051639a7d38d960e01b81526001600160a01b0382166004820152602401610669565b87515f90815260046020526040902054439003610f9957604051632ce0494b60e01b815260040160405180910390fd5b87515f908152600460205260409081902043905551610ff2908990815181526020808301519082015260408083015190820152606080830151908201526080918201516001600160a01b03169181019190915260a00190565b6040518091039020816001600160a01b03167f9c5702b56
39f451bda4f9dba7fdf9d125a675ccddd315b81ce962d3ddd986a238960405161103591815260200190565b60405180910390a35050505050505050565b5f80611051610c6a565b915091506110668165ffffffffffff16151590565b158061107a57504265ffffffffffff821610155b156110a2576040516319ca5ebb60e01b815265ffffffffffff82166004820152602401610669565b6110bd5f6110b86002546001600160a01b031690565b611270565b506110c85f83611203565b5050600180546001600160d01b031916905550565b5f828152602081905260409020600101546110f781610cf9565b61095a8383611270565b610d0d5f806112e2565b6111158282610abe565b6105e25760405163e2517d3f60e01b81526001600160a01b038216600482015260248101839052604401610669565b600254600160d01b900465ffffffffffff1680156111c6574265ffffffffffff8216101561119d57600254600180546001600160d01b0316600160a01b90920465ffffffffffff16600160d01b029190911790556111c6565b6040517f2b1fa2edafe6f7b9e97c1a9e0c3660e645beb2dcaa2d45bdbf9beaf5472e1ec5905f90a15b50600280546001600160a01b0316600160a01b65ffffffffffff948516026001600160d01b031617600160d01b9290931691909102919091179055565b5f8261125f575f61121c6002546001600160a01b031690565b6001600160a01b03161461124357604051631fe1e13d60e11b815260040160405180910390fd5b600280546001600160a01b0319166001600160a01b0384161790555b61126983836113b0565b9392505050565b5f8215801561128c57506002546001600160a01b038381169116145b156112a257600280546001600160a01b03191690555b611269838361143f565b5f65ffffffffffff8211156112de576040516306dfcc6560e41b81526030600482015260248101839052604401610669565b5090565b5f6112eb610c6a565b6001805465ffffffffffff8616600160a01b026001600160d01b03199091166001600160a01b03881617179055915061132d90508165ffffffffffff16151590565b15610d66576040517f8886ebfc4259abdbc16601dd8fb5678e54878f47b3c34836cfc51154a9605109905f90a1505050565b5f80611369610bcd565b90508065ffffffffffff168365ffffffffffff16116113915761138c838261183c565b611269565b61126965ffffffffffff8416620697805f828218828410028218611269565b5f6113bb8383610abe565b611438575f838152602081815260408083206001600160a01b03861684529091529020805460ff191660011790556113f03390565b6001600160a01b0316826001600160a01b0316847f2f8788117e7eff1d82e926ec794901d17c78024a50270940304540a733656f0d60405160405180910390a450600161059f565b505f61059f565b5f61144a8383610abe565b15611438575f838152602081815260408083206001600160a01b0386168085529252808320805460ff1916905551339286917ff6391f5c32d9c69d2a47ea670b442974b53935d1edc7fd64eb21e047a839171b9190a450600161059f565b5f602082840312156114b8575f80fd5b81356001600160e01b031981168114611269575f80fd5b5f602082840312156114df575f80fd5b5035919050565b80356001600160a01b03811681146114fc575f80fd5b919050565b5f8060408385031215611512575f80fd5b82359150611522602084016114e6565b90509250929050565b5f806020838503121561153c575f80fd5b823567ffffffffffffffff80821115611553575f80fd5b818501915085601f830112611566575f80fd5b813581811115611574575f80fd5b8660208260071b8501011115611588575f80fd5b60209290920196919550909350505050565b5f805f80608085870312156115ad575f80fd5b843593506115bd602086016114e6565b92506115cb604086016114e6565b9396929550929360600135925050565b5f602082840312156115eb575f80fd5b611269826114e6565b5f60208284031215611604575f80fd5b813565ffffffffffff81168114611269575f80fd5b5f60a08284031215611629575f80fd5b60405160a0810181811067ffffffffffffffff8211171561165857634e487b7160e01b5f52604160045260245ffd5b80604052508091508235815260208301356020820152604083013560408201526060830135606082015261168e608084016114e6565b60808201525092915050565b5f805f805f805f610140888a0312156116b1575f80fd5b6116bb8989611619565b965060a0880135955060c088013560ff811681146116d7575f80fd5b945060e08801359350610100880135925061012088013567ffffffffffffffff80821115
611703575f80fd5b818a0191508a601f830112611716575f80fd5b813581811115611724575f80fd5b8b6020828501011115611735575f80fd5b60208301945080935050505092959891949750929550565b5f8060c0838503121561175e575f80fd5b6117688484611619565b9460a0939093013593505050565b634e487b7160e01b5f52603260045260245ffd5b634e487b7160e01b5f52601160045260245ffd5b8181038181111561059f5761059f61178a565b5f602082840312156117c1575f80fd5b81518015158114611269575f80fd5b60208152816020820152818360408301375f818301604090810191909152601f909201601f19160101919050565b65ffffffffffff81811683821601908082111561181d5761181d61178a565b5092915050565b5f600182016118355761183561178a565b5060010190565b65ffffffffffff82811682821603908082111561181d5761181d61178a56fea2646970667358221220111de8e40c8e2761ed9ab04f385dfef1dffcd646c5a270f4fc3dc0858a0d605764736f6c63430008190033","sourceMap":"281:7248:35:-:0;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;2632:39:34;2638:20;2660:10;2632:5;:39::i;:::-;281:7248:35;;2447:39:34;2453:20;2475:10;2447:5;:39::i;2667:219:23:-;;;;;;;;;;-1:-1:-1;2667:219:23;;;;;:::i;:::-;;:::i;:::-;;;470:14:38;;463:22;445:41;;433:2;418:18;2667:219:23;;;;;;;;7766:108;;;;;;;;;;-1:-1:-1;7861:6:23;7766:108;;;671:14:38;659:27;;;641:46;;629:2;614:18;7766:108:23;497:196:38;10927:126:23;;;;;;;;;;;;;:::i;1478:47:35:-;;;;;;;;;;-1:-1:-1;1478:47:35;;;;;:::i;:::-;;;;;;;;;;;;;;;;;1029:25:38;;;1017:2;1002:18;1478:47:35;883:177:38;3810:120:21;;;;;;;;;;-1:-1:-1;3810:120:21;;;;;:::i;:::-;3875:7;3901:12;;;;;;;;;;:22;;;;3810:120;3198:265:23;;;;;;;;;;-1:-1:-1;3198:265:23;;;;;:::i;:::-;;:::i;4515:566::-;;;;;;;;;;-1:-1:-1;4515:566:23;;;;;:::i;:::-;;:::i;5794:881:34:-;;;;;;:::i;:::-;;:::i;3733:254::-;;;;;;:::i;:::-;;:::i;1256:66:35:-;;;;;;;;;;;;-1:-1:-1;;;1256:66:35;;8068:150:23;;;;;;;;;;-1:-1:-1;8068:150:23;;;;;:::i;:::-;;:::i;10296:145::-;;;;;;;;;;-1:-1:-1;10296:145:23;;;;;:::i;:::-;;:::i;5410:287:35:-;;;;;;;;;;-1:-1:-1;5410:287:35;;;;;:::i;:::-;;:::i;1708:55::-;;;;;;;;;;-1:-1:-1;1708:55:35;;;;;:::i;:::-;;;;;;;;;;;;;;6707:106:23;;;;;;;;;;-1:-1:-1;6786:20:23;;-1:-1:-1;;;;;6786:20:23;6707:106;;;-1:-1:-1;;;;;5436:32:38;;;5418:51;;5406:2;5391:18;6707:106:23;5272:203:38;2942:93:23;;;;;;;;;;;;;:::i;2854:136:21:-;;;;;;;;;;-1:-1:-1;2854:136:21;;;;;:::i;:::-;;:::i;7432:261:23:-;;;;;;;;;;;;;:::i;:::-;;;;5660:14:38;5701:15;;;5683:34;;5753:15;;;;5748:2;5733:18;;5726:43;5623:18;7432:261:23;5480:295:38;2187:49:21;;;;;;;;;;-1:-1:-1;2187:49:21;2232:4;2187:49;;7068:459:35;;;;;;;;;;-1:-1:-1;7068:459:35;;;;;:::i;:::-;;:::i;7130:229:23:-;;;;;;;;;;;;;:::i;9146:344::-;;;;;;;;;;;;;:::i;6886:171::-;;;;;;;;;;;;;:::i;:::-;;;;-1:-1:-1;;;;;6281:32:38;;;6263:51;;6362:14;6350:27;;;6345:2;6330:18;;6323:55;6236:18;6886:171:23;6091:293:38;3563:267:23;;;;;;;;;;-1:-1:-1;3563:267:23;;;;;:::i;:::-;;:::i;8706:128::-;;;;;;;;;;;;;:::i;3056:160:34:-;;;;;;:::i;:::-;3149:60;;;6822:25:38;;;3199:9:34;6878:2:38;6863:18;;6856:34;-1:-1:-1;;;;;3149:60:34;;;3178:1;;3149:60;;6795:18:38;3149:60:34;;;;;;;3056:160;;:::o;2667:219:23:-;2752:4;-1:-1:-1;;;;;;2775:64:23;;-1:-1:-1;;;2775:64:23;;:104;;;2843:36;2867:11;2843:23;:36::i;:::-;2768:111;2667:219;-1:-1:-1;;2667:219:23:o;10927:126::-;2232:4:21;2464:16;2232:4;2464:10;:16::i;:::-;11018:28:23::1;:26;:28::i;:::-;10927:126:::0;:::o;3198:265::-;3317:4;3313:104;;3366:40;;-1:-1:-1;;;3366:40:23;;;;;;;;;;;3313:104;3426:30;3442:4;3448:7;3426:15;:30::i;:::-;3198:265;;:::o;4515:566::-;4637:26;;:55;;;;-1:-1:-1;6786:20:23;;-1:-1:-1;;;;;4667:25:23;;;
6786:20;;4667:25;4637:55;4633:399;;;4709:23;4734:15;4753:21;:19;:21::i;:::-;4708:66;;-1:-1:-1;4708:66:23;-1:-1:-1;;;;;;4792:29:23;;;;;:58;;-1:-1:-1;14557:13:23;;;;4792:58;:91;;;-1:-1:-1;14785:15:23;14774:26;;;;4854:29;4792:91;4788:185;;;4910:48;;-1:-1:-1;;;4910:48:23;;671:14:38;659:27;;4910:48:23;;;641:46:38;614:18;;4910:48:23;;;;;;;;4788:185;-1:-1:-1;;4993:28:23;4986:35;;-1:-1:-1;;;;4986:35:23;;;4633:399;5041:33;5060:4;5066:7;5041:18;:33::i;5794:881:34:-;5895:9;5872:20;5914:755;5934:17;;;5914:755;;;6033:1;6006:6;;6013:1;6006:9;;;;;;;:::i;:::-;;;;;;:15;;;;;;;;;;:::i;:::-;-1:-1:-1;;;;;6006:29:34;;6002:527;;6121:6;;6128:1;6121:9;;;;;;;:::i;:::-;;;;;;:19;;;;;;;;;;:::i;:::-;-1:-1:-1;;;;;6113:37:34;:55;6151:6;;6158:1;6151:9;;;;;;;:::i;:::-;;;;;;:16;;;6113:55;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;6320:6;;6327:1;6320:9;;;;;;;:::i;:::-;;;;;;:16;;;6304:32;;;;;:::i;:::-;;;6002:527;;;6434:6;;6441:1;6434:9;;;;;;;:::i;:::-;;;;;;:15;;;;;;;;;;:::i;:::-;-1:-1:-1;;;;;6427:36:34;;6464:10;6476:6;;6483:1;6476:9;;;;;;;:::i;:::-;;;;;;:19;;;;;;;;;;:::i;:::-;6497:6;;6504:1;6497:9;;;;;;;:::i;:::-;6427:87;;-1:-1:-1;;;;;;6427:87:34;;;;;;;-1:-1:-1;;;;;7556:15:38;;;6427:87:34;;;7538:34:38;7608:15;;;;7588:18;;;7581:43;-1:-1:-1;6497:16:34;:9;;;;;:16;;7640:18:38;;;7633:34;7473:18;;6427:87:34;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;:::i;:::-;;6002:527;6620:6;;6627:1;6620:9;;;;;;;:::i;:::-;;;;;;:19;;;;;;;;;;:::i;:::-;-1:-1:-1;;;;;6567:91:34;6603:6;;6610:1;6603:9;;;;;;;:::i;:::-;;;;;;:15;;;;;;;;;;:::i;:::-;-1:-1:-1;;;;;6567:91:34;;6578:6;;6585:1;6578:9;;;;;;;:::i;:::-;;;;;;:23;;;6641:6;;6648:1;6641:9;;;;;;;:::i;:::-;;;;;;:16;;;6567:91;;;;;;6822:25:38;;;6878:2;6863:18;;6856:34;6810:2;6795:18;;6648:248;6567:91:34;;;;;;;;5953:3;;5914:755;;;;5862:813;5794:881;;:::o;3733:254::-;3852:61;;-1:-1:-1;;;3852:61:34;;3879:10;3852:61;;;7538:34:38;3899:4:34;7588:18:38;;;7581:43;7640:18;;;7633:34;;;-1:-1:-1;;;;;3852:26:34;;;;;7473:18:38;;3852:61:34;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;:::i;:::-;;3956:15;-1:-1:-1;;;;;3928:52:34;3949:5;-1:-1:-1;;;;;3928:52:34;;3934:13;3973:6;3928:52;;;;;;6822:25:38;;;6878:2;6863:18;;6856:34;6810:2;6795:18;;6648:248;3928:52:34;;;;;;;;3733:254;;;;:::o;8068:150:23:-;2232:4:21;2464:16;2232:4;2464:10;:16::i;:::-;8175:36:23::1;8202:8;8175:26;:36::i;10296:145::-:0;2232:4:21;2464:16;2232:4;2464:10;:16::i;:::-;10400:34:23::1;10425:8;10400:24;:34::i;5410:287:35:-:0;5611:44;5624:6;5632:13;5647:1;5650;5653;5611:12;:44::i;:::-;5670:20;5680:9;;5670:20;;;;;;;:::i;:::-;;;;;;;;5410:287;;;;;;;:::o;2942:93:23:-;2988:7;3014:14;6786:20;;-1:-1:-1;;;;;6786:20:23;;6707:106;3014:14;3007:21;;2942:93;:::o;2854:136:21:-;2931:4;2954:12;;;;;;;;;;;-1:-1:-1;;;;;2954:29:21;;;;;;;;;;;;;;;2854:136::o;7432:261:23:-;7552:21;;7497:15;;-1:-1:-1;;;7552:21:23;;;;14557:13;;;7591:57;;;;-1:-1:-1;14785:15:23;14774:26;;;;7619:29;7591:57;7590:96;;7681:1;7684;7590:96;;;7653:13;;-1:-1:-1;;;7653:13:23;;;;7668:8;7590:96;7583:103;;;;7432:261;;:::o;7068:459:35:-;7304:20;;7338:15;;;;;7367;;;;;7396:16;;;;;7426:20;;;;;7213:270;;-1:-1:-1;;;7213:270:35;;;8751:33:38;7277:13:35;8800:12:38;;;8793:28;8837:12;;;8830:28;;;;8874:12;;;8867:28;;;;8911:13;;;8904:29;;;;8949:13;;;8942:29;;;;9006:15;-1:-1:-1;;9002:53:38;8987:13;;;8980:76;9072:13;;;;9065:29;;;;7213:270:35;;;;;;;;;;9110:13:38;;;;7213:270:35;;;7502:18;;;;;;7068:459::o;7130:229:23:-;7224:21;;7188:6;;-1:-1:-1;;;7224:21:23;;;;14557:13;;;7263:56;;;;-1:-1:-1;14785:15:23;14774:26;;;;7291:28;7262:90;;7339:13;;-1:-1:-1;;;7339:13:23;;;;7262:90;;;7323:13;;-1:-1:-1;;;7323:13:23;;;;7262:90;7255:97;;;7130:229
;:::o;9146:344::-;9210:23;9239:21;:19;:21::i;:::-;-1:-1:-1;9209:51:23;-1:-1:-1;735:10:27;-1:-1:-1;;;;;9274:31:23;;;9270:175;;9388:46;;-1:-1:-1;;;9388:46:23;;735:10:27;9388:46:23;;;5418:51:38;5391:18;;9388:46:23;5272:203:38;9270:175:23;9454:29;:27;:29::i;6886:171::-;6999:20;;-1:-1:-1;;;;;6999:20:23;;;-1:-1:-1;;;7021:28:23;;;;;;6886:171::o;3563:267::-;3683:4;3679:104;;3732:40;;-1:-1:-1;;;3732:40:23;;;;;;;;;;;3679:104;3792:31;3809:4;3815:7;3792:16;:31::i;8706:128::-;2232:4:21;2464:16;2232:4;2464:10;:16::i;:::-;8798:29:23::1;:27;:29::i;2565:202:21:-:0;2650:4;-1:-1:-1;;;;;;2673:47:21;;-1:-1:-1;;;2673:47:21;;:87;;-1:-1:-1;;;;;;;;;;862:40:29;;;2724:36:21;763:146:29;3199:103:21;3265:30;3276:4;735:10:27;3265::21;:30::i;11180:94:23:-;11245:22;11262:1;11265;11245:16;:22::i;:::-;11180:94::o;4226:136:21:-;3875:7;3901:12;;;;;;;;;;:22;;;2464:16;2475:4;2464:10;:16::i;:::-;4330:25:::1;4341:4;4347:7;4330:10;:25::i;5328:245::-:0;-1:-1:-1;;;;;5421:34:21;;735:10:27;5421:34:21;5417:102;;5478:30;;-1:-1:-1;;;5478:30:21;;;;;;;;;;;5417:102;5529:37;5541:4;5547:18;5529:11;:37::i;:::-;;5328:245;;:::o;8345:288:23:-;8426:18;8484:19;:17;:19::i;:::-;8447:34;8465:15;8447:17;:34::i;:::-;:56;;;;:::i;:::-;8426:77;;8513:46;8537:8;8547:11;8513:23;:46::i;:::-;8574:52;;671:14:38;659:27;;641:46;;-1:-1:-1;;;;;8574:52:23;;;;;629:2:38;614:18;8574:52:23;;;;;;;8416:217;8345:288;:::o;10566:::-;10644:18;10702:26;10719:8;10702:16;:26::i;:::-;10665:34;10683:15;10665:17;:34::i;:::-;:63;;;;:::i;:::-;10644:84;;10738:39;10755:8;10765:11;10738:16;:39::i;:::-;10792:55;;;5660:14:38;5701:15;;;5683:34;;5753:15;;5748:2;5733:18;;5726:43;10792:55:23;;5623:18:38;10792:55:23;;;;;;;10634:220;10566:288;:::o;5703:1152:35:-;5931:20;;5894:21;5918:34;;;:12;:34;;;;;:36;;;5894:21;5918:36;;;:::i;:::-;;;;;5894:60;;5985:6;:15;;;5968:13;:32;5964:71;;6009:26;;-1:-1:-1;;;6009:26:35;;;;;1029:25:38;;;1002:18;;6009:26:35;883:177:38;5964:71:35;6121:6;:16;;;6103:15;:34;6099:61;;;6146:14;;-1:-1:-1;;;6146:14:35;;;;;;;;;;;6099:61;6232:19;6254:38;6270:6;6278:13;6254:15;:38::i;:::-;6322:31;;;6302:17;6322:31;;;;;;;;;9682:25:38;;;9755:4;9743:17;;9723:18;;;9716:45;;;;9777:18;;;9770:34;;;9820:18;;;9813:34;;;6232:60:35;;-1:-1:-1;6302:17:35;6322:31;;9654:19:38;;6322:31:35;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;6302:51;;6440:34;-1:-1:-1;;;6464:9:35;6440:7;:34::i;:::-;6435:71;;6483:23;;-1:-1:-1;;;6483:23:35;;-1:-1:-1;;;;;5436:32:38;;6483:23:35;;;5418:51:38;5391:18;;6483:23:35;5272:203:38;6435:71:35;6621:20;;6600:42;;;;:20;:42;;;;;;6646:12;6600:58;;6596:99;;6667:28;;-1:-1:-1;;;6667:28:35;;;;;;;;;;;6596:99;6726:20;;6705:42;;;;:20;:42;;;;;;;6750:12;6705:57;;6800:48;;;6726:6;;10059:13:38;;10047:26;;10123:4;10111:17;;;10105:24;10089:14;;;10082:48;10178:2;10166:15;;;10160:22;10146:12;;;10139:44;10231:2;10219:15;;;10213:22;10199:12;;;10192:44;10289:3;10277:16;;;10271:23;-1:-1:-1;;;;;10267:49:38;10252:13;;;10245:72;;;;10304:3;10333:13;;9858:494;6800:48:35;;;;;;;;6815:9;-1:-1:-1;;;;;6800:48:35;;6834:13;6800:48;;;;1029:25:38;;1017:2;1002:18;;883:177;6800:48:35;;;;;;;;5815:1040;;;5703:1152;;;;;:::o;9618:474:23:-;9685:16;9703:15;9722:21;:19;:21::i;:::-;9684:59;;;;9758:24;9773:8;14557:13;;;;;14471:106;9758:24;9757:25;:58;;;-1:-1:-1;14785:15:23;14774:26;;;;9786:29;9757:58;9753:144;;;9838:48;;-1:-1:-1;;;9838:48:23;;671:14:38;659:27;;9838:48:23;;;641:46:38;614:18;;9838:48:23;497:196:38;9753:144:23;9906:47;2232:4:21;9938:14:23;6786:20;;-1:-1:-1;;;;;6786:20:23;;6707:106;9938:14;9906:11;:47::i;:::-;-1:-1:-1;9963:40:23;2232:4:21;9994:8:23;9963:10;:40::i;:::-;-1:-1:-1;;10020:20:23;10013:27;;-1:-1:-1;;;;;;10050:
35:23;;;-1:-1:-1;9618:474:23:o;4642:138:21:-;3875:7;3901:12;;;;;;;;;;:22;;;2464:16;2475:4;2464:10;:16::i;:::-;4747:26:::1;4759:4;4765:7;4747:11;:26::i;8962:111:23:-:0;9028:38;9060:1;9064;9028:23;:38::i;3432:197:21:-;3520:22;3528:4;3534:7;3520;:22::i;:::-;3515:108;;3565:47;;-1:-1:-1;;;3565:47:21;;-1:-1:-1;;;;;10549:32:38;;3565:47:21;;;10531:51:38;10598:18;;;10591:34;;;10504:18;;3565:47:21;10357:274:38;13741:585:23;13843:21;;-1:-1:-1;;;13843:21:23;;;;14557:13;;13875:365;;14785:15;14774:26;;;;13922:308;;;14040:13;;14024;:29;;-1:-1:-1;;;;;14024:29:23;-1:-1:-1;;;14040:13:23;;;;;-1:-1:-1;;;14024:29:23;;;;;;;13922:308;;;14182:33;;;;;;;13922:308;-1:-1:-1;14250:13:23;:24;;-1:-1:-1;;;;;14284:35:23;-1:-1:-1;;;14250:24:23;;;;;-1:-1:-1;;;;;14284:35:23;;-1:-1:-1;;;14284:35:23;;;;;;;;;;;;;;13741:585::o;5509:370::-;5595:4;5615;5611:214;;5687:1;5661:14;6786:20;;-1:-1:-1;;;;;6786:20:23;;6707:106;5661:14;-1:-1:-1;;;;;5661:28:23;;5657:114;;5716:40;;-1:-1:-1;;;5716:40:23;;;;;;;;;;;5657:114;5784:20;:30;;-1:-1:-1;;;;;;5784:30:23;-1:-1:-1;;;;;5784:30:23;;;;;5611:214;5841:31;5858:4;5864:7;5841:16;:31::i;:::-;5834:38;5509:370;-1:-1:-1;;;5509:370:23:o;5946:271::-;6033:4;6053:26;;:55;;;;-1:-1:-1;6786:20:23;;-1:-1:-1;;;;;6083:25:23;;;6786:20;;6083:25;6053:55;6049:113;;;6131:20;6124:27;;-1:-1:-1;;;;;;6124:27:23;;;6049:113;6178:32;6196:4;6202:7;6178:17;:32::i;14296:213:32:-;14352:6;14382:16;14374:24;;14370:103;;;14421:41;;-1:-1:-1;;;14421:41:32;;14452:2;14421:41;;;10817:36:38;10869:18;;;10862:34;;;10790:18;;14421:41:32;10636:266:38;14370:103:32;-1:-1:-1;14496:5:32;14296:213::o;13062:525:23:-;13154:18;13176:21;:19;:21::i;:::-;13208:20;:31;;13249:42;;;-1:-1:-1;;;13249:42:23;-1:-1:-1;;;;;;13249:42:23;;;-1:-1:-1;;;;;13208:31:23;;13249:42;;;;13151:46;-1:-1:-1;13403:27:23;;-1:-1:-1;13151:46:23;14557:13;;;;;14471:106;13403:27;13399:182;;;13540:30;;;;;;;13141:446;13062:525;;:::o;11621:1249::-;11695:6;11713:19;11735;:17;:19::i;:::-;11713:41;;12684:12;12673:23;;:8;:23;;;:190;;12840:23;12855:8;12840:12;:23;:::i;:::-;12673:190;;;12722:51;;;;7861:6;3429:7:31;3066:5;;;3463;;;3065:36;3060:42;;3455:20;2825:294;6179:316:21;6256:4;6277:22;6285:4;6291:7;6277;:22::i;:::-;6272:217;;6315:6;:12;;;;;;;;;;;-1:-1:-1;;;;;6315:29:21;;;;;;;;;:36;;-1:-1:-1;;6315:36:21;6347:4;6315:36;;;6397:12;735:10:27;;656:96;6397:12:21;-1:-1:-1;;;;;6370:40:21;6388:7;-1:-1:-1;;;;;6370:40:21;6382:4;6370:40;;;;;;;;;;-1:-1:-1;6431:4:21;6424:11;;6272:217;-1:-1:-1;6473:5:21;6466:12;;6730:317;6808:4;6828:22;6836:4;6842:7;6828;:22::i;:::-;6824:217;;;6898:5;6866:12;;;;;;;;;;;-1:-1:-1;;;;;6866:29:21;;;;;;;;;;:37;;-1:-1:-1;;6866:37:21;;;6922:40;735:10:27;;6866:12:21;;6922:40;;6898:5;6922:40;-1:-1:-1;6983:4:21;6976:11;;14:286:38;72:6;125:2;113:9;104:7;100:23;96:32;93:52;;;141:1;138;131:12;93:52;167:23;;-1:-1:-1;;;;;;219:32:38;;209:43;;199:71;;266:1;263;256:12;698:180;757:6;810:2;798:9;789:7;785:23;781:32;778:52;;;826:1;823;816:12;778:52;-1:-1:-1;849:23:38;;698:180;-1:-1:-1;698:180:38:o;1432:173::-;1500:20;;-1:-1:-1;;;;;1549:31:38;;1539:42;;1529:70;;1595:1;1592;1585:12;1529:70;1432:173;;;:::o;1610:254::-;1678:6;1686;1739:2;1727:9;1718:7;1714:23;1710:32;1707:52;;;1755:1;1752;1745:12;1707:52;1791:9;1778:23;1768:33;;1820:38;1854:2;1843:9;1839:18;1820:38;:::i;:::-;1810:48;;1610:254;;;;;:::o;1869:645::-;1985:6;1993;2046:2;2034:9;2025:7;2021:23;2017:32;2014:52;;;2062:1;2059;2052:12;2014:52;2102:9;2089:23;2131:18;2172:2;2164:6;2161:14;2158:34;;;2188:1;2185;2178:12;2158:34;2226:6;2215:9;2211:22;2201:32;;2271:7;2264:4;2260:2;2256:13;2252:27;2242:55;;2293:1;2290;2283:12;2242:55;2333:2;23
20:16;2359:2;2351:6;2348:14;2345:34;;;2375:1;2372;2365:12;2345:34;2428:7;2423:2;2413:6;2410:1;2406:14;2402:2;2398:23;2394:32;2391:45;2388:65;;;2449:1;2446;2439:12;2388:65;2480:2;2472:11;;;;;2502:6;;-1:-1:-1;1869:645:38;;-1:-1:-1;;;;1869:645:38:o;2519:397::-;2605:6;2613;2621;2629;2682:3;2670:9;2661:7;2657:23;2653:33;2650:53;;;2699:1;2696;2689:12;2650:53;2735:9;2722:23;2712:33;;2764:38;2798:2;2787:9;2783:18;2764:38;:::i;:::-;2754:48;;2821:38;2855:2;2844:9;2840:18;2821:38;:::i;:::-;2519:397;;;;-1:-1:-1;2811:48:38;;2906:2;2891:18;2878:32;;-1:-1:-1;;2519:397:38:o;2921:186::-;2980:6;3033:2;3021:9;3012:7;3008:23;3004:32;3001:52;;;3049:1;3046;3039:12;3001:52;3072:29;3091:9;3072:29;:::i;3112:280::-;3170:6;3223:2;3211:9;3202:7;3198:23;3194:32;3191:52;;;3239:1;3236;3229:12;3191:52;3278:9;3265:23;3328:14;3321:5;3317:26;3310:5;3307:37;3297:65;;3358:1;3355;3348:12;3397:779;3455:5;3503:4;3491:9;3486:3;3482:19;3478:30;3475:50;;;3521:1;3518;3511:12;3475:50;3554:2;3548:9;3596:4;3588:6;3584:17;3667:6;3655:10;3652:22;3631:18;3619:10;3616:34;3613:62;3610:185;;;3717:10;3712:3;3708:20;3705:1;3698:31;3752:4;3749:1;3742:15;3780:4;3777:1;3770:15;3610:185;3815:10;3811:2;3804:22;;3844:6;3835:15;;3887:9;3874:23;3866:6;3859:39;3959:2;3948:9;3944:18;3931:32;3926:2;3918:6;3914:15;3907:57;4025:2;4014:9;4010:18;3997:32;3992:2;3984:6;3980:15;3973:57;4091:2;4080:9;4076:18;4063:32;4058:2;4050:6;4046:15;4039:57;4130:39;4164:3;4153:9;4149:19;4130:39;:::i;:::-;4124:3;4116:6;4112:16;4105:65;;3397:779;;;;:::o;4181:1086::-;4324:6;4332;4340;4348;4356;4364;4372;4425:3;4413:9;4404:7;4400:23;4396:33;4393:53;;;4442:1;4439;4432:12;4393:53;4465:49;4506:7;4495:9;4465:49;:::i;:::-;4455:59;;4561:3;4550:9;4546:19;4533:33;4523:43;;4616:3;4605:9;4601:19;4588:33;4661:4;4654:5;4650:16;4643:5;4640:27;4630:55;;4681:1;4678;4671:12;4630:55;4704:5;-1:-1:-1;4756:3:38;4741:19;;4728:33;;-1:-1:-1;4808:3:38;4793:19;;4780:33;;-1:-1:-1;4864:3:38;4849:19;;4836:33;4888:18;4918:14;;;4915:34;;;4945:1;4942;4935:12;4915:34;4983:6;4972:9;4968:22;4958:32;;5028:7;5021:4;5017:2;5013:13;5009:27;4999:55;;5050:1;5047;5040:12;4999:55;5090:2;5077:16;5116:2;5108:6;5105:14;5102:34;;;5132:1;5129;5122:12;5102:34;5179:7;5172:4;5163:6;5159:2;5155:15;5151:26;5148:39;5145:59;;;5200:1;5197;5190:12;5145:59;5231:4;5227:2;5223:13;5213:23;;5255:6;5245:16;;;;;4181:1086;;;;;;;;;;:::o;5780:306::-;5878:6;5886;5939:3;5927:9;5918:7;5914:23;5910:33;5907:53;;;5956:1;5953;5946:12;5907:53;5979:49;6020:7;6009:9;5979:49;:::i;:::-;5969:59;6075:3;6060:19;;;;6047:33;;-1:-1:-1;;;5780:306:38:o;6901:127::-;6962:10;6957:3;6953:20;6950:1;6943:31;6993:4;6990:1;6983:15;7017:4;7014:1;7007:15;7033:127;7094:10;7089:3;7085:20;7082:1;7075:31;7125:4;7122:1;7115:15;7149:4;7146:1;7139:15;7165:128;7232:9;;;7253:11;;;7250:37;;;7267:18;;:::i;7678:277::-;7745:6;7798:2;7786:9;7777:7;7773:23;7769:32;7766:52;;;7814:1;7811;7804:12;7766:52;7846:9;7840:16;7899:5;7892:13;7885:21;7878:5;7875:32;7865:60;;7921:1;7918;7911:12;7960:388;8117:2;8106:9;8099:21;8156:6;8151:2;8140:9;8136:18;8129:34;8213:6;8205;8200:2;8189:9;8185:18;8172:48;8269:1;8240:22;;;8264:2;8236:31;;;8229:42;;;;8332:2;8311:15;;;-1:-1:-1;;8307:29:38;8292:45;8288:54;;7960:388;-1:-1:-1;7960:388:38:o;9134:176::-;9201:14;9235:10;;;9247;;;9231:27;;9270:11;;;9267:37;;;9284:18;;:::i;:::-;9267:37;9134:176;;;;:::o;9315:135::-;9354:3;9375:17;;;9372:43;;9395:18;;:::i;:::-;-1:-1:-1;9442:1:38;9431:13;;9315:135::o;10907:179::-;10975:14;11022:10;;;11010;;;11006:27;;11045:12;;;11042:38;;;11060:18;;:::i","linkReferences":{},"immutableReferences":{"49726":[{"start":398,"length":32},
{"start":442,"length":32}]}},"methodIdentifiers":{"DEFAULT_ADMIN_ROLE()":"a217fddf","SEQUENCER_ROLE()":"4842855c","acceptDefaultAdminTransfer()":"cefc1429","beginDefaultAdminTransfer(address)":"634e93da","blockCommitment((uint256,uint256,uint256,uint256,address),bytes32)":"c7bc4a62","cancelDefaultAdminTransfer()":"d602b9fd","changeDefaultAdminDelay(uint48)":"649a5ec7","defaultAdmin()":"84ef8ffc","defaultAdminDelay()":"cc8463c8","defaultAdminDelayIncreaseWait()":"022d63fb","enter(uint256,address)":"ea3b9ba1","enter(uint256,address,address,uint256)":"3805c6bd","fulfillExits((uint256,address,address,uint256)[])":"36702119","getRoleAdmin(bytes32)":"248a9ca3","grantRole(bytes32,address)":"2f2ff15d","hasRole(bytes32,address)":"91d14854","lastSubmittedAtBlock(uint256)":"7e82bb01","nextSequence(uint256)":"1e663720","owner()":"8da5cb5b","pendingDefaultAdmin()":"cf6eefb7","pendingDefaultAdminDelay()":"a1eda53c","renounceRole(bytes32,address)":"36568abe","revokeRole(bytes32,address)":"d547741f","rollbackDefaultAdminDelay()":"0aa6220b","submitBlock((uint256,uint256,uint256,uint256,address),bytes32,uint8,bytes32,bytes32,bytes)":"7e569274","supportsInterface(bytes4)":"01ffc9a7"},"rawMetadata":"{\"compiler\":{\"version\":\"0.8.25+commit.b61c2a91\"},\"language\":\"Solidity\",\"output\":{\"abi\":[{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"defaultRollupChainId\",\"type\":\"uint256\"},{\"internalType\":\"address\",\"name\":\"admin\",\"type\":\"address\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"inputs\":[],\"name\":\"AccessControlBadConfirmation\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"uint48\",\"name\":\"schedule\",\"type\":\"uint48\"}],\"name\":\"AccessControlEnforcedDefaultAdminDelay\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"AccessControlEnforcedDefaultAdminRules\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"defaultAdmin\",\"type\":\"address\"}],\"name\":\"AccessControlInvalidDefaultAdmin\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"account\",\"type\":\"address\"},{\"internalType\":\"bytes32\",\"name\":\"neededRole\",\"type\":\"bytes32\"}],\"name\":\"AccessControlUnauthorizedAccount\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"expected\",\"type\":\"uint256\"}],\"name\":\"BadSequence\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"derivedSequencer\",\"type\":\"address\"}],\"name\":\"BadSignature\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"BlockExpired\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OneRollupBlockPerHostBlock\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OrderExpired\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"uint8\",\"name\":\"bits\",\"type\":\"uint8\"},{\"internalType\":\"uint256\",\"name\":\"value\",\"type\":\"uint256\"}],\"name\":\"SafeCastOverflowedUintDowncast\",\"type\":\"error\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"blockData\",\"type\":\"bytes\"}],\"name\":\"BlockData\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"sequencer\",\"type\":\"address\"},{\"components\":[{\"internalType\":\"uint256\",\"name\":\"rollupChainId\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"sequence\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"confirmBy\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"gasLimit\",\"type\":\"uint256\"},{\"internalType\
":\"address\",\"name\":\"rewardAddress\",\"type\":\"address\"}],\"indexed\":true,\"internalType\":\"struct Zenith.BlockHeader\",\"name\":\"header\",\"type\":\"tuple\"},{\"indexed\":false,\"internalType\":\"bytes32\",\"name\":\"blockDataHash\",\"type\":\"bytes32\"}],\"name\":\"BlockSubmitted\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[],\"name\":\"DefaultAdminDelayChangeCanceled\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint48\",\"name\":\"newDelay\",\"type\":\"uint48\"},{\"indexed\":false,\"internalType\":\"uint48\",\"name\":\"effectSchedule\",\"type\":\"uint48\"}],\"name\":\"DefaultAdminDelayChangeScheduled\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[],\"name\":\"DefaultAdminTransferCanceled\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"newAdmin\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint48\",\"name\":\"acceptSchedule\",\"type\":\"uint48\"}],\"name\":\"DefaultAdminTransferScheduled\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"rollupChainId\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"token\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"rollupRecipient\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"}],\"name\":\"Enter\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"rollupChainId\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"token\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"hostRecipient\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"}],\"name\":\"ExitFilled\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"role\",\"type\":\"bytes32\"},{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"previousAdminRole\",\"type\":\"bytes32\"},{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"newAdminRole\",\"type\":\"bytes32\"}],\"name\":\"RoleAdminChanged\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"role\",\"type\":\"bytes32\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"account\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"sender\",\"type\":\"address\"}],\"name\":\"RoleGranted\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"role\",\"type\":\"bytes32\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"account\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"sender\",\"type\":\"address\"}],\"name\":\"RoleRevoked\",\"type\":\"event\"},{\"stateMutability\":\"payable\",\"type\":\"fallback\"},{\"inputs\":[],\"name\":\"DEFAULT_ADMIN_ROLE\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"SEQUENCER_ROLE\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"acceptDefaultAdminTransfer\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\"
:\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"newAdmin\",\"type\":\"address\"}],\"name\":\"beginDefaultAdminTransfer\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"components\":[{\"internalType\":\"uint256\",\"name\":\"rollupChainId\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"sequence\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"confirmBy\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"gasLimit\",\"type\":\"uint256\"},{\"internalType\":\"address\",\"name\":\"rewardAddress\",\"type\":\"address\"}],\"internalType\":\"struct Zenith.BlockHeader\",\"name\":\"header\",\"type\":\"tuple\"},{\"internalType\":\"bytes32\",\"name\":\"blockDataHash\",\"type\":\"bytes32\"}],\"name\":\"blockCommitment\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"commit\",\"type\":\"bytes32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"cancelDefaultAdminTransfer\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint48\",\"name\":\"newDelay\",\"type\":\"uint48\"}],\"name\":\"changeDefaultAdminDelay\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"defaultAdmin\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"defaultAdminDelay\",\"outputs\":[{\"internalType\":\"uint48\",\"name\":\"\",\"type\":\"uint48\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"defaultAdminDelayIncreaseWait\",\"outputs\":[{\"internalType\":\"uint48\",\"name\":\"\",\"type\":\"uint48\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"rollupChainId\",\"type\":\"uint256\"},{\"internalType\":\"address\",\"name\":\"rollupRecipient\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"token\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"}],\"name\":\"enter\",\"outputs\":[],\"stateMutability\":\"payable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"rollupChainId\",\"type\":\"uint256\"},{\"internalType\":\"address\",\"name\":\"rollupRecipient\",\"type\":\"address\"}],\"name\":\"enter\",\"outputs\":[],\"stateMutability\":\"payable\",\"type\":\"function\"},{\"inputs\":[{\"components\":[{\"internalType\":\"uint256\",\"name\":\"rollupChainId\",\"type\":\"uint256\"},{\"internalType\":\"address\",\"name\":\"token\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"recipient\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"}],\"internalType\":\"struct 
Passage.ExitOrder[]\",\"name\":\"orders\",\"type\":\"tuple[]\"}],\"name\":\"fulfillExits\",\"outputs\":[],\"stateMutability\":\"payable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"role\",\"type\":\"bytes32\"}],\"name\":\"getRoleAdmin\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"role\",\"type\":\"bytes32\"},{\"internalType\":\"address\",\"name\":\"account\",\"type\":\"address\"}],\"name\":\"grantRole\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"role\",\"type\":\"bytes32\"},{\"internalType\":\"address\",\"name\":\"account\",\"type\":\"address\"}],\"name\":\"hasRole\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"name\":\"lastSubmittedAtBlock\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"name\":\"nextSequence\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"owner\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"pendingDefaultAdmin\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"newAdmin\",\"type\":\"address\"},{\"internalType\":\"uint48\",\"name\":\"schedule\",\"type\":\"uint48\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"pendingDefaultAdminDelay\",\"outputs\":[{\"internalType\":\"uint48\",\"name\":\"newDelay\",\"type\":\"uint48\"},{\"internalType\":\"uint48\",\"name\":\"schedule\",\"type\":\"uint48\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"role\",\"type\":\"bytes32\"},{\"internalType\":\"address\",\"name\":\"account\",\"type\":\"address\"}],\"name\":\"renounceRole\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"role\",\"type\":\"bytes32\"},{\"internalType\":\"address\",\"name\":\"account\",\"type\":\"address\"}],\"name\":\"revokeRole\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"rollbackDefaultAdminDelay\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"components\":[{\"internalType\":\"uint256\",\"name\":\"rollupChainId\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"sequence\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"confirmBy\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"gasLimit\",\"type\":\"uint256\"},{\"internalType\":\"address\",\"name\":\"rewardAddress\",\"type\":\"address\"}],\"internalType\":\"struct 
Zenith.BlockHeader\",\"name\":\"header\",\"type\":\"tuple\"},{\"internalType\":\"bytes32\",\"name\":\"blockDataHash\",\"type\":\"bytes32\"},{\"internalType\":\"uint8\",\"name\":\"v\",\"type\":\"uint8\"},{\"internalType\":\"bytes32\",\"name\":\"r\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"s\",\"type\":\"bytes32\"},{\"internalType\":\"bytes\",\"name\":\"blockData\",\"type\":\"bytes\"}],\"name\":\"submitBlock\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes4\",\"name\":\"interfaceId\",\"type\":\"bytes4\"}],\"name\":\"supportsInterface\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"stateMutability\":\"payable\",\"type\":\"receive\"}],\"devdoc\":{\"errors\":{\"AccessControlBadConfirmation()\":[{\"details\":\"The caller of a function is not the expected one. NOTE: Don't confuse with {AccessControlUnauthorizedAccount}.\"}],\"AccessControlEnforcedDefaultAdminDelay(uint48)\":[{\"details\":\"The delay for transferring the default admin delay is enforced and the operation must wait until `schedule`. NOTE: `schedule` can be 0 indicating there's no transfer scheduled.\"}],\"AccessControlEnforcedDefaultAdminRules()\":[{\"details\":\"At least one of the following rules was violated: - The `DEFAULT_ADMIN_ROLE` must only be managed by itself. - The `DEFAULT_ADMIN_ROLE` must only be held by one account at the time. - Any `DEFAULT_ADMIN_ROLE` transfer must be in two delayed steps.\"}],\"AccessControlInvalidDefaultAdmin(address)\":[{\"details\":\"The new default admin is not a valid default admin.\"}],\"AccessControlUnauthorizedAccount(address,bytes32)\":[{\"details\":\"The `account` is missing a role.\"}],\"BadSequence(uint256)\":[{\"details\":\"Blocks must be submitted in strict monotonic increasing order.\",\"params\":{\"expected\":\"- the correct next sequence number for the given rollup chainId.\"}}],\"BadSignature(address)\":[{\"params\":{\"derivedSequencer\":\"- the derived signer of the block data that is not a permissioned sequencer.\"}}],\"SafeCastOverflowedUintDowncast(uint8,uint256)\":[{\"details\":\"Value doesn't fit in an uint of `bits` size.\"}]},\"events\":{\"BlockSubmitted(address,(uint256,uint256,uint256,uint256,address),bytes32)\":{\"params\":{\"blockDataHash\":\"- keccak256(blockData). 
the Node will discard the block if the hash doesn't match.\",\"header\":\"- the block header information for the block.\",\"sequencer\":\"- the address of the sequencer that signed the block.\"}},\"DefaultAdminDelayChangeCanceled()\":{\"details\":\"Emitted when a {pendingDefaultAdminDelay} is reset if its schedule didn't pass.\"},\"DefaultAdminDelayChangeScheduled(uint48,uint48)\":{\"details\":\"Emitted when a {defaultAdminDelay} change is started, setting `newDelay` as the next delay to be applied between default admin transfers after `effectSchedule` has passed.\"},\"DefaultAdminTransferCanceled()\":{\"details\":\"Emitted when a {pendingDefaultAdmin} is reset if it was never accepted, regardless of its schedule.\"},\"DefaultAdminTransferScheduled(address,uint48)\":{\"details\":\"Emitted when a {defaultAdmin} transfer is started, setting `newAdmin` as the next address to become the {defaultAdmin} by calling {acceptDefaultAdminTransfer} only after `acceptSchedule` passes.\"},\"Enter(uint256,address,address,uint256)\":{\"params\":{\"amount\":\"- The amount of the token entering the rollup.\",\"rollupRecipient\":\"- The recipient of the token on the rollup.\",\"token\":\"- The address of the token entering the rollup.\"}},\"ExitFilled(uint256,address,address,uint256)\":{\"params\":{\"amount\":\"- The amount of the token transferred to the recipient.\",\"hostRecipient\":\"- The recipient of the token on host.\",\"token\":\"- The address of the token transferred to the recipient.\"}},\"RoleAdminChanged(bytes32,bytes32,bytes32)\":{\"details\":\"Emitted when `newAdminRole` is set as ``role``'s admin role, replacing `previousAdminRole`. `DEFAULT_ADMIN_ROLE` is the starting admin for all roles, despite {RoleAdminChanged} not being emitted signaling this.\"},\"RoleGranted(bytes32,address,address)\":{\"details\":\"Emitted when `account` is granted `role`. `sender` is the account that originated the contract call. This account bears the admin role (for the granted role). Expected in cases where the role was granted using the internal {AccessControl-_grantRole}.\"},\"RoleRevoked(bytes32,address,address)\":{\"details\":\"Emitted when `account` is revoked `role`. `sender` is the account that originated the contract call: - if using `revokeRole`, it is the admin role bearer - if using `renounceRole`, it is the role bearer (i.e. `account`)\"}},\"kind\":\"dev\",\"methods\":{\"acceptDefaultAdminTransfer()\":{\"details\":\"Completes a {defaultAdmin} transfer previously started with {beginDefaultAdminTransfer}. After calling the function: - `DEFAULT_ADMIN_ROLE` should be granted to the caller. - `DEFAULT_ADMIN_ROLE` should be revoked from the previous holder. - {pendingDefaultAdmin} should be reset to zero values. Requirements: - Only can be called by the {pendingDefaultAdmin}'s `newAdmin`. - The {pendingDefaultAdmin}'s `acceptSchedule` should've passed.\"},\"beginDefaultAdminTransfer(address)\":{\"details\":\"Starts a {defaultAdmin} transfer by setting a {pendingDefaultAdmin} scheduled for acceptance after the current timestamp plus a {defaultAdminDelay}. Requirements: - Only can be called by the current {defaultAdmin}. 
Emits a DefaultAdminRoleChangeStarted event.\"},\"blockCommitment((uint256,uint256,uint256,uint256,address),bytes32)\":{\"params\":{\"header\":\"- the header information for the rollup block.\"},\"returns\":{\"commit\":\"- the hash of the encoded block details.\"}},\"cancelDefaultAdminTransfer()\":{\"details\":\"Cancels a {defaultAdmin} transfer previously started with {beginDefaultAdminTransfer}. A {pendingDefaultAdmin} not yet accepted can also be cancelled with this function. Requirements: - Only can be called by the current {defaultAdmin}. May emit a DefaultAdminTransferCanceled event.\"},\"changeDefaultAdminDelay(uint48)\":{\"details\":\"Initiates a {defaultAdminDelay} update by setting a {pendingDefaultAdminDelay} scheduled for getting into effect after the current timestamp plus a {defaultAdminDelay}. This function guarantees that any call to {beginDefaultAdminTransfer} done between the timestamp this method is called and the {pendingDefaultAdminDelay} effect schedule will use the current {defaultAdminDelay} set before calling. The {pendingDefaultAdminDelay}'s effect schedule is defined in a way that waiting until the schedule and then calling {beginDefaultAdminTransfer} with the new delay will take at least the same as another {defaultAdmin} complete transfer (including acceptance). The schedule is designed for two scenarios: - When the delay is changed for a larger one the schedule is `block.timestamp + newDelay` capped by {defaultAdminDelayIncreaseWait}. - When the delay is changed for a shorter one, the schedule is `block.timestamp + (current delay - new delay)`. A {pendingDefaultAdminDelay} that never got into effect will be canceled in favor of a new scheduled change. Requirements: - Only can be called by the current {defaultAdmin}. Emits a DefaultAdminDelayChangeScheduled event and may emit a DefaultAdminDelayChangeCanceled event.\"},\"constructor\":{\"details\":\"See `AccessControlDefaultAdminRules` for information on contract administration. - Admin role can grant and revoke Sequencer roles. - Admin role can be transferred via a two-step process with a 1 day timelock.\",\"params\":{\"admin\":\"- the address that will be the initial admin.\"}},\"defaultAdmin()\":{\"details\":\"Returns the address of the current `DEFAULT_ADMIN_ROLE` holder.\"},\"defaultAdminDelay()\":{\"details\":\"Returns the delay required to schedule the acceptance of a {defaultAdmin} transfer started. This delay will be added to the current timestamp when calling {beginDefaultAdminTransfer} to set the acceptance schedule. NOTE: If a delay change has been scheduled, it will take effect as soon as the schedule passes, making this function return the new delay. See {changeDefaultAdminDelay}.\"},\"defaultAdminDelayIncreaseWait()\":{\"details\":\"Maximum time in seconds for an increase to {defaultAdminDelay} (that is scheduled using {changeDefaultAdminDelay}) to take effect. Defaults to 5 days. When the {defaultAdminDelay} is scheduled to be increased, it goes into effect after the new delay has passed with the purpose of giving enough time for reverting any accidental change (i.e. using milliseconds instead of seconds) that may lock the contract. However, to avoid excessive schedules, the wait is capped by this function and it can be overridden for a custom {defaultAdminDelay} increase scheduling. 
IMPORTANT: Make sure to add a reasonable amount of time while overriding this value, otherwise, there's a risk of setting a high new delay that goes into effect almost immediately without the possibility of human intervention in the case of an input error (eg. set milliseconds instead of seconds).\"},\"enter(uint256,address)\":{\"custom:emits\":\"Enter indicating the amount of Ether to mint on the rollup & its recipient.\",\"details\":\"Permanently burns the entire msg.value by locking it in this contract.\",\"params\":{\"rollupChainId\":\"- The rollup chain to enter.\",\"rollupRecipient\":\"- The recipient of the Ether on the rollup.\"}},\"enter(uint256,address,address,uint256)\":{\"custom:emits\":\"Enter indicating the amount of tokens to mint on the rollup & its recipient.\",\"details\":\"Permanently burns the token amount by locking it in this contract.\",\"params\":{\"amount\":\"- The amount of the ERC20 token to transfer to the rollup.\",\"rollupChainId\":\"- The rollup chain to enter.\",\"rollupRecipient\":\"- The recipient of the Ether on the rollup.\",\"token\":\"- The address of the ERC20 token on the Host.\"}},\"fulfillExits((uint256,address,address,uint256)[])\":{\"custom:emits\":\"ExitFilled for each exit order fulfilled.\",\"details\":\"Builder SHOULD call `fulfillExits` atomically with `submitBlock`. Builder SHOULD set a block expiration time that is AT MOST the minimum of all exit order deadlines; this way, `fulfillExits` + `submitBlock` will revert atomically on mainnet if any exit orders have expired. Otherwise, `fulfillExits` may mine on mainnet, while `submitExit` reverts on the rollup, and the Builder can't collect the corresponding value on the rollup. Called by the Builder atomically with a transaction calling `submitBlock`. The user-submitted transactions initiating the ExitOrders on the rollup must be included by the Builder in the rollup block submitted via `submitBlock`. The user transfers tokenIn on the rollup, and receives tokenOut on host. The Builder receives tokenIn on the rollup, and transfers tokenOut to the user on host. The rollup STF MUST NOT apply `submitExit` transactions to the rollup state UNLESS a corresponding ExitFilled event is emitted on host in the same block. If the user submits multiple exit transactions for the same token in the same rollup block, the Builder may transfer the cumulative tokenOut to the user in a single ExitFilled event. The rollup STF will apply the user's exit transactions on the rollup up to the point that sum(tokenOut) is lte the ExitFilled amount. TODO: add option to fulfill ExitOrders with native ETH? or is it sufficient to only allow users to exit via WETH?\",\"params\":{\"orders\":\"The exit orders to fulfill\"}},\"getRoleAdmin(bytes32)\":{\"details\":\"Returns the admin role that controls `role`. See {grantRole} and {revokeRole}. To change a role's admin, use {_setRoleAdmin}.\"},\"grantRole(bytes32,address)\":{\"details\":\"See {AccessControl-grantRole}. Reverts for `DEFAULT_ADMIN_ROLE`.\"},\"hasRole(bytes32,address)\":{\"details\":\"Returns `true` if `account` has been granted `role`.\"},\"owner()\":{\"details\":\"See {IERC5313-owner}.\"},\"pendingDefaultAdmin()\":{\"details\":\"Returns a tuple of a `newAdmin` and an accept schedule. After the `schedule` passes, the `newAdmin` will be able to accept the {defaultAdmin} role by calling {acceptDefaultAdminTransfer}, completing the role transfer. A zero value only in `acceptSchedule` indicates no pending admin transfer.
NOTE: A zero address `newAdmin` means that {defaultAdmin} is being renounced.\"},\"pendingDefaultAdminDelay()\":{\"details\":\"Returns a tuple of `newDelay` and an effect schedule. After the `schedule` passes, the `newDelay` will get into effect immediately for every new {defaultAdmin} transfer started with {beginDefaultAdminTransfer}. A zero value only in `effectSchedule` indicates no pending delay change. NOTE: A zero value only for `newDelay` means that the next {defaultAdminDelay} will be zero after the effect schedule.\"},\"renounceRole(bytes32,address)\":{\"details\":\"See {AccessControl-renounceRole}. For the `DEFAULT_ADMIN_ROLE`, it only allows renouncing in two steps by first calling {beginDefaultAdminTransfer} to the `address(0)`, so it's required that the {pendingDefaultAdmin} schedule has also passed when calling this function. After its execution, it will not be possible to call `onlyRole(DEFAULT_ADMIN_ROLE)` functions. NOTE: Renouncing `DEFAULT_ADMIN_ROLE` will leave the contract without a {defaultAdmin}, thereby disabling any functionality that is only available for it, and the possibility of reassigning a non-administrated role.\"},\"revokeRole(bytes32,address)\":{\"details\":\"See {AccessControl-revokeRole}. Reverts for `DEFAULT_ADMIN_ROLE`.\"},\"rollbackDefaultAdminDelay()\":{\"details\":\"Cancels a scheduled {defaultAdminDelay} change. Requirements: - Only can be called by the current {defaultAdmin}. May emit a DefaultAdminDelayChangeCanceled event.\"},\"submitBlock((uint256,uint256,uint256,uint256,address),bytes32,uint8,bytes32,bytes32,bytes)\":{\"custom:emits\":\"BlockSubmitted if the block is successfully submitted. BlockData to expose the block calldata; as a convenience until calldata tracing is implemented in the Node.\",\"custom:reverts\":\"BadSequence if the sequence number is not the next block for the given rollup chainId. BlockExpired if the confirmBy time has passed. BadSignature if the signer is not a permissioned sequencer, OR if the signature provided commits to a different header. OneRollupBlockPerHostBlock if attempting to submit a second rollup block within one host block.\",\"details\":\"Blocks are submitted by Builders, with an attestation to the block data signed by a Sequencer. Including blockDataHash allows the sequencer to sign over finalized block data, without needing to calldatacopy the `blockData` param.\",\"params\":{\"blockData\":\"- block data information. Could be packed blob hashes, or direct rlp-encoded transactions. blockData is ignored by the contract logic.\",\"blockDataHash\":\"- keccak256(blockData).
The Node will discard the block if the hash doesn't match.\",\"header\":\"- the header information for the rollup block.\",\"r\":\"- the r component of the Sequencer's ECDSA signature over the block header.\",\"s\":\"- the s component of the Sequencer's ECDSA signature over the block header.\",\"v\":\"- the v component of the Sequencer's ECDSA signature over the block header.\"}},\"supportsInterface(bytes4)\":{\"details\":\"See {IERC165-supportsInterface}.\"}},\"version\":1},\"userdoc\":{\"errors\":{\"BadSequence(uint256)\":[{\"notice\":\"Thrown when a block submission is attempted with a sequence number that is not the next block for the rollup chainId.\"}],\"BadSignature(address)\":[{\"notice\":\"Thrown when a block submission is attempted with a signature by a non-permissioned sequencer, OR when signature is produced over different data than is provided.\"}],\"BlockExpired()\":[{\"notice\":\"Thrown when a block submission is attempted when the confirmBy time has passed.\"}],\"OneRollupBlockPerHostBlock()\":[{\"notice\":\"Thrown when attempting to submit more than one rollup block per host block\"}],\"OrderExpired()\":[{\"notice\":\"Thrown when attempting to fulfill an exit order with a deadline that has passed.\"}]},\"events\":{\"BlockData(bytes)\":{\"notice\":\"Emit the entire block data for easy visibility\"},\"BlockSubmitted(address,(uint256,uint256,uint256,uint256,address),bytes32)\":{\"notice\":\"Emitted when a new rollup block is successfully submitted.\"},\"Enter(uint256,address,address,uint256)\":{\"notice\":\"Emitted when tokens enter the rollup.\"},\"ExitFilled(uint256,address,address,uint256)\":{\"notice\":\"Emitted when an exit order is fulfilled by the Builder.\"}},\"kind\":\"user\",\"methods\":{\"SEQUENCER_ROLE()\":{\"notice\":\"Role that allows a key to sign commitments to rollup blocks.\"},\"blockCommitment((uint256,uint256,uint256,uint256,address),bytes32)\":{\"notice\":\"Construct hash of block details that the sequencer signs.\"},\"constructor\":{\"notice\":\"Initializes the Admin role.\"},\"enter(uint256,address)\":{\"notice\":\"Allows native Ether to enter the rollup.\"},\"enter(uint256,address,address,uint256)\":{\"notice\":\"Allows ERC20s to enter the rollup.\"},\"fulfillExits((uint256,address,address,uint256)[])\":{\"notice\":\"Fulfills exit orders by transferring tokenOut to the recipient\"},\"lastSubmittedAtBlock(uint256)\":{\"notice\":\"The host block number that a block was last submitted at for a given rollup chainId. rollupChainId => host blockNumber that block was last submitted at\"},\"nextSequence(uint256)\":{\"notice\":\"The sequence number of the next block that can be submitted for a given rollup chainId.
rollupChainId => nextSequence number\"},\"submitBlock((uint256,uint256,uint256,uint256,address),bytes32,uint8,bytes32,bytes32,bytes)\":{\"notice\":\"Submit a rollup block with block data submitted via calldata.\"}},\"version\":1}},\"settings\":{\"compilationTarget\":{\"src/Zenith.sol\":\"Zenith\"},\"evmVersion\":\"cancun\",\"libraries\":{},\"metadata\":{\"bytecodeHash\":\"ipfs\"},\"optimizer\":{\"enabled\":true,\"runs\":200},\"remappings\":[\":@openzeppelin/contracts/=lib/openzeppelin-contracts/contracts/\",\":ds-test/=lib/forge-std/lib/ds-test/src/\",\":erc4626-tests/=lib/openzeppelin-contracts/lib/erc4626-tests/\",\":forge-std/=lib/forge-std/src/\",\":openzeppelin-contracts/=lib/openzeppelin-contracts/\"]},\"sources\":{\"lib/openzeppelin-contracts/contracts/access/AccessControl.sol\":{\"keccak256\":\"0xa0e92d42942f4f57c5be50568dac11e9d00c93efcb458026e18d2d9b9b2e7308\",\"license\":\"MIT\",\"urls\":[\"bzz-raw://46326c0bb1e296b67185e81c918e0b40501b8b6386165855df0a3f3c634b6a80\",\"dweb:/ipfs/QmTwyrDYtsxsk6pymJTK94PnEpzsmkpUxFuzEiakDopy4Z\"]},\"lib/openzeppelin-contracts/contracts/access/IAccessControl.sol\":{\"keccak256\":\"0xc503b1464e90b1cf79d81239f719f81c35ff646b17b638c87fe87a1d7bc5d94d\",\"license\":\"MIT\",\"urls\":[\"bzz-raw://381076837654e98f1d5dfc3909a3ebb80e2c86a97d662b507320701e09cb7a60\",\"dweb:/ipfs/QmWGwdWe9JWx2ae3n8EhWuY6ipWo6shVg9bct6y5og7v9Y\"]},\"lib/openzeppelin-contracts/contracts/access/extensions/AccessControlDefaultAdminRules.sol\":{\"keccak256\":\"0xd5e43578dce2678fbd458e1221dc37b20e983ecce4a314b422704f07d6015c5b\",\"license\":\"MIT\",\"urls\":[\"bzz-raw://9ea4d9ae3392dc9db1ef4d7ebef84ce7fa243dc14abb46e68eb2eb60d2cd0e93\",\"dweb:/ipfs/QmRfjyDoLWF74EgmpcGkWZM7Kx1LgHN8dZHBxAnU9vPH46\"]},\"lib/openzeppelin-contracts/contracts/access/extensions/IAccessControlDefaultAdminRules.sol\":{\"keccak256\":\"0xc2dbeddf97707bf012827013b4a072bacbe56ad3219c405e30fd2a959e8a5413\",\"license\":\"MIT\",\"urls\":[\"bzz-raw://281289e424c30c2ea92fc25598315117410404cf76a756663ad39ba18fd38b48\",\"dweb:/ipfs/Qma3wmq2cjxpfkKKM7JrvyJzzohsNWNNWsnaf3jVNBD65v\"]},\"lib/openzeppelin-contracts/contracts/interfaces/IERC5313.sol\":{\"keccak256\":\"0x22412c268e74cc3cbf550aecc2f7456f6ac40783058e219cfe09f26f4d396621\",\"license\":\"MIT\",\"urls\":[\"bzz-raw://0b841021f25480424d2359de4869e60e77f790f52e8e85f07aa389543024b559\",\"dweb:/ipfs/QmV7U5ehV5xe3QrbE8ErxfWSSzK1T1dGeizXvYPjWpNDGq\"]},\"lib/openzeppelin-contracts/contracts/token/ERC20/IERC20.sol\":{\"keccak256\":\"0xee2337af2dc162a973b4be6d3f7c16f06298259e0af48c5470d2839bfa8a22f4\",\"license\":\"MIT\",\"urls\":[\"bzz-raw://30c476b4b2f405c1bb3f0bae15b006d129c80f1bfd9d0f2038160a3bb9745009\",\"dweb:/ipfs/Qmb3VcuDufv6xbHeVgksC4tHpc5gKYVqBEwjEXW72XzSvN\"]},\"lib/openzeppelin-contracts/contracts/utils/Context.sol\":{\"keccak256\":\"0x493033a8d1b176a037b2cc6a04dad01a5c157722049bbecf632ca876224dd4b2\",\"license\":\"MIT\",\"urls\":[\"bzz-raw://6a708e8a5bdb1011c2c381c9a5cfd8a9a956d7d0a9dc1bd8bcdaf52f76ef2f12\",\"dweb:/ipfs/Qmax9WHBnVsZP46ZxEMNRQpLQnrdE4dK8LehML1Py8FowF\"]},\"lib/openzeppelin-contracts/contracts/utils/Panic.sol\":{\"keccak256\":\"0x29074fe5a74bb024c57b3570abf6c74d8bceed3438694d470fd0166a3ecd196a\",\"license\":\"MIT\",\"urls\":[\"bzz-raw://f4f8435ccbc56e384f4cc9ac9ff491cf30a82f2beac00e33ccc2cf8af3f77cc3\",\"dweb:/ipfs/QmUKJXxTe6nn1qfgnX8xbnboNNAPUuEmJyGqMZCKNiFBgn\"]},\"lib/openzeppelin-contracts/contracts/utils/introspection/ERC165.sol\":{\"keccak256\":\"0x6fac27fb1885a1d9fd2ce3f8fac4e44a6596ca4d44207c9ef2541ba8c941291e\",\"license\":\"MIT\",\"urls\
":[\"bzz-raw://2079378abdb36baec15c23bc2353b73a3d28d1d0610b436b0c1c4e6fa61d65c9\",\"dweb:/ipfs/QmVZkRFMzKW7sLaugKSTbMNnUBKWF3QDsoMi5uoQFyVMjf\"]},\"lib/openzeppelin-contracts/contracts/utils/introspection/IERC165.sol\":{\"keccak256\":\"0xc859863e3bda7ec3cddf6dafe2ffe91bcbe648d1395b856b839c32ee9617c44c\",\"license\":\"MIT\",\"urls\":[\"bzz-raw://a9d5417888b873cf2225ed5d50b2a67be97c1504134a2a580512168d587ad82e\",\"dweb:/ipfs/QmNr5fTb2heFW658NZn7dDnofZgFvQTnNxKRJ3wdnR1skX\"]},\"lib/openzeppelin-contracts/contracts/utils/math/Math.sol\":{\"keccak256\":\"0x3233b02fcf2b20a41cce60a62e43c7e5a67a55b738ec1db842a82452e6aa170d\",\"license\":\"MIT\",\"urls\":[\"bzz-raw://231c75d18bc6973533dfa7d58d2b97e504ca4e21d703a5c8b0ec31475e97db67\",\"dweb:/ipfs/QmPJ29HDuFceD1FDr4CnjYYtvaQ234wGAfojZpL3RXFG26\"]},\"lib/openzeppelin-contracts/contracts/utils/math/SafeCast.sol\":{\"keccak256\":\"0x8cd59334ed58b8884cd1f775afc9400db702e674e5d6a7a438c655b9de788d7e\",\"license\":\"MIT\",\"urls\":[\"bzz-raw://99e62c7de7318f413b6352e3f2704ca23e7725ff144e43c8bd574d12dbf29047\",\"dweb:/ipfs/QmSEXG2rBx1VxU2uFTWdiChjDvA4osEY2mesjmoVeVhHko\"]},\"src/Passage.sol\":{\"keccak256\":\"0x81016c92006558f93c028e3d4f61ddad8ff870b956edaa19ad2ccd68ec5d292a\",\"license\":\"UNLICENSED\",\"urls\":[\"bzz-raw://dc70a7d97b18e988ce9560f4fabbf9caea3c6178f64fab056b1cf63d27bef6c5\",\"dweb:/ipfs/QmeJDLqvLdhkbWfyLHdYUPoGz7XHWw3zpe8YTCMQE9MacX\"]},\"src/Zenith.sol\":{\"keccak256\":\"0x0febef21c15ebf62421e25337341a8a11a6dd5b5dc2e9ea967a2d4769469ecd6\",\"license\":\"UNLICENSED\",\"urls\":[\"bzz-raw://405a8eb90f834ab216e96d40b8c1cfd98c4bc4e71399b09c04ef4123eb3bb1ab\",\"dweb:/ipfs/QmVakr7Upoe2tgU1jQSZUgXE1UASAuHh9kReZ2mfgCsdha\"]}},\"version\":1}","metadata":{"compiler":{"version":"0.8.25+commit.b61c2a91"},"language":"Solidity","output":{"abi":[{"inputs":[{"internalType":"uint256","name":"defaultRollupChainId","type":"uint256"},{"internalType":"address","name":"admin","type":"address"}],"stateMutability":"nonpayable","type":"constructor"},{"inputs":[],"type":"error","name":"AccessControlBadConfirmation"},{"inputs":[{"internalType":"uint48","name":"schedule","type":"uint48"}],"type":"error","name":"AccessControlEnforcedDefaultAdminDelay"},{"inputs":[],"type":"error","name":"AccessControlEnforcedDefaultAdminRules"},{"inputs":[{"internalType":"address","name":"defaultAdmin","type":"address"}],"type":"error","name":"AccessControlInvalidDefaultAdmin"},{"inputs":[{"internalType":"address","name":"account","type":"address"},{"internalType":"bytes32","name":"neededRole","type":"bytes32"}],"type":"error","name":"AccessControlUnauthorizedAccount"},{"inputs":[{"internalType":"uint256","name":"expected","type":"uint256"}],"type":"error","name":"BadSequence"},{"inputs":[{"internalType":"address","name":"derivedSequencer","type":"address"}],"type":"error","name":"BadSignature"},{"inputs":[],"type":"error","name":"BlockExpired"},{"inputs":[],"type":"error","name":"OneRollupBlockPerHostBlock"},{"inputs":[],"type":"error","name":"OrderExpired"},{"inputs":[{"internalType":"uint8","name":"bits","type":"uint8"},{"internalType":"uint256","name":"value","type":"uint256"}],"type":"error","name":"SafeCastOverflowedUintDowncast"},{"inputs":[{"internalType":"bytes","name":"blockData","type":"bytes","indexed":false}],"type":"event","name":"BlockData","anonymous":false},{"inputs":[{"internalType":"address","name":"sequencer","type":"address","indexed":true},{"internalType":"struct 
Zenith.BlockHeader","name":"header","type":"tuple","components":[{"internalType":"uint256","name":"rollupChainId","type":"uint256"},{"internalType":"uint256","name":"sequence","type":"uint256"},{"internalType":"uint256","name":"confirmBy","type":"uint256"},{"internalType":"uint256","name":"gasLimit","type":"uint256"},{"internalType":"address","name":"rewardAddress","type":"address"}],"indexed":true},{"internalType":"bytes32","name":"blockDataHash","type":"bytes32","indexed":false}],"type":"event","name":"BlockSubmitted","anonymous":false},{"inputs":[],"type":"event","name":"DefaultAdminDelayChangeCanceled","anonymous":false},{"inputs":[{"internalType":"uint48","name":"newDelay","type":"uint48","indexed":false},{"internalType":"uint48","name":"effectSchedule","type":"uint48","indexed":false}],"type":"event","name":"DefaultAdminDelayChangeScheduled","anonymous":false},{"inputs":[],"type":"event","name":"DefaultAdminTransferCanceled","anonymous":false},{"inputs":[{"internalType":"address","name":"newAdmin","type":"address","indexed":true},{"internalType":"uint48","name":"acceptSchedule","type":"uint48","indexed":false}],"type":"event","name":"DefaultAdminTransferScheduled","anonymous":false},{"inputs":[{"internalType":"uint256","name":"rollupChainId","type":"uint256","indexed":false},{"internalType":"address","name":"token","type":"address","indexed":true},{"internalType":"address","name":"rollupRecipient","type":"address","indexed":true},{"internalType":"uint256","name":"amount","type":"uint256","indexed":false}],"type":"event","name":"Enter","anonymous":false},{"inputs":[{"internalType":"uint256","name":"rollupChainId","type":"uint256","indexed":false},{"internalType":"address","name":"token","type":"address","indexed":true},{"internalType":"address","name":"hostRecipient","type":"address","indexed":true},{"internalType":"uint256","name":"amount","type":"uint256","indexed":false}],"type":"event","name":"ExitFilled","anonymous":false},{"inputs":[{"internalType":"bytes32","name":"role","type":"bytes32","indexed":true},{"internalType":"bytes32","name":"previousAdminRole","type":"bytes32","indexed":true},{"internalType":"bytes32","name":"newAdminRole","type":"bytes32","indexed":true}],"type":"event","name":"RoleAdminChanged","anonymous":false},{"inputs":[{"internalType":"bytes32","name":"role","type":"bytes32","indexed":true},{"internalType":"address","name":"account","type":"address","indexed":true},{"internalType":"address","name":"sender","type":"address","indexed":true}],"type":"event","name":"RoleGranted","anonymous":false},{"inputs":[{"internalType":"bytes32","name":"role","type":"bytes32","indexed":true},{"internalType":"address","name":"account","type":"address","indexed":true},{"internalType":"address","name":"sender","type":"address","indexed":true}],"type":"event","name":"RoleRevoked","anonymous":false},{"inputs":[],"stateMutability":"payable","type":"fallback"},{"inputs":[],"stateMutability":"view","type":"function","name":"DEFAULT_ADMIN_ROLE","outputs":[{"internalType":"bytes32","name":"","type":"bytes32"}]},{"inputs":[],"stateMutability":"view","type":"function","name":"SEQUENCER_ROLE","outputs":[{"internalType":"bytes32","name":"","type":"bytes32"}]},{"inputs":[],"stateMutability":"nonpayable","type":"function","name":"acceptDefaultAdminTransfer"},{"inputs":[{"internalType":"address","name":"newAdmin","type":"address"}],"stateMutability":"nonpayable","type":"function","name":"beginDefaultAdminTransfer"},{"inputs":[{"internalType":"struct 
Zenith.BlockHeader","name":"header","type":"tuple","components":[{"internalType":"uint256","name":"rollupChainId","type":"uint256"},{"internalType":"uint256","name":"sequence","type":"uint256"},{"internalType":"uint256","name":"confirmBy","type":"uint256"},{"internalType":"uint256","name":"gasLimit","type":"uint256"},{"internalType":"address","name":"rewardAddress","type":"address"}]},{"internalType":"bytes32","name":"blockDataHash","type":"bytes32"}],"stateMutability":"view","type":"function","name":"blockCommitment","outputs":[{"internalType":"bytes32","name":"commit","type":"bytes32"}]},{"inputs":[],"stateMutability":"nonpayable","type":"function","name":"cancelDefaultAdminTransfer"},{"inputs":[{"internalType":"uint48","name":"newDelay","type":"uint48"}],"stateMutability":"nonpayable","type":"function","name":"changeDefaultAdminDelay"},{"inputs":[],"stateMutability":"view","type":"function","name":"defaultAdmin","outputs":[{"internalType":"address","name":"","type":"address"}]},{"inputs":[],"stateMutability":"view","type":"function","name":"defaultAdminDelay","outputs":[{"internalType":"uint48","name":"","type":"uint48"}]},{"inputs":[],"stateMutability":"view","type":"function","name":"defaultAdminDelayIncreaseWait","outputs":[{"internalType":"uint48","name":"","type":"uint48"}]},{"inputs":[{"internalType":"uint256","name":"rollupChainId","type":"uint256"},{"internalType":"address","name":"rollupRecipient","type":"address"},{"internalType":"address","name":"token","type":"address"},{"internalType":"uint256","name":"amount","type":"uint256"}],"stateMutability":"payable","type":"function","name":"enter"},{"inputs":[{"internalType":"uint256","name":"rollupChainId","type":"uint256"},{"internalType":"address","name":"rollupRecipient","type":"address"}],"stateMutability":"payable","type":"function","name":"enter"},{"inputs":[{"internalType":"struct 
Passage.ExitOrder[]","name":"orders","type":"tuple[]","components":[{"internalType":"uint256","name":"rollupChainId","type":"uint256"},{"internalType":"address","name":"token","type":"address"},{"internalType":"address","name":"recipient","type":"address"},{"internalType":"uint256","name":"amount","type":"uint256"}]}],"stateMutability":"payable","type":"function","name":"fulfillExits"},{"inputs":[{"internalType":"bytes32","name":"role","type":"bytes32"}],"stateMutability":"view","type":"function","name":"getRoleAdmin","outputs":[{"internalType":"bytes32","name":"","type":"bytes32"}]},{"inputs":[{"internalType":"bytes32","name":"role","type":"bytes32"},{"internalType":"address","name":"account","type":"address"}],"stateMutability":"nonpayable","type":"function","name":"grantRole"},{"inputs":[{"internalType":"bytes32","name":"role","type":"bytes32"},{"internalType":"address","name":"account","type":"address"}],"stateMutability":"view","type":"function","name":"hasRole","outputs":[{"internalType":"bool","name":"","type":"bool"}]},{"inputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function","name":"lastSubmittedAtBlock","outputs":[{"internalType":"uint256","name":"","type":"uint256"}]},{"inputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function","name":"nextSequence","outputs":[{"internalType":"uint256","name":"","type":"uint256"}]},{"inputs":[],"stateMutability":"view","type":"function","name":"owner","outputs":[{"internalType":"address","name":"","type":"address"}]},{"inputs":[],"stateMutability":"view","type":"function","name":"pendingDefaultAdmin","outputs":[{"internalType":"address","name":"newAdmin","type":"address"},{"internalType":"uint48","name":"schedule","type":"uint48"}]},{"inputs":[],"stateMutability":"view","type":"function","name":"pendingDefaultAdminDelay","outputs":[{"internalType":"uint48","name":"newDelay","type":"uint48"},{"internalType":"uint48","name":"schedule","type":"uint48"}]},{"inputs":[{"internalType":"bytes32","name":"role","type":"bytes32"},{"internalType":"address","name":"account","type":"address"}],"stateMutability":"nonpayable","type":"function","name":"renounceRole"},{"inputs":[{"internalType":"bytes32","name":"role","type":"bytes32"},{"internalType":"address","name":"account","type":"address"}],"stateMutability":"nonpayable","type":"function","name":"revokeRole"},{"inputs":[],"stateMutability":"nonpayable","type":"function","name":"rollbackDefaultAdminDelay"},{"inputs":[{"internalType":"struct 
Zenith.BlockHeader","name":"header","type":"tuple","components":[{"internalType":"uint256","name":"rollupChainId","type":"uint256"},{"internalType":"uint256","name":"sequence","type":"uint256"},{"internalType":"uint256","name":"confirmBy","type":"uint256"},{"internalType":"uint256","name":"gasLimit","type":"uint256"},{"internalType":"address","name":"rewardAddress","type":"address"}]},{"internalType":"bytes32","name":"blockDataHash","type":"bytes32"},{"internalType":"uint8","name":"v","type":"uint8"},{"internalType":"bytes32","name":"r","type":"bytes32"},{"internalType":"bytes32","name":"s","type":"bytes32"},{"internalType":"bytes","name":"blockData","type":"bytes"}],"stateMutability":"nonpayable","type":"function","name":"submitBlock"},{"inputs":[{"internalType":"bytes4","name":"interfaceId","type":"bytes4"}],"stateMutability":"view","type":"function","name":"supportsInterface","outputs":[{"internalType":"bool","name":"","type":"bool"}]},{"inputs":[],"stateMutability":"payable","type":"receive"}],"devdoc":{"kind":"dev","methods":{"acceptDefaultAdminTransfer()":{"details":"Completes a {defaultAdmin} transfer previously started with {beginDefaultAdminTransfer}. After calling the function: - `DEFAULT_ADMIN_ROLE` should be granted to the caller. - `DEFAULT_ADMIN_ROLE` should be revoked from the previous holder. - {pendingDefaultAdmin} should be reset to zero values. Requirements: - Only can be called by the {pendingDefaultAdmin}'s `newAdmin`. - The {pendingDefaultAdmin}'s `acceptSchedule` should've passed."},"beginDefaultAdminTransfer(address)":{"details":"Starts a {defaultAdmin} transfer by setting a {pendingDefaultAdmin} scheduled for acceptance after the current timestamp plus a {defaultAdminDelay}. Requirements: - Only can be called by the current {defaultAdmin}. Emits a DefaultAdminRoleChangeStarted event."},"blockCommitment((uint256,uint256,uint256,uint256,address),bytes32)":{"params":{"header":"- the header information for the rollup block."},"returns":{"commit":"- the hash of the encoded block details."}},"cancelDefaultAdminTransfer()":{"details":"Cancels a {defaultAdmin} transfer previously started with {beginDefaultAdminTransfer}. A {pendingDefaultAdmin} not yet accepted can also be cancelled with this function. Requirements: - Only can be called by the current {defaultAdmin}. May emit a DefaultAdminTransferCanceled event."},"changeDefaultAdminDelay(uint48)":{"details":"Initiates a {defaultAdminDelay} update by setting a {pendingDefaultAdminDelay} scheduled for getting into effect after the current timestamp plus a {defaultAdminDelay}. This function guarantees that any call to {beginDefaultAdminTransfer} done between the timestamp this method is called and the {pendingDefaultAdminDelay} effect schedule will use the current {defaultAdminDelay} set before calling. The {pendingDefaultAdminDelay}'s effect schedule is defined in a way that waiting until the schedule and then calling {beginDefaultAdminTransfer} with the new delay will take at least the same as another {defaultAdmin} complete transfer (including acceptance). The schedule is designed for two scenarios: - When the delay is changed for a larger one the schedule is `block.timestamp + newDelay` capped by {defaultAdminDelayIncreaseWait}. - When the delay is changed for a shorter one, the schedule is `block.timestamp + (current delay - new delay)`. A {pendingDefaultAdminDelay} that never got into effect will be canceled in favor of a new scheduled change. Requirements: - Only can be called by the current {defaultAdmin}. 
Emits a DefaultAdminDelayChangeScheduled event and may emit a DefaultAdminDelayChangeCanceled event."},"constructor":{"details":"See `AccessControlDefaultAdminRules` for information on contract administration. - Admin role can grant and revoke Sequencer roles. - Admin role can be transferred via two-step process with a 1 day timelock.","params":{"admin":"- the address that will be the initial admin."}},"defaultAdmin()":{"details":"Returns the address of the current `DEFAULT_ADMIN_ROLE` holder."},"defaultAdminDelay()":{"details":"Returns the delay required to schedule the acceptance of a {defaultAdmin} transfer started. This delay will be added to the current timestamp when calling {beginDefaultAdminTransfer} to set the acceptance schedule. NOTE: If a delay change has been scheduled, it will take effect as soon as the schedule passes, making this function returns the new delay. See {changeDefaultAdminDelay}."},"defaultAdminDelayIncreaseWait()":{"details":"Maximum time in seconds for an increase to {defaultAdminDelay} (that is scheduled using {changeDefaultAdminDelay}) to take effect. Default to 5 days. When the {defaultAdminDelay} is scheduled to be increased, it goes into effect after the new delay has passed with the purpose of giving enough time for reverting any accidental change (i.e. using milliseconds instead of seconds) that may lock the contract. However, to avoid excessive schedules, the wait is capped by this function and it can be overrode for a custom {defaultAdminDelay} increase scheduling. IMPORTANT: Make sure to add a reasonable amount of time while overriding this value, otherwise, there's a risk of setting a high new delay that goes into effect almost immediately without the possibility of human intervention in the case of an input error (eg. set milliseconds instead of seconds)."},"enter(uint256,address)":{"custom:emits":"Enter indicating the amount of Ether to mint on the rollup & its recipient.","details":"Permanently burns the entire msg.value by locking it in this contract.","params":{"rollupChainId":"- The rollup chain to enter.","rollupRecipient":"- The recipient of the Ether on the rollup."}},"enter(uint256,address,address,uint256)":{"custom:emits":"Enter indicating the amount of tokens to mint on the rollup & its recipient.","details":"Permanently burns the token amount by locking it in this contract.","params":{"amount":"- The amount of the ERC20 token to transfer to the rollup.","rollupChainId":"- The rollup chain to enter.","rollupRecipient":"- The recipient of the Ether on the rollup.","token":"- The address of the ERC20 token on the Host."}},"fulfillExits((uint256,address,address,uint256)[])":{"custom:emits":"ExitFilled for each exit order fulfilled.","details":"Builder SHOULD call `fulfillExits` atomically with `submitBlock`. Builder SHOULD set a block expiration time that is AT MOST the minimum of all exit order deadlines; this way, `fulfillExits` + `submitBlock` will revert atomically on mainnet if any exit orders have expired. Otherwise, `fulfillExits` may mine on mainnet, while `submitExit` reverts on the rollup, and the Builder can't collect the corresponding value on the rollup. Called by the Builder atomically with a transaction calling `submitBlock`.
The user-submitted transactions initiating the ExitOrders on the rollup must be included by the Builder in the rollup block submitted via `submitBlock`. The user transfers tokenIn on the rollup, and receives tokenOut on host. The Builder receives tokenIn on the rollup, and transfers tokenOut to the user on host. The rollup STF MUST NOT apply `submitExit` transactions to the rollup state UNLESS a corresponding ExitFilled event is emitted on host in the same block. If the user submits multiple exit transactions for the same token in the same rollup block, the Builder may transfer the cumulative tokenOut to the user in a single ExitFilled event. The rollup STF will apply the user's exit transactions on the rollup up to the point that sum(tokenOut) is lte the ExitFilled amount. TODO: add option to fulfill ExitOrders with native ETH? or is it sufficient to only allow users to exit via WETH?","params":{"orders":"The exit orders to fulfill"}},"getRoleAdmin(bytes32)":{"details":"Returns the admin role that controls `role`. See {grantRole} and {revokeRole}. To change a role's admin, use {_setRoleAdmin}."},"grantRole(bytes32,address)":{"details":"See {AccessControl-grantRole}. Reverts for `DEFAULT_ADMIN_ROLE`."},"hasRole(bytes32,address)":{"details":"Returns `true` if `account` has been granted `role`."},"owner()":{"details":"See {IERC5313-owner}."},"pendingDefaultAdmin()":{"details":"Returns a tuple of a `newAdmin` and an accept schedule. After the `schedule` passes, the `newAdmin` will be able to accept the {defaultAdmin} role by calling {acceptDefaultAdminTransfer}, completing the role transfer. A zero value only in `acceptSchedule` indicates no pending admin transfer. NOTE: A zero address `newAdmin` means that {defaultAdmin} is being renounced."},"pendingDefaultAdminDelay()":{"details":"Returns a tuple of `newDelay` and an effect schedule. After the `schedule` passes, the `newDelay` will get into effect immediately for every new {defaultAdmin} transfer started with {beginDefaultAdminTransfer}. A zero value only in `effectSchedule` indicates no pending delay change. NOTE: A zero value only for `newDelay` means that the next {defaultAdminDelay} will be zero after the effect schedule."},"renounceRole(bytes32,address)":{"details":"See {AccessControl-renounceRole}. For the `DEFAULT_ADMIN_ROLE`, it only allows renouncing in two steps by first calling {beginDefaultAdminTransfer} to the `address(0)`, so it's required that the {pendingDefaultAdmin} schedule has also passed when calling this function. After its execution, it will not be possible to call `onlyRole(DEFAULT_ADMIN_ROLE)` functions. NOTE: Renouncing `DEFAULT_ADMIN_ROLE` will leave the contract without a {defaultAdmin}, thereby disabling any functionality that is only available for it, and the possibility of reassigning a non-administrated role."},"revokeRole(bytes32,address)":{"details":"See {AccessControl-revokeRole}. Reverts for `DEFAULT_ADMIN_ROLE`."},"rollbackDefaultAdminDelay()":{"details":"Cancels a scheduled {defaultAdminDelay} change. Requirements: - Only can be called by the current {defaultAdmin}.
May emit a DefaultAdminDelayChangeCanceled event."},"submitBlock((uint256,uint256,uint256,uint256,address),bytes32,uint8,bytes32,bytes32,bytes)":{"custom:emits":"BlockSubmitted if the block is successfully submitted. BlockData to expose the block calldata; as a convenience until calldata tracing is implemented in the Node.","custom:reverts":"BadSequence if the sequence number is not the next block for the given rollup chainId. BlockExpired if the confirmBy time has passed. BadSignature if the signer is not a permissioned sequencer, OR if the signature provided commits to a different header. OneRollupBlockPerHostBlock if attempting to submit a second rollup block within one host block.","details":"Blocks are submitted by Builders, with an attestation to the block data signed by a Sequencer. Including blockDataHash allows the sequencer to sign over finalized block data, without needing to calldatacopy the `blockData` param.","params":{"blockData":"- block data information. Could be packed blob hashes, or direct rlp-encoded transactions. blockData is ignored by the contract logic.","blockDataHash":"- keccak256(blockData). The Node will discard the block if the hash doesn't match.","header":"- the header information for the rollup block.","r":"- the r component of the Sequencer's ECDSA signature over the block header.","s":"- the s component of the Sequencer's ECDSA signature over the block header.","v":"- the v component of the Sequencer's ECDSA signature over the block header."}},"supportsInterface(bytes4)":{"details":"See {IERC165-supportsInterface}."}},"version":1},"userdoc":{"kind":"user","methods":{"SEQUENCER_ROLE()":{"notice":"Role that allows a key to sign commitments to rollup blocks."},"blockCommitment((uint256,uint256,uint256,uint256,address),bytes32)":{"notice":"Construct hash of block details that the sequencer signs."},"constructor":{"notice":"Initializes the Admin role."},"enter(uint256,address)":{"notice":"Allows native Ether to enter the rollup."},"enter(uint256,address,address,uint256)":{"notice":"Allows ERC20s to enter the rollup."},"fulfillExits((uint256,address,address,uint256)[])":{"notice":"Fulfills exit orders by transferring tokenOut to the recipient"},"lastSubmittedAtBlock(uint256)":{"notice":"The host block number that a block was last submitted at for a given rollup chainId. rollupChainId => host blockNumber that block was last submitted at"},"nextSequence(uint256)":{"notice":"The sequence number of the next block that can be submitted for a given rollup chainId.
rollupChainId => nextSequence number"},"submitBlock((uint256,uint256,uint256,uint256,address),bytes32,uint8,bytes32,bytes32,bytes)":{"notice":"Submit a rollup block with block data submitted via calldata."}},"version":1}},"settings":{"remappings":["@openzeppelin/contracts/=lib/openzeppelin-contracts/contracts/","ds-test/=lib/forge-std/lib/ds-test/src/","erc4626-tests/=lib/openzeppelin-contracts/lib/erc4626-tests/","forge-std/=lib/forge-std/src/","openzeppelin-contracts/=lib/openzeppelin-contracts/"],"optimizer":{"enabled":true,"runs":200},"metadata":{"bytecodeHash":"ipfs"},"compilationTarget":{"src/Zenith.sol":"Zenith"},"evmVersion":"cancun","libraries":{}},"sources":{"lib/openzeppelin-contracts/contracts/access/AccessControl.sol":{"keccak256":"0xa0e92d42942f4f57c5be50568dac11e9d00c93efcb458026e18d2d9b9b2e7308","urls":["bzz-raw://46326c0bb1e296b67185e81c918e0b40501b8b6386165855df0a3f3c634b6a80","dweb:/ipfs/QmTwyrDYtsxsk6pymJTK94PnEpzsmkpUxFuzEiakDopy4Z"],"license":"MIT"},"lib/openzeppelin-contracts/contracts/access/IAccessControl.sol":{"keccak256":"0xc503b1464e90b1cf79d81239f719f81c35ff646b17b638c87fe87a1d7bc5d94d","urls":["bzz-raw://381076837654e98f1d5dfc3909a3ebb80e2c86a97d662b507320701e09cb7a60","dweb:/ipfs/QmWGwdWe9JWx2ae3n8EhWuY6ipWo6shVg9bct6y5og7v9Y"],"license":"MIT"},"lib/openzeppelin-contracts/contracts/access/extensions/AccessControlDefaultAdminRules.sol":{"keccak256":"0xd5e43578dce2678fbd458e1221dc37b20e983ecce4a314b422704f07d6015c5b","urls":["bzz-raw://9ea4d9ae3392dc9db1ef4d7ebef84ce7fa243dc14abb46e68eb2eb60d2cd0e93","dweb:/ipfs/QmRfjyDoLWF74EgmpcGkWZM7Kx1LgHN8dZHBxAnU9vPH46"],"license":"MIT"},"lib/openzeppelin-contracts/contracts/access/extensions/IAccessControlDefaultAdminRules.sol":{"keccak256":"0xc2dbeddf97707bf012827013b4a072bacbe56ad3219c405e30fd2a959e8a5413","urls":["bzz-raw://281289e424c30c2ea92fc25598315117410404cf76a756663ad39ba18fd38b48","dweb:/ipfs/Qma3wmq2cjxpfkKKM7JrvyJzzohsNWNNWsnaf3jVNBD65v"],"license":"MIT"},"lib/openzeppelin-contracts/contracts/interfaces/IERC5313.sol":{"keccak256":"0x22412c268e74cc3cbf550aecc2f7456f6ac40783058e219cfe09f26f4d396621","urls":["bzz-raw://0b841021f25480424d2359de4869e60e77f790f52e8e85f07aa389543024b559","dweb:/ipfs/QmV7U5ehV5xe3QrbE8ErxfWSSzK1T1dGeizXvYPjWpNDGq"],"license":"MIT"},"lib/openzeppelin-contracts/contracts/token/ERC20/IERC20.sol":{"keccak256":"0xee2337af2dc162a973b4be6d3f7c16f06298259e0af48c5470d2839bfa8a22f4","urls":["bzz-raw://30c476b4b2f405c1bb3f0bae15b006d129c80f1bfd9d0f2038160a3bb9745009","dweb:/ipfs/Qmb3VcuDufv6xbHeVgksC4tHpc5gKYVqBEwjEXW72XzSvN"],"license":"MIT"},"lib/openzeppelin-contracts/contracts/utils/Context.sol":{"keccak256":"0x493033a8d1b176a037b2cc6a04dad01a5c157722049bbecf632ca876224dd4b2","urls":["bzz-raw://6a708e8a5bdb1011c2c381c9a5cfd8a9a956d7d0a9dc1bd8bcdaf52f76ef2f12","dweb:/ipfs/Qmax9WHBnVsZP46ZxEMNRQpLQnrdE4dK8LehML1Py8FowF"],"license":"MIT"},"lib/openzeppelin-contracts/contracts/utils/Panic.sol":{"keccak256":"0x29074fe5a74bb024c57b3570abf6c74d8bceed3438694d470fd0166a3ecd196a","urls":["bzz-raw://f4f8435ccbc56e384f4cc9ac9ff491cf30a82f2beac00e33ccc2cf8af3f77cc3","dweb:/ipfs/QmUKJXxTe6nn1qfgnX8xbnboNNAPUuEmJyGqMZCKNiFBgn"],"license":"MIT"},"lib/openzeppelin-contracts/contracts/utils/introspection/ERC165.sol":{"keccak256":"0x6fac27fb1885a1d9fd2ce3f8fac4e44a6596ca4d44207c9ef2541ba8c941291e","urls":["bzz-raw://2079378abdb36baec15c23bc2353b73a3d28d1d0610b436b0c1c4e6fa61d65c9","dweb:/ipfs/QmVZkRFMzKW7sLaugKSTbMNnUBKWF3QDsoMi5uoQFyVMjf"],"license":"MIT"},"lib/openzeppelin-contracts/contracts/utils/introspe
ction/IERC165.sol":{"keccak256":"0xc859863e3bda7ec3cddf6dafe2ffe91bcbe648d1395b856b839c32ee9617c44c","urls":["bzz-raw://a9d5417888b873cf2225ed5d50b2a67be97c1504134a2a580512168d587ad82e","dweb:/ipfs/QmNr5fTb2heFW658NZn7dDnofZgFvQTnNxKRJ3wdnR1skX"],"license":"MIT"},"lib/openzeppelin-contracts/contracts/utils/math/Math.sol":{"keccak256":"0x3233b02fcf2b20a41cce60a62e43c7e5a67a55b738ec1db842a82452e6aa170d","urls":["bzz-raw://231c75d18bc6973533dfa7d58d2b97e504ca4e21d703a5c8b0ec31475e97db67","dweb:/ipfs/QmPJ29HDuFceD1FDr4CnjYYtvaQ234wGAfojZpL3RXFG26"],"license":"MIT"},"lib/openzeppelin-contracts/contracts/utils/math/SafeCast.sol":{"keccak256":"0x8cd59334ed58b8884cd1f775afc9400db702e674e5d6a7a438c655b9de788d7e","urls":["bzz-raw://99e62c7de7318f413b6352e3f2704ca23e7725ff144e43c8bd574d12dbf29047","dweb:/ipfs/QmSEXG2rBx1VxU2uFTWdiChjDvA4osEY2mesjmoVeVhHko"],"license":"MIT"},"src/Passage.sol":{"keccak256":"0x81016c92006558f93c028e3d4f61ddad8ff870b956edaa19ad2ccd68ec5d292a","urls":["bzz-raw://dc70a7d97b18e988ce9560f4fabbf9caea3c6178f64fab056b1cf63d27bef6c5","dweb:/ipfs/QmeJDLqvLdhkbWfyLHdYUPoGz7XHWw3zpe8YTCMQE9MacX"],"license":"UNLICENSED"},"src/Zenith.sol":{"keccak256":"0x0febef21c15ebf62421e25337341a8a11a6dd5b5dc2e9ea967a2d4769469ecd6","urls":["bzz-raw://405a8eb90f834ab216e96d40b8c1cfd98c4bc4e71399b09c04ef4123eb3bb1ab","dweb:/ipfs/QmVakr7Upoe2tgU1jQSZUgXE1UASAuHh9kReZ2mfgCsdha"],"license":"UNLICENSED"}},"version":1},"id":35}
\ No newline at end of file
diff --git a/examples/exex/rollup/src/db.rs b/examples/exex/rollup/src/db.rs
new file mode 100644
index 000000000..201272a00
--- /dev/null
+++ b/examples/exex/rollup/src/db.rs
@@ -0,0 +1,460 @@
+use std::{
+    collections::{hash_map::Entry, HashMap},
+    str::FromStr,
+    sync::{Arc, Mutex, MutexGuard},
+};
+
+use reth_primitives::{
+    revm_primitives::{AccountInfo, Bytecode},
+    Address, Bytes, SealedBlockWithSenders, StorageEntry, B256, U256,
+};
+use reth_provider::{bundle_state::StorageRevertsIter, OriginalValuesKnown};
+use reth_revm::db::{
+    states::{PlainStorageChangeset, PlainStorageRevert},
+    BundleState,
+};
+use rusqlite::Connection;
+
+/// Type used to initialize revm's bundle state.
+type BundleStateInit =
+    HashMap<Address, (Option<AccountInfo>, Option<AccountInfo>, HashMap<B256, (U256, U256)>)>;
+
+/// Types used inside RevertsInit to initialize revm's reverts.
+pub type AccountRevertInit = (Option<Option<AccountInfo>>, Vec<StorageEntry>);
+
+/// Type used to initialize revm's reverts.
+pub type RevertsInit = HashMap<Address, AccountRevertInit>;
+
+pub struct Database {
+    connection: Arc<Mutex<Connection>>,
+}
+
+impl Database {
+    /// Create new database with the provided connection.
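+    ///
+    /// A minimal usage sketch (illustrative only; the in-memory connection here is an
+    /// assumption for the example, the real ExEx opens a file-backed database):
+    ///
+    /// ```ignore
+    /// let connection = rusqlite::Connection::open_in_memory()?;
+    /// let database = Database::new(connection)?;
+    /// ```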
+    pub fn new(connection: Connection) -> eyre::Result<Self> {
+        let database = Self { connection: Arc::new(Mutex::new(connection)) };
+        database.create_tables()?;
+        Ok(database)
+    }
+
+    fn connection(&self) -> MutexGuard<'_, Connection> {
+        self.connection.lock().expect("failed to acquire database lock")
+    }
+
+    fn create_tables(&self) -> eyre::Result<()> {
+        self.connection().execute_batch(
+            "CREATE TABLE IF NOT EXISTS block (
+                id     INTEGER PRIMARY KEY,
+                number TEXT UNIQUE,
+                data   TEXT
+            );
+            CREATE TABLE IF NOT EXISTS account (
+                id      INTEGER PRIMARY KEY,
+                address TEXT UNIQUE,
+                data    TEXT
+            );
+            CREATE TABLE IF NOT EXISTS account_revert (
+                id           INTEGER PRIMARY KEY,
+                block_number TEXT,
+                address      TEXT,
+                data         TEXT,
+                UNIQUE (block_number, address)
+            );
+            CREATE TABLE IF NOT EXISTS storage (
+                id      INTEGER PRIMARY KEY,
+                address TEXT,
+                key     TEXT,
+                data    TEXT,
+                UNIQUE (address, key)
+            );
+            CREATE TABLE IF NOT EXISTS storage_revert (
+                id           INTEGER PRIMARY KEY,
+                block_number TEXT,
+                address      TEXT,
+                key          TEXT,
+                data         TEXT,
+                UNIQUE (block_number, address, key)
+            );
+            CREATE TABLE IF NOT EXISTS bytecode (
+                id   INTEGER PRIMARY KEY,
+                hash TEXT UNIQUE,
+                data TEXT
+            );",
+        )?;
+        Ok(())
+    }
+
+    /// Insert block with bundle into the database.
+    pub fn insert_block_with_bundle(
+        &self,
+        block: &SealedBlockWithSenders,
+        bundle: BundleState,
+    ) -> eyre::Result<()> {
+        let mut connection = self.connection();
+        let tx = connection.transaction()?;
+
+        tx.execute(
+            "INSERT INTO block (number, data) VALUES (?, ?)",
+            (block.header.number.to_string(), serde_json::to_string(block)?),
+        )?;
+
+        let (changeset, reverts) = bundle.into_plain_state_and_reverts(OriginalValuesKnown::Yes);
+
+        for (address, account) in changeset.accounts {
+            if let Some(account) = account {
+                tx.execute(
+                    "INSERT INTO account (address, data) VALUES (?, ?) ON CONFLICT(address) DO UPDATE SET data = excluded.data",
+                    (address.to_string(), serde_json::to_string(&account)?),
+                )?;
+            } else {
+                tx.execute("DELETE FROM account WHERE address = ?", (address.to_string(),))?;
+            }
+        }
+
+        if reverts.accounts.len() > 1 {
+            eyre::bail!("too many blocks in account reverts");
+        }
+        if let Some(account_reverts) = reverts.accounts.into_iter().next() {
+            for (address, account) in account_reverts {
+                tx.execute(
+                    "INSERT INTO account_revert (block_number, address, data) VALUES (?, ?, ?) ON CONFLICT(block_number, address) DO UPDATE SET data = excluded.data",
+                    (block.header.number.to_string(), address.to_string(), serde_json::to_string(&account)?),
+                )?;
+            }
+        }
+
+        for PlainStorageChangeset { address, wipe_storage, storage } in changeset.storage {
+            if wipe_storage {
+                tx.execute("DELETE FROM storage WHERE address = ?", (address.to_string(),))?;
+            }
+
+            for (key, data) in storage {
+                tx.execute(
+                    "INSERT INTO storage (address, key, data) VALUES (?, ?, ?) ON CONFLICT(address, key) DO UPDATE SET data = excluded.data",
+                    (address.to_string(), B256::from(key).to_string(), data.to_string()),
+                )?;
+            }
+        }
+
+        if reverts.storage.len() > 1 {
+            eyre::bail!("too many blocks in storage reverts");
+        }
+        if let Some(storage_reverts) = reverts.storage.into_iter().next() {
+            for PlainStorageRevert { address, wiped, storage_revert } in storage_reverts {
+                let storage = storage_revert
+                    .into_iter()
+                    .map(|(k, v)| (B256::new(k.to_be_bytes()), v))
+                    .collect::<Vec<_>>();
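+                // If the account's storage was wiped in this block, the pre-wipe
+                // entries are loaded next so they can be recorded as reverts too.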
+                let wiped_storage =
+                    if wiped { get_storages(&tx, address)? } else { Vec::new() };
+                for (key, data) in StorageRevertsIter::new(storage, wiped_storage) {
+                    tx.execute(
+                        "INSERT INTO storage_revert (block_number, address, key, data) VALUES (?, ?, ?, ?) ON CONFLICT(block_number, address, key) DO UPDATE SET data = excluded.data",
+                        (block.header.number.to_string(), address.to_string(), key.to_string(), data.to_string()),
+                    )?;
+                }
+            }
+        }
+
+        for (hash, bytecode) in changeset.contracts {
+            tx.execute(
+                "INSERT INTO bytecode (hash, data) VALUES (?, ?) ON CONFLICT(hash) DO NOTHING",
+                (hash.to_string(), bytecode.bytes().to_string()),
+            )?;
+        }
+
+        tx.commit()?;
+
+        Ok(())
+    }
+
+    /// Reverts the tip block from the database, checking it against the provided block number.
+    ///
+    /// The code is adapted from
+    pub fn revert_tip_block(&self, block_number: U256) -> eyre::Result<()> {
+        let mut connection = self.connection();
+        let tx = connection.transaction()?;
+
+        let tip_block_number = tx
+            .query_row::<String, _, _>(
+                "SELECT number FROM block ORDER BY number DESC LIMIT 1",
+                [],
+                |row| row.get(0),
+            )
+            .map(|data| U256::from_str(&data))??;
+        if block_number != tip_block_number {
+            eyre::bail!("Reverts can only be done from the tip. Attempted to revert block {} with tip block {}", block_number, tip_block_number);
+        }
+
+        tx.execute("DELETE FROM block WHERE number = ?", (block_number.to_string(),))?;
+
+        let mut state = BundleStateInit::new();
+        let mut reverts = RevertsInit::new();
+
+        let account_reverts = tx
+            .prepare("SELECT address, data FROM account_revert WHERE block_number = ?")?
+            .query((block_number.to_string(),))?
+            .mapped(|row| {
+                Ok((
+                    Address::from_str(row.get_ref(0)?.as_str()?),
+                    serde_json::from_str::<Option<AccountInfo>>(row.get_ref(1)?.as_str()?),
+                ))
+            })
+            .map(|result| {
+                let (address, data) = result?;
+                Ok((address?, data?))
+            })
+            .collect::<eyre::Result<Vec<_>>>()?;
+
+        for (address, old_info) in account_reverts {
+            // insert old info into reverts
+            reverts.entry(address).or_default().0 = Some(old_info.clone());
+
+            match state.entry(address) {
+                Entry::Vacant(entry) => {
+                    let new_info = get_account(&tx, address)?;
+                    entry.insert((old_info, new_info, HashMap::new()));
+                }
+                Entry::Occupied(mut entry) => {
+                    // overwrite old account state
+                    entry.get_mut().0 = old_info;
+                }
+            }
+        }
+
+        let storage_reverts = tx
+            .prepare("SELECT address, key, data FROM storage_revert WHERE block_number = ?")?
+            .query((block_number.to_string(),))?
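+            // Each row decodes to (address, storage key, pre-block value) in the chain below.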
+            .mapped(|row| {
+                Ok((
+                    Address::from_str(row.get_ref(0)?.as_str()?),
+                    B256::from_str(row.get_ref(1)?.as_str()?),
+                    U256::from_str(row.get_ref(2)?.as_str()?),
+                ))
+            })
+            .map(|result| {
+                let (address, key, data) = result?;
+                Ok((address?, key?, data?))
+            })
+            .collect::<eyre::Result<Vec<_>>>()?;
+
+        for (address, key, old_data) in storage_reverts.into_iter().rev() {
+            let old_storage = StorageEntry { key, value: old_data };
+
+            // insert old info into reverts
+            reverts.entry(address).or_default().1.push(old_storage);
+
+            // get account state or insert from plain state
+            let account_state = match state.entry(address) {
+                Entry::Vacant(entry) => {
+                    let present_info = get_account(&tx, address)?;
+                    entry.insert((present_info.clone(), present_info, HashMap::new()))
+                }
+                Entry::Occupied(entry) => entry.into_mut(),
+            };
+
+            // match storage
+            match account_state.2.entry(old_storage.key) {
+                Entry::Vacant(entry) => {
+                    let new_value = get_storage(&tx, address, old_storage.key)?.unwrap_or_default();
+                    entry.insert((old_storage.value, new_value));
+                }
+                Entry::Occupied(mut entry) => {
+                    entry.get_mut().0 = old_storage.value;
+                }
+            };
+        }
+
+        // iterate over the local plain state and revert all accounts and storages
+        for (address, (old_account, new_account, storage)) in state {
+            // revert account if needed
+            if old_account != new_account {
+                if let Some(account) = old_account {
+                    upsert_account(&tx, address, |_| Ok(account))?;
+                } else {
+                    delete_account(&tx, address)?;
+                }
+            }
+
+            // revert storages
+            for (storage_key, (old_storage_value, _new_storage_value)) in storage {
+                // delete previous value
+                delete_storage(&tx, address, storage_key)?;
+
+                // insert value if needed
+                if !old_storage_value.is_zero() {
+                    upsert_storage(&tx, address, storage_key, old_storage_value)?;
+                }
+            }
+        }
+
+        tx.commit()?;
+
+        Ok(())
+    }
+
+    /// Get block by number.
+    pub fn get_block(&self, number: U256) -> eyre::Result<Option<SealedBlockWithSenders>> {
+        let block = self.connection().query_row::<String, _, _>(
+            "SELECT data FROM block WHERE number = ?",
+            (number.to_string(),),
+            |row| row.get(0),
+        );
+        match block {
+            Ok(data) => Ok(Some(serde_json::from_str(&data)?)),
+            Err(rusqlite::Error::QueryReturnedNoRows) => Ok(None),
+            Err(e) => Err(e.into()),
+        }
+    }
+
+    /// Insert new account if it does not exist, update otherwise. The provided closure is called
+    /// with the current account, if it exists.
+    pub fn upsert_account(
+        &self,
+        address: Address,
+        f: impl FnOnce(Option<AccountInfo>) -> eyre::Result<AccountInfo>,
+    ) -> eyre::Result<()> {
+        upsert_account(&self.connection(), address, f)
+    }
+
+    /// Get account by address.
+    pub fn get_account(&self, address: Address) -> eyre::Result<Option<AccountInfo>> {
+        get_account(&self.connection(), address)
+    }
+}
+
+/// Insert new account if it does not exist, update otherwise. The provided closure is called
+/// with the current account, if it exists. Connection can be either
+/// [rusqlite::Transaction] or [rusqlite::Connection].
+fn upsert_account(
+    connection: &Connection,
+    address: Address,
+    f: impl FnOnce(Option<AccountInfo>) -> eyre::Result<AccountInfo>,
+) -> eyre::Result<()> {
+    let account = get_account(connection, address)?;
+    let account = f(account)?;
+    connection.execute(
+        "INSERT INTO account (address, data) VALUES (?, ?) ON CONFLICT(address) DO UPDATE SET data = excluded.data",
+        (address.to_string(), serde_json::to_string(&account)?),
+    )?;
+
+    Ok(())
+}
+
+/// Delete account by address. Connection can be either [rusqlite::Transaction] or
+/// [rusqlite::Connection].
+fn delete_account(connection: &Connection, address: Address) -> eyre::Result<()> {
+    connection.execute("DELETE FROM account WHERE address = ?", (address.to_string(),))?;
+    Ok(())
+}
+
+/// Get account by address using the database connection. Connection can be either
+/// [rusqlite::Transaction] or [rusqlite::Connection].
+fn get_account(connection: &Connection, address: Address) -> eyre::Result<Option<AccountInfo>> {
+    match connection.query_row::<String, _, _>(
+        "SELECT data FROM account WHERE address = ?",
+        (address.to_string(),),
+        |row| row.get(0),
+    ) {
+        Ok(account_info) => Ok(Some(serde_json::from_str(&account_info)?)),
+        Err(rusqlite::Error::QueryReturnedNoRows) => Ok(None),
+        Err(e) => Err(e.into()),
+    }
+}
+
+/// Insert new storage if it does not exist, update otherwise. Connection can be either
+/// [rusqlite::Transaction] or [rusqlite::Connection].
+fn upsert_storage(
+    connection: &Connection,
+    address: Address,
+    key: B256,
+    data: U256,
+) -> eyre::Result<()> {
+    connection.execute(
+        "INSERT INTO storage (address, key, data) VALUES (?, ?, ?) ON CONFLICT(address, key) DO UPDATE SET data = excluded.data",
+        (address.to_string(), key.to_string(), data.to_string()),
+    )?;
+    Ok(())
+}
+
+/// Delete storage by address and key. Connection can be either [rusqlite::Transaction] or
+/// [rusqlite::Connection].
+fn delete_storage(connection: &Connection, address: Address, key: B256) -> eyre::Result<()> {
+    connection.execute(
+        "DELETE FROM storage WHERE address = ? AND key = ?",
+        (address.to_string(), key.to_string()),
+    )?;
+    Ok(())
+}
+
+/// Get all storages for the provided address using the database connection. Connection can be
+/// either [rusqlite::Transaction] or [rusqlite::Connection].
+fn get_storages(connection: &Connection, address: Address) -> eyre::Result<Vec<(B256, U256)>> {
+    connection
+        .prepare("SELECT key, data FROM storage WHERE address = ?")?
+        .query((address.to_string(),))?
+        .mapped(|row| {
+            Ok((
+                B256::from_str(row.get_ref(0)?.as_str()?),
+                U256::from_str(row.get_ref(1)?.as_str()?),
+            ))
+        })
+        .map(|result| {
+            let (key, data) = result?;
+            Ok((key?, data?))
+        })
+        .collect()
+}
+
+/// Get storage for the provided address by key using the database connection. Connection can be
+/// either [rusqlite::Transaction] or [rusqlite::Connection].
+fn get_storage(connection: &Connection, address: Address, key: B256) -> eyre::Result<Option<U256>> {
+    match connection.query_row::<String, _, _>(
+        "SELECT data FROM storage WHERE address = ? AND key = ?",
AND key = ?", + (address.to_string(), key.to_string()), + |row| row.get(0), + ) { + Ok(data) => Ok(Some(U256::from_str(&data)?)), + Err(rusqlite::Error::QueryReturnedNoRows) => Ok(None), + Err(e) => Err(e.into()), + } +} + +impl reth_revm::Database for Database { + type Error = eyre::Report; + + fn basic(&mut self, address: Address) -> Result, Self::Error> { + self.get_account(address) + } + + fn code_by_hash(&mut self, code_hash: B256) -> Result { + let bytecode = self.connection().query_row::( + "SELECT data FROM bytecode WHERE hash = ?", + (code_hash.to_string(),), + |row| row.get(0), + ); + match bytecode { + Ok(data) => Ok(Bytecode::new_raw(Bytes::from_str(&data).unwrap())), + Err(rusqlite::Error::QueryReturnedNoRows) => Ok(Bytecode::default()), + Err(err) => Err(err.into()), + } + } + + fn storage(&mut self, address: Address, index: U256) -> Result { + get_storage(&self.connection(), address, index.into()).map(|data| data.unwrap_or_default()) + } + + fn block_hash(&mut self, number: U256) -> Result { + let block_hash = self.connection().query_row::( + "SELECT hash FROM block WHERE number = ?", + (number.to_string(),), + |row| row.get(0), + ); + match block_hash { + Ok(data) => Ok(B256::from_str(&data).unwrap()), + // No special handling for `QueryReturnedNoRows` is needed, because revm does block + // number bound checks on its own. + // See https://github.com/bluealloy/revm/blob/1ca3d39f6a9e9778f8eb0fcb74fe529345a531b4/crates/interpreter/src/instructions/host.rs#L106-L123. + Err(err) => Err(err.into()), + } + } +} diff --git a/examples/exex/rollup/src/execution.rs b/examples/exex/rollup/src/execution.rs new file mode 100644 index 000000000..1403833d3 --- /dev/null +++ b/examples/exex/rollup/src/execution.rs @@ -0,0 +1,487 @@ +use crate::{db::Database, RollupContract, CHAIN_ID, CHAIN_SPEC}; +use alloy_consensus::{Blob, SidecarCoder, SimpleCoder}; +use alloy_rlp::Decodable as _; +use eyre::OptionExt; +use reth::transaction_pool::TransactionPool; +use reth_interfaces::executor::BlockValidationError; +use reth_node_api::{ConfigureEvm, ConfigureEvmEnv}; +use reth_node_ethereum::EthEvmConfig; +use reth_primitives::{ + constants, + eip4844::kzg_to_versioned_hash, + keccak256, + revm::env::fill_tx_env, + revm_primitives::{CfgEnvWithHandlerCfg, EVMError, ExecutionResult, ResultAndState}, + Address, Block, BlockWithSenders, Bytes, Hardfork, Header, Receipt, TransactionSigned, TxType, + B256, U256, +}; +use reth_revm::{ + db::{states::bundle_state::BundleRetention, BundleState}, + DBBox, DatabaseCommit, Evm, StateBuilder, StateDBBox, +}; +use reth_tracing::tracing::debug; + +/// Execute a rollup block and return (block with recovered senders)[BlockWithSenders], (bundle +/// state)[BundleState] and list of (receipts)[Receipt]. 
+pub async fn execute_block<Pool: TransactionPool>(
+    db: &mut Database,
+    pool: &Pool,
+    tx: &TransactionSigned,
+    header: &RollupContract::BlockHeader,
+    block_data: Bytes,
+    block_data_hash: B256,
+) -> eyre::Result<(BlockWithSenders, BundleState, Vec<Receipt>, Vec<ExecutionResult>)> {
+    if header.rollupChainId != U256::from(CHAIN_ID) {
+        eyre::bail!("Invalid rollup chain ID")
+    }
+
+    // Construct header
+    let header = construct_header(db, header)?;
+
+    // Decode transactions
+    let transactions = decode_transactions(pool, tx, block_data, block_data_hash).await?;
+
+    // Configure EVM
+    let evm_config = EthEvmConfig::default();
+    let mut evm = configure_evm(&evm_config, db, &header);
+
+    // Execute transactions
+    let (executed_txs, receipts, results) = execute_transactions(&mut evm, &header, transactions)?;
+
+    // Construct block and recover senders
+    let block = Block { header, body: executed_txs, ..Default::default() }
+        .with_recovered_senders()
+        .ok_or_eyre("failed to recover senders")?;
+
+    let bundle = evm.db_mut().take_bundle();
+
+    Ok((block, bundle, receipts, results))
+}
+
+/// Construct header from the given rollup header.
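+/// The rollup `sequence` doubles as the block number and `confirmBy` as the timestamp;
+/// the base fee is carried over from the parent block, or set to the EIP-1559 initial
+/// value at the London transition block.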
+
+/// Construct header from the given rollup header.
+fn construct_header(db: &Database, header: &RollupContract::BlockHeader) -> eyre::Result<Header> {
+    let parent_block = if !header.sequence.is_zero() {
+        db.get_block(header.sequence - U256::from(1))?
+    } else {
+        None
+    };
+
+    let block_number = u64::try_from(header.sequence)?;
+
+    // Calculate base fee per gas for EIP-1559 transactions
+    let base_fee_per_gas =
+        if CHAIN_SPEC.fork(Hardfork::London).transitions_at_block(block_number) {
+            constants::EIP1559_INITIAL_BASE_FEE
+        } else {
+            parent_block
+                .as_ref()
+                .ok_or(eyre::eyre!("parent block not found"))?
+                .header
+                .next_block_base_fee(CHAIN_SPEC.base_fee_params_at_block(block_number))
+                .ok_or(eyre::eyre!("failed to calculate base fee"))?
+        };
+
+    // Construct header
+    Ok(Header {
+        parent_hash: parent_block.map(|block| block.header.hash()).unwrap_or_default(),
+        number: block_number,
+        gas_limit: u64::try_from(header.gasLimit)?,
+        timestamp: u64::try_from(header.confirmBy)?,
+        base_fee_per_gas: Some(base_fee_per_gas),
+        ..Default::default()
+    })
+}
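The `next_block_base_fee` call above encapsulates the EIP-1559 update rule. As a worked illustration (assuming the mainnet parameters: elasticity multiplier 2, change denominator 8), the arithmetic is roughly:

```rust
// Illustrative only; reth's `Header::next_block_base_fee` implements this
// rule with the chain's configured `BaseFeeParams`.
fn next_base_fee(gas_used: u64, gas_limit: u64, base_fee: u64) -> u64 {
    let gas_target = gas_limit / 2; // assumed elasticity multiplier of 2
    if gas_used > gas_target {
        // Base fee rises by up to 1/8 when blocks are over target
        let delta =
            (base_fee as u128 * (gas_used - gas_target) as u128 / gas_target as u128 / 8) as u64;
        base_fee + delta.max(1)
    } else if gas_used < gas_target {
        // ...and falls by up to 1/8 when blocks are under target
        let delta =
            (base_fee as u128 * (gas_target - gas_used) as u128 / gas_target as u128 / 8) as u64;
        base_fee - delta
    } else {
        base_fee
    }
}
```

For example, a completely full 30M-gas block against a 15M-gas target raises a 1 gwei base fee to 1.125 gwei.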
+
+/// Configure EVM with the given database and header.
+fn configure_evm<'a>(
+    config: &'a EthEvmConfig,
+    db: &'a mut Database,
+    header: &Header,
+) -> Evm<'a, (), StateDBBox<'a, eyre::Report>> {
+    let mut evm = config.evm(
+        StateBuilder::new_with_database(Box::new(db) as DBBox<'_, eyre::Report>)
+            .with_bundle_update()
+            .build(),
+    );
+    evm.db_mut().set_state_clear_flag(
+        CHAIN_SPEC.fork(Hardfork::SpuriousDragon).active_at_block(header.number),
+    );
+
+    let mut cfg = CfgEnvWithHandlerCfg::new_with_spec_id(evm.cfg().clone(), evm.spec_id());
+    EthEvmConfig::fill_cfg_and_block_env(
+        &mut cfg,
+        evm.block_mut(),
+        &CHAIN_SPEC,
+        header,
+        U256::ZERO,
+    );
+    *evm.cfg_mut() = cfg.cfg_env;
+
+    evm
+}
+
+/// Decode transactions from the block data and recover senders.
+/// - If the transaction is a blob-carrying one, decode the blobs either using the local
+///   transaction pool, or by querying Blobscan.
+/// - If the transaction is a regular one, decode the block data directly.
+async fn decode_transactions<Pool: TransactionPool>(
+    pool: &Pool,
+    tx: &TransactionSigned,
+    block_data: Bytes,
+    block_data_hash: B256,
+) -> eyre::Result<Vec<(TransactionSigned, Address)>> {
+    // Get raw transactions either from the blobs, or directly from the block data
+    let raw_transactions = if matches!(tx.tx_type(), TxType::Eip4844) {
+        let blobs: Vec<_> = if let Some(sidecar) = pool.get_blob(tx.hash)? {
+            // Try to get blobs from the transaction pool
+            sidecar.blobs.into_iter().zip(sidecar.commitments).collect()
+        } else {
+            // If the transaction is not found in the pool, try to get blobs from Blobscan
+            let blobscan_client = foundry_blob_explorers::Client::holesky();
+            let sidecar = blobscan_client.transaction(tx.hash).await?.blob_sidecar();
+            sidecar
+                .blobs
+                .into_iter()
+                .map(|blob| (*blob).into())
+                .zip(sidecar.commitments.into_iter().map(|commitment| (*commitment).into()))
+                .collect()
+        };
+
+        // Decode blob hashes from block data
+        let blob_hashes = Vec::<B256>::decode(&mut block_data.as_ref())?;
+
+        // Filter blobs that are present in the block data
+        let blobs = blobs
+            .into_iter()
+            // Convert blob KZG commitments to versioned hashes
+            .map(|(blob, commitment)| (blob, kzg_to_versioned_hash(commitment.as_slice())))
+            // Filter only blobs that are present in the block data
+            .filter(|(_, hash)| blob_hashes.contains(hash))
+            .map(|(blob, _)| Blob::from(*blob))
+            .collect::<Vec<_>>();
+        if blobs.len() != blob_hashes.len() {
+            eyre::bail!("some blobs not found")
+        }
+
+        // Decode blobs and concatenate them to get the raw transactions
+        let data = SimpleCoder::default()
+            .decode_all(&blobs)
+            .ok_or(eyre::eyre!("failed to decode blobs"))?
+            .concat();
+
+        data.into()
+    } else {
+        block_data
+    };
+
+    let raw_transaction_hash = keccak256(&raw_transactions);
+    if raw_transaction_hash != block_data_hash {
+        eyre::bail!("block data hash mismatch")
+    }
+
+    // Decode block data, filter only transactions with the correct chain ID and recover senders
+    let transactions = Vec::<TransactionSigned>::decode(&mut raw_transactions.as_ref())?
+        .into_iter()
+        .filter(|tx| tx.chain_id() == Some(CHAIN_ID))
+        .map(|tx| {
+            let sender = tx.recover_signer().ok_or(eyre::eyre!("failed to recover signer"))?;
+            Ok((tx, sender))
+        })
+        .collect::<eyre::Result<_>>()?;
+
+    Ok(transactions)
+}
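The blob path above reverses the encoding performed by the block submitter. A round-trip sketch with the same alloy types (this mirrors how the tests below build sidecars with `SidecarBuilder`):

```rust
use alloy_consensus::{SidecarBuilder, SidecarCoder, SimpleCoder};

// Pack arbitrary bytes into EIP-4844 blobs and decode them back; the
// concatenated output is what `decode_transactions` then RLP-decodes.
fn blob_roundtrip(data: &[u8]) -> eyre::Result<Vec<u8>> {
    let sidecar = SidecarBuilder::<SimpleCoder>::from_slice(data).build()?;
    SimpleCoder::default()
        .decode_all(&sidecar.blobs)
        .map(|chunks| chunks.concat())
        .ok_or_else(|| eyre::eyre!("failed to decode blobs"))
}
```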
+
+/// Execute transactions and return the list of executed transactions, receipts and
+/// execution results.
+fn execute_transactions(
+    evm: &mut Evm<'_, (), StateDBBox<'_, eyre::Report>>,
+    header: &Header,
+    transactions: Vec<(TransactionSigned, Address)>,
+) -> eyre::Result<(Vec<TransactionSigned>, Vec<Receipt>, Vec<ExecutionResult>)> {
+    let mut receipts = Vec::with_capacity(transactions.len());
+    let mut executed_txs = Vec::with_capacity(transactions.len());
+    let mut results = Vec::with_capacity(transactions.len());
+    if !transactions.is_empty() {
+        let mut cumulative_gas_used = 0;
+        for (transaction, sender) in transactions {
+            // The sum of the transaction's gas limit, Tg, and the gas utilized in this block
+            // prior, must be no greater than the block's gasLimit.
+            let block_available_gas = header.gas_limit - cumulative_gas_used;
+            if transaction.gas_limit() > block_available_gas {
+                return Err(BlockValidationError::TransactionGasLimitMoreThanAvailableBlockGas {
+                    transaction_gas_limit: transaction.gas_limit(),
+                    block_available_gas,
+                }
+                .into())
+            }
+            // Execute transaction.
+            // Fill revm structure.
+            fill_tx_env(evm.tx_mut(), &transaction, sender);
+
+            let ResultAndState { result, state } = match evm.transact() {
+                Ok(result) => result,
+                Err(err) => {
+                    match err {
+                        EVMError::Transaction(err) => {
+                            // if the transaction is invalid, we can skip it
+                            debug!(%err, ?transaction, "Skipping invalid transaction");
+                            continue
+                        }
+                        err => {
+                            // this is an error that we should treat as fatal for this attempt
+                            eyre::bail!(err)
+                        }
+                    }
+                }
+            };
+
+            debug!(?transaction, ?result, ?state, "Executed transaction");
+
+            evm.db_mut().commit(state);
+
+            // append gas used
+            cumulative_gas_used += result.gas_used();
+
+            // Push transaction changeset and calculate header bloom filter for receipt.
+            #[allow(clippy::needless_update)] // side-effect of optimism fields
+            receipts.push(Receipt {
+                tx_type: transaction.tx_type(),
+                success: result.is_success(),
+                cumulative_gas_used,
+                logs: result.logs().iter().cloned().map(Into::into).collect(),
+                ..Default::default()
+            });
+
+            // append transaction to the list of executed transactions
+            executed_txs.push(transaction);
+            results.push(result);
+        }
+
+        evm.db_mut().merge_transitions(BundleRetention::Reverts);
+    }
+
+    Ok((executed_txs, receipts, results))
+}
+
+#[cfg(test)]
+mod tests {
+    use std::time::{SystemTime, UNIX_EPOCH};
+
+    use alloy_consensus::{SidecarBuilder, SimpleCoder};
+    use alloy_sol_types::{sol, SolCall};
+    use reth::transaction_pool::{
+        test_utils::{testing_pool, MockTransaction},
+        TransactionOrigin, TransactionPool,
+    };
+    use reth_interfaces::test_utils::generators::{self, sign_tx_with_key_pair};
+    use reth_primitives::{
+        bytes,
+        constants::ETH_TO_WEI,
+        keccak256, public_key_to_address,
+        revm_primitives::{AccountInfo, ExecutionResult, Output, TransactTo, TxEnv},
+        BlockNumber, Receipt, SealedBlockWithSenders, Transaction, TxEip2930, TxKind, U256,
+    };
+    use reth_revm::Evm;
+    use rusqlite::Connection;
+    use secp256k1::{Keypair, Secp256k1};
+
+    use crate::{
+        db::Database, execute_block, RollupContract::BlockHeader, CHAIN_ID,
+        ROLLUP_SUBMITTER_ADDRESS,
+    };
+
+    sol!(
+        WETH,
+        r#"
+[
+    {
+        "constant":true,
+        "inputs":[
+            {
+                "name":"",
+                "type":"address"
+            }
+        ],
+        "name":"balanceOf",
+        "outputs":[
+            {
+                "name":"",
+                "type":"uint256"
+            }
+        ],
+        "payable":false,
+        "stateMutability":"view",
+        "type":"function"
+    }
+]
+        "#
+    );
+
+    #[tokio::test]
+    async fn test_execute_block() -> eyre::Result<()> {
+        reth_tracing::init_test_tracing();
+
+        let mut database = Database::new(Connection::open_in_memory()?)?;
+
+        // Create key pair
+        let secp = Secp256k1::new();
+        let key_pair = Keypair::new(&secp, &mut generators::rng());
+        let sender_address = public_key_to_address(key_pair.public_key());
+
+        // Deposit some ETH to the sender and insert it into database
+        database.upsert_account(sender_address, |_| {
+            Ok(AccountInfo { balance: U256::from(ETH_TO_WEI), nonce: 1, ..Default::default() })
+        })?;
+
+        // WETH deployment transaction sent using calldata
+        let (_, _, results) = execute_transaction(
+            &mut database,
+            key_pair,
+            0,
+            Transaction::Eip2930(TxEip2930 {
+                chain_id: CHAIN_ID,
+                nonce: 1,
+                gas_limit: 1_500_000,
+                gas_price: 1_500_000_000,
+                to: TxKind::Create,
+                // WETH9 bytecode
+                input:
bytes!("60606040526040805190810160405280600d81526020017f57726170706564204574686572000000000000000000000000000000000000008152506000908051906020019061004f9291906100c8565b506040805190810160405280600481526020017f57455448000000000000000000000000000000000000000000000000000000008152506001908051906020019061009b9291906100c8565b506012600260006101000a81548160ff021916908360ff16021790555034156100c357600080fd5b61016d565b828054600181600116156101000203166002900490600052602060002090601f016020900481019282601f1061010957805160ff1916838001178555610137565b82800160010185558215610137579182015b8281111561013657825182559160200191906001019061011b565b5b5090506101449190610148565b5090565b61016a91905b8082111561016657600081600090555060010161014e565b5090565b90565b610c348061017c6000396000f3006060604052600436106100af576000357c0100000000000000000000000000000000000000000000000000000000900463ffffffff16806306fdde03146100b9578063095ea7b31461014757806318160ddd146101a157806323b872dd146101ca5780632e1a7d4d14610243578063313ce5671461026657806370a082311461029557806395d89b41146102e2578063a9059cbb14610370578063d0e30db0146103ca578063dd62ed3e146103d4575b6100b7610440565b005b34156100c457600080fd5b6100cc6104dd565b6040518080602001828103825283818151815260200191508051906020019080838360005b8381101561010c5780820151818401526020810190506100f1565b50505050905090810190601f1680156101395780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b341561015257600080fd5b610187600480803573ffffffffffffffffffffffffffffffffffffffff1690602001909190803590602001909190505061057b565b604051808215151515815260200191505060405180910390f35b34156101ac57600080fd5b6101b461066d565b6040518082815260200191505060405180910390f35b34156101d557600080fd5b610229600480803573ffffffffffffffffffffffffffffffffffffffff1690602001909190803573ffffffffffffffffffffffffffffffffffffffff1690602001909190803590602001909190505061068c565b604051808215151515815260200191505060405180910390f35b341561024e57600080fd5b61026460048080359060200190919050506109d9565b005b341561027157600080fd5b610279610b05565b604051808260ff1660ff16815260200191505060405180910390f35b34156102a057600080fd5b6102cc600480803573ffffffffffffffffffffffffffffffffffffffff16906020019091905050610b18565b6040518082815260200191505060405180910390f35b34156102ed57600080fd5b6102f5610b30565b6040518080602001828103825283818151815260200191508051906020019080838360005b8381101561033557808201518184015260208101905061031a565b50505050905090810190601f1680156103625780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b341561037b57600080fd5b6103b0600480803573ffffffffffffffffffffffffffffffffffffffff16906020019091908035906020019091905050610bce565b604051808215151515815260200191505060405180910390f35b6103d2610440565b005b34156103df57600080fd5b61042a600480803573ffffffffffffffffffffffffffffffffffffffff1690602001909190803573ffffffffffffffffffffffffffffffffffffffff16906020019091905050610be3565b6040518082815260200191505060405180910390f35b34600360003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020600082825401925050819055503373ffffffffffffffffffffffffffffffffffffffff167fe1fffcc4923d04b559f4d29a8bfc6cda04eb5b0d3c460751c2402c5c5cc9109c346040518082815260200191505060405180910390a2565b60008054600181600116156101000203166002900480601f0160208091040260200160405190810160405280929190818152602001828054600181600116156101000203166002900480156105735780601f1061054857610100808354040283529160200191610573565b820191906000526020600020905b81548152906001019060200180831161055657829003601f16820
1915b505050505081565b600081600460003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060008573ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020819055508273ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff167f8c5be1e5ebec7d5bd14f71427d1e84f3dd0314c0f7b2291e5b200ac8c7c3b925846040518082815260200191505060405180910390a36001905092915050565b60003073ffffffffffffffffffffffffffffffffffffffff1631905090565b600081600360008673ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002054101515156106dc57600080fd5b3373ffffffffffffffffffffffffffffffffffffffff168473ffffffffffffffffffffffffffffffffffffffff16141580156107b457507fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff600460008673ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020016000205414155b156108cf5781600460008673ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020541015151561084457600080fd5b81600460008673ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020600082825403925050819055505b81600360008673ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020016000206000828254039250508190555081600360008573ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020600082825401925050819055508273ffffffffffffffffffffffffffffffffffffffff168473ffffffffffffffffffffffffffffffffffffffff167fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef846040518082815260200191505060405180910390a3600190509392505050565b80600360003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020016000205410151515610a2757600080fd5b80600360003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020600082825403925050819055503373ffffffffffffffffffffffffffffffffffffffff166108fc829081150290604051600060405180830381858888f193505050501515610ab457600080fd5b3373ffffffffffffffffffffffffffffffffffffffff167f7fcf532c15f0a6db0bd6d0e038bea71d30d808c7d98cb3bf7268a95bf5081b65826040518082815260200191505060405180910390a250565b600260009054906101000a900460ff1681565b60036020528060005260406000206000915090505481565b60018054600181600116156101000203166002900480601f016020809104026020016040519081016040528092919081815260200182805460018160011615610100020316600290048015610bc65780601f10610b9b57610100808354040283529160200191610bc6565b820191906000526020600020905b815481529060010190602001808311610ba957829003601f168201915b505050505081565b6000610bdb33848461068c565b905092915050565b60046020528160005260406000206020528060005260406000206000915091505054815600a165627a7a72305820deb4c2ccab3c2fdca32ab3f46728389c2fe2c165d5fafa07661e4e004f6c344a0029"), + ..Default::default() + }), + BlockDataSource::Calldata + ).await?; + + let weth_address 
= match results.first() {
+            Some(ExecutionResult::Success { output: Output::Create(_, Some(address)), .. }) => {
+                *address
+            }
+            _ => eyre::bail!("WETH contract address not found"),
+        };
+
+        // WETH deposit transaction sent using blobs
+        execute_transaction(
+            &mut database,
+            key_pair,
+            1,
+            Transaction::Eip2930(TxEip2930 {
+                chain_id: CHAIN_ID,
+                nonce: 2,
+                gas_limit: 50000,
+                gas_price: 1_500_000_000,
+                to: TxKind::Call(weth_address),
+                value: U256::from(0.5 * ETH_TO_WEI as f64),
+                input: bytes!("d0e30db0"),
+                ..Default::default()
+            }),
+            BlockDataSource::Blobs,
+        )
+        .await?;
+
+        // Verify WETH balance
+        let mut evm = Evm::builder()
+            .with_db(&mut database)
+            .with_tx_env(TxEnv {
+                caller: sender_address,
+                gas_limit: 50_000_000,
+                transact_to: TransactTo::Call(weth_address),
+                data: WETH::balanceOfCall::new((sender_address,)).abi_encode().into(),
+                ..Default::default()
+            })
+            .build();
+        let result = evm.transact().map_err(|err| eyre::eyre!(err))?.result;
+        assert_eq!(
+            result.output(),
+            Some(&U256::from(0.5 * ETH_TO_WEI as f64).to_be_bytes_vec().into())
+        );
+        drop(evm);
+
+        // Verify nonce
+        let account = database.get_account(sender_address)?.unwrap();
+        assert_eq!(account.nonce, 3);
+
+        // Revert block with WETH deposit transaction
+        database.revert_tip_block(U256::from(1))?;
+
+        // Verify WETH balance after revert
+        let mut evm = Evm::builder()
+            .with_db(&mut database)
+            .with_tx_env(TxEnv {
+                caller: sender_address,
+                gas_limit: 50_000_000,
+                transact_to: TransactTo::Call(weth_address),
+                data: WETH::balanceOfCall::new((sender_address,)).abi_encode().into(),
+                ..Default::default()
+            })
+            .build();
+        let result = evm.transact().map_err(|err| eyre::eyre!(err))?.result;
+        assert_eq!(result.output(), Some(&U256::ZERO.to_be_bytes_vec().into()));
+        drop(evm);
+
+        // Verify nonce after revert
+        let account = database.get_account(sender_address)?.unwrap();
+        assert_eq!(account.nonce, 2);
+
+        Ok(())
+    }
+
+    enum BlockDataSource {
+        Calldata,
+        Blobs,
+    }
+
+    async fn execute_transaction(
+        database: &mut Database,
+        key_pair: Keypair,
+        sequence: BlockNumber,
+        tx: Transaction,
+        block_data_source: BlockDataSource,
+    ) -> eyre::Result<(SealedBlockWithSenders, Vec<Receipt>, Vec<ExecutionResult>)> {
+        // Construct block header
+        let block_header = BlockHeader {
+            rollupChainId: U256::from(CHAIN_ID),
+            sequence: U256::from(sequence),
+            confirmBy: U256::from(SystemTime::now().duration_since(UNIX_EPOCH)?.as_secs()),
+            gasLimit: U256::from(30_000_000),
+            rewardAddress: ROLLUP_SUBMITTER_ADDRESS,
+        };
+        let encoded_transactions =
+            alloy_rlp::encode(vec![sign_tx_with_key_pair(key_pair, tx).envelope_encoded()]);
+        let block_data_hash = keccak256(&encoded_transactions);
+
+        let pool = testing_pool();
+
+        let (block_data, l1_transaction) = match block_data_source {
+            BlockDataSource::Calldata => (
+                encoded_transactions,
+                sign_tx_with_key_pair(key_pair, Transaction::Eip2930(TxEip2930::default())),
+            ),
+            BlockDataSource::Blobs => {
+                let sidecar =
+                    SidecarBuilder::<SimpleCoder>::from_slice(&encoded_transactions).build()?;
+                let blob_hashes =
+                    alloy_rlp::encode(sidecar.versioned_hashes().collect::<Vec<_>>());
+
+                let mut mock_transaction = MockTransaction::eip4844_with_sidecar(sidecar);
+                let transaction =
+                    sign_tx_with_key_pair(key_pair, Transaction::from(mock_transaction.clone()));
+                mock_transaction.set_hash(transaction.hash);
+                pool.add_transaction(TransactionOrigin::Local, mock_transaction).await?;
+                (blob_hashes, transaction)
+            }
+        };
+
+        // Execute block and insert into database
+        let (block, bundle, receipts, results) =
execute_block(
+            database,
+            &pool,
+            &l1_transaction,
+            &block_header,
+            block_data.into(),
+            block_data_hash,
+        )
+        .await?;
+        let block = block.seal_slow();
+        database.insert_block_with_bundle(&block, bundle)?;
+
+        Ok((block, receipts, results))
+    }
+}
diff --git a/examples/exex/rollup/src/main.rs b/examples/exex/rollup/src/main.rs
new file mode 100644
index 000000000..f1af0c1ae
--- /dev/null
+++ b/examples/exex/rollup/src/main.rs
@@ -0,0 +1,277 @@
+//! Example of a simple rollup that derives its state from the L1 chain by executing transactions,
+//! processing deposits and storing all related data in an SQLite database.
+//!
+//! The rollup contract accepts blocks of transactions and deposits of ETH and is deployed on
+//! Holesky at [ROLLUP_CONTRACT_ADDRESS].
+
+use alloy_sol_types::{sol, SolEventInterface, SolInterface};
+use db::Database;
+use execution::execute_block;
+use once_cell::sync::Lazy;
+use reth_exex::{ExExContext, ExExEvent};
+use reth_node_api::FullNodeComponents;
+use reth_node_ethereum::EthereumNode;
+use reth_primitives::{
+    address, Address, ChainSpec, ChainSpecBuilder, Genesis, SealedBlockWithSenders,
+    TransactionSigned, U256,
+};
+use reth_provider::Chain;
+use reth_tracing::tracing::{error, info};
+use rusqlite::Connection;
+use std::sync::Arc;
+
+mod db;
+mod execution;
+
+sol!(RollupContract, "rollup_abi.json");
+use RollupContract::{RollupContractCalls, RollupContractEvents};
+
+const DATABASE_PATH: &str = "rollup.db";
+const ROLLUP_CONTRACT_ADDRESS: Address = address!("97C0E40c6B5bb5d4fa3e2AA1C6b8bC7EA5ECAe31");
+const ROLLUP_SUBMITTER_ADDRESS: Address = address!("5b0517Dc94c413a5871536872605522E54C85a03");
+const CHAIN_ID: u64 = 17001;
+static CHAIN_SPEC: Lazy<Arc<ChainSpec>> = Lazy::new(|| {
+    Arc::new(
+        ChainSpecBuilder::default()
+            .chain(CHAIN_ID.into())
+            .genesis(Genesis::clique_genesis(CHAIN_ID, ROLLUP_SUBMITTER_ADDRESS))
+            .shanghai_activated()
+            .build(),
+    )
+});
+
+struct Rollup<Node: FullNodeComponents> {
+    ctx: ExExContext<Node>,
+    db: Database,
+}
+
+impl<Node: FullNodeComponents> Rollup<Node> {
+    fn new(ctx: ExExContext<Node>, connection: Connection) -> eyre::Result<Self> {
+        let db = Database::new(connection)?;
+        Ok(Self { ctx, db })
+    }
+
+    async fn start(mut self) -> eyre::Result<()> {
+        // Process all new chain state notifications
+        while let Some(notification) = self.ctx.notifications.recv().await {
+            if let Some(reverted_chain) = notification.reverted_chain() {
+                self.revert(&reverted_chain)?;
+            }
+
+            if let Some(committed_chain) = notification.committed_chain() {
+                self.commit(&committed_chain).await?;
+                self.ctx.events.send(ExExEvent::FinishedHeight(committed_chain.tip().number))?;
+            }
+        }
+
+        Ok(())
+    }
+
+    /// Process a new chain commit.
+    ///
+    /// This function decodes all transactions to the rollup contract into events, executes the
+    /// corresponding actions and inserts the results into the database.
+    async fn commit(&mut self, chain: &Chain) -> eyre::Result<()> {
+        let events = decode_chain_into_rollup_events(chain);
+
+        for (_, tx, event) in events {
+            match event {
+                // A new block is submitted to the rollup contract.
+                // The block is executed on top of existing rollup state and committed into the
+                // database.
+                RollupContractEvents::BlockSubmitted(RollupContract::BlockSubmitted {
+                    blockDataHash,
+                    ..
+                }) => {
+                    let call = RollupContractCalls::abi_decode(tx.input(), true)?;
+
+                    if let RollupContractCalls::submitBlock(RollupContract::submitBlockCall {
+                        header,
+                        blockData,
+                        ..
+ }) = call + { + match execute_block( + &mut self.db, + self.ctx.pool(), + tx, + &header, + blockData, + blockDataHash, + ) + .await + { + Ok((block, bundle, _, _)) => { + let block = block.seal_slow(); + self.db.insert_block_with_bundle(&block, bundle)?; + info!( + tx_hash = %tx.recalculate_hash(), + chain_id = %header.rollupChainId, + sequence = %header.sequence, + transactions = block.body.len(), + "Block submitted, executed and inserted into database" + ); + } + Err(err) => { + error!( + %err, + tx_hash = %tx.recalculate_hash(), + chain_id = %header.rollupChainId, + sequence = %header.sequence, + "Failed to execute block" + ); + } + } + } + } + // A deposit of ETH to the rollup contract. The deposit is added to the recipient's + // balance and committed into the database. + RollupContractEvents::Enter(RollupContract::Enter { + rollupChainId, + token, + rollupRecipient, + amount, + }) => { + if rollupChainId != U256::from(CHAIN_ID) { + error!(tx_hash = %tx.recalculate_hash(), "Invalid rollup chain ID"); + continue + } + if token != Address::ZERO { + error!(tx_hash = %tx.recalculate_hash(), "Only ETH deposits are supported"); + continue + } + + self.db.upsert_account(rollupRecipient, |account| { + let mut account = account.unwrap_or_default(); + account.balance += amount; + Ok(account) + })?; + + info!( + tx_hash = %tx.recalculate_hash(), + %amount, + recipient = %rollupRecipient, + "Deposit", + ); + } + _ => (), + } + } + + Ok(()) + } + + /// Process a chain revert. + /// + /// This function decodes all transactions to the rollup contract into events, reverts the + /// corresponding actions and updates the database. + fn revert(&mut self, chain: &Chain) -> eyre::Result<()> { + let mut events = decode_chain_into_rollup_events(chain); + // Reverse the order of events to start reverting from the tip + events.reverse(); + + for (_, tx, event) in events { + match event { + // The block is reverted from the database. + RollupContractEvents::BlockSubmitted(_) => { + let call = RollupContractCalls::abi_decode(tx.input(), true)?; + + if let RollupContractCalls::submitBlock(RollupContract::submitBlockCall { + header, + .. + }) = call + { + self.db.revert_tip_block(header.sequence)?; + info!( + tx_hash = %tx.recalculate_hash(), + chain_id = %header.rollupChainId, + sequence = %header.sequence, + "Block reverted" + ); + } + } + // The deposit is subtracted from the recipient's balance. + RollupContractEvents::Enter(RollupContract::Enter { + rollupChainId, + token, + rollupRecipient, + amount, + }) => { + if rollupChainId != U256::from(CHAIN_ID) { + error!(tx_hash = %tx.recalculate_hash(), "Invalid rollup chain ID"); + continue + } + if token != Address::ZERO { + error!(tx_hash = %tx.recalculate_hash(), "Only ETH deposits are supported"); + continue + } + + self.db.upsert_account(rollupRecipient, |account| { + let mut account = account.ok_or(eyre::eyre!("account not found"))?; + account.balance -= amount; + Ok(account) + })?; + + info!( + tx_hash = %tx.recalculate_hash(), + %amount, + recipient = %rollupRecipient, + "Deposit reverted", + ); + } + _ => (), + } + } + + Ok(()) + } +} + +/// Decode chain of blocks into a flattened list of receipt logs, filter only transactions to the +/// Rollup contract [ROLLUP_CONTRACT_ADDRESS] and extract [RollupContractEvents]. 
+fn decode_chain_into_rollup_events( + chain: &Chain, +) -> Vec<(&SealedBlockWithSenders, &TransactionSigned, RollupContractEvents)> { + chain + // Get all blocks and receipts + .blocks_and_receipts() + // Get all receipts + .flat_map(|(block, receipts)| { + block + .body + .iter() + .zip(receipts.iter().flatten()) + .map(move |(tx, receipt)| (block, tx, receipt)) + }) + // Get all logs from rollup contract + .flat_map(|(block, tx, receipt)| { + receipt + .logs + .iter() + .filter(|log| log.address == ROLLUP_CONTRACT_ADDRESS) + .map(move |log| (block, tx, log)) + }) + // Decode and filter rollup events + .filter_map(|(block, tx, log)| { + RollupContractEvents::decode_raw_log(log.topics(), &log.data.data, true) + .ok() + .map(|event| (block, tx, event)) + }) + .collect() +} + +fn main() -> eyre::Result<()> { + reth::cli::Cli::parse_args().run(|builder, _| async move { + let handle = builder + .node(EthereumNode::default()) + .install_exex("Rollup", move |ctx| async { + let connection = Connection::open(DATABASE_PATH)?; + + Ok(Rollup::new(ctx, connection)?.start()) + }) + .launch() + .await?; + + handle.wait_for_node_exit().await + }) +} diff --git a/examples/manual-p2p/Cargo.toml b/examples/manual-p2p/Cargo.toml index a9c7f2513..139cb0e18 100644 --- a/examples/manual-p2p/Cargo.toml +++ b/examples/manual-p2p/Cargo.toml @@ -6,14 +6,17 @@ edition.workspace = true license.workspace = true [dependencies] -once_cell.workspace = true -eyre.workspace = true - reth-primitives.workspace = true reth-network.workspace = true reth-discv4.workspace = true reth-eth-wire.workspace = true reth-ecies.workspace = true -futures.workspace = true +reth-network-types.workspace = true + secp256k1 = { workspace = true, features = ["global-context", "rand-std", "recovery"] } + +futures.workspace = true tokio.workspace = true + +eyre.workspace = true +once_cell.workspace = true diff --git a/examples/manual-p2p/src/main.rs b/examples/manual-p2p/src/main.rs index 737daf728..e97cb3662 100644 --- a/examples/manual-p2p/src/main.rs +++ b/examples/manual-p2p/src/main.rs @@ -16,8 +16,9 @@ use reth_eth_wire::{ EthMessage, EthStream, HelloMessage, P2PStream, Status, UnauthedEthStream, UnauthedP2PStream, }; use reth_network::config::rng_secret_key; +use reth_network_types::pk2id; use reth_primitives::{ - mainnet_nodes, pk2id, Chain, Hardfork, Head, NodeRecord, MAINNET, MAINNET_GENESIS_HASH, + mainnet_nodes, Chain, Hardfork, Head, NodeRecord, MAINNET, MAINNET_GENESIS_HASH, }; use secp256k1::{SecretKey, SECP256K1}; use tokio::net::TcpStream; diff --git a/examples/network-txpool/Cargo.toml b/examples/network-txpool/Cargo.toml new file mode 100644 index 000000000..12544a8f3 --- /dev/null +++ b/examples/network-txpool/Cargo.toml @@ -0,0 +1,13 @@ +[package] +name = "network-txpool" +version = "0.0.0" +publish = false +edition.workspace = true +license.workspace = true + +[dependencies] +reth-provider = { workspace = true, features = ["test-utils"] } +eyre.workspace = true +tokio.workspace = true +reth-network.workspace = true +reth-transaction-pool.workspace = true diff --git a/examples/network-txpool.rs b/examples/network-txpool/src/main.rs similarity index 98% rename from examples/network-txpool.rs rename to examples/network-txpool/src/main.rs index 0af120a89..6f8d69eab 100644 --- a/examples/network-txpool.rs +++ b/examples/network-txpool/src/main.rs @@ -4,7 +4,7 @@ //! Run with //! //! ```not_rust -//! cargo run --example network-txpool +//! cargo run --release -p network-txpool -- node //! 
``` use reth_network::{config::rng_secret_key, NetworkConfig, NetworkManager}; diff --git a/examples/network/Cargo.toml b/examples/network/Cargo.toml new file mode 100644 index 000000000..b3b740dd8 --- /dev/null +++ b/examples/network/Cargo.toml @@ -0,0 +1,13 @@ +[package] +name = "network" +version = "0.0.0" +publish = false +edition.workspace = true +license.workspace = true + +[dependencies] +reth-network.workspace = true +reth-provider = { workspace = true, features = ["test-utils"] } +futures.workspace = true +tokio.workspace = true +eyre.workspace = true \ No newline at end of file diff --git a/examples/network.rs b/examples/network/src/main.rs similarity index 96% rename from examples/network.rs rename to examples/network/src/main.rs index 18bf5cbcf..16482ca1f 100644 --- a/examples/network.rs +++ b/examples/network/src/main.rs @@ -3,7 +3,7 @@ //! Run with //! //! ```not_rust -//! cargo run --example network +//! cargo run --release -p network //! ``` use futures::StreamExt; diff --git a/examples/additional-rpc-namespace-in-cli/Cargo.toml b/examples/node-custom-rpc/Cargo.toml similarity index 90% rename from examples/additional-rpc-namespace-in-cli/Cargo.toml rename to examples/node-custom-rpc/Cargo.toml index 960dd86d0..f1c5d95d9 100644 --- a/examples/additional-rpc-namespace-in-cli/Cargo.toml +++ b/examples/node-custom-rpc/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "additional-rpc-namespace-in-cli" +name = "node-custom-rpc" version = "0.0.0" publish = false edition.workspace = true diff --git a/examples/additional-rpc-namespace-in-cli/src/main.rs b/examples/node-custom-rpc/src/main.rs similarity index 97% rename from examples/additional-rpc-namespace-in-cli/src/main.rs rename to examples/node-custom-rpc/src/main.rs index a4713f931..08b27d3ac 100644 --- a/examples/additional-rpc-namespace-in-cli/src/main.rs +++ b/examples/node-custom-rpc/src/main.rs @@ -3,7 +3,7 @@ //! Run with //! //! ```not_rust -//! cargo run -p additional-rpc-namespace-in-cli -- node --http --ws --enable-ext +//! cargo run -p node-custom-rpc -- node --http --ws --enable-ext //! ``` //! //! This installs an additional RPC method `txpoolExt_transactionCount` that can be queried via [cast](https://github.com/foundry-rs/foundry) diff --git a/examples/cli-extension-event-hooks/Cargo.toml b/examples/node-event-hooks/Cargo.toml similarity index 82% rename from examples/cli-extension-event-hooks/Cargo.toml rename to examples/node-event-hooks/Cargo.toml index 8664057e7..eb36722aa 100644 --- a/examples/cli-extension-event-hooks/Cargo.toml +++ b/examples/node-event-hooks/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "cli-extension-event-hooks" +name = "node-event-hooks" version = "0.0.0" publish = false edition.workspace = true diff --git a/examples/cli-extension-event-hooks/src/main.rs b/examples/node-event-hooks/src/main.rs similarity index 95% rename from examples/cli-extension-event-hooks/src/main.rs rename to examples/node-event-hooks/src/main.rs index 9f09d7a3c..b9cd53298 100644 --- a/examples/cli-extension-event-hooks/src/main.rs +++ b/examples/node-event-hooks/src/main.rs @@ -4,7 +4,7 @@ //! Run with //! //! ```not_rust -//! cargo run -p cli-extension-event-hooks -- node +//! cargo run -p node-event-hooks -- node //! ``` //! //! 
This launches the regular reth node and also prints:
diff --git a/examples/polygon-p2p/src/chain_cfg.rs b/examples/polygon-p2p/src/chain_cfg.rs
index 5a1fadb53..5860cdb1d 100644
--- a/examples/polygon-p2p/src/chain_cfg.rs
+++ b/examples/polygon-p2p/src/chain_cfg.rs
@@ -1,6 +1,5 @@
 use reth_primitives::{
-    b256, BaseFeeParams, Chain, ChainSpec, ForkCondition, ForkTimestamps, Hardfork, Head,
-    NodeRecord, B256,
+    b256, BaseFeeParams, Chain, ChainSpec, ForkCondition, Hardfork, Head, NodeRecord, B256,
 };
 use std::{collections::BTreeMap, sync::Arc};
@@ -15,7 +14,6 @@ pub(crate) fn polygon_chain_spec() -> Arc<ChainSpec> {
         // genesis: serde_json::from_str(include_str!("./genesis.json")).expect("deserialize genesis"),
         genesis_hash: Some(GENESIS),
-        fork_timestamps: ForkTimestamps::default().shanghai(1681338455),
         paris_block_and_final_difficulty: None,
         hardforks: BTreeMap::from([
             (Hardfork::Petersburg, ForkCondition::Block(0)),
diff --git a/examples/rpc-db/src/main.rs b/examples/rpc-db/src/main.rs
index b8286e51b..627da093c 100644
--- a/examples/rpc-db/src/main.rs
+++ b/examples/rpc-db/src/main.rs
@@ -49,7 +49,7 @@ async fn main() -> eyre::Result<()> {
     // 2. Setup the blockchain provider using only the database provider and a noop for the tree to
     //    satisfy trait bounds. Tree is not used in this example since we are only operating on the
     //    disk and don't handle new blocks/live sync etc, which is done by the blockchain tree.
-    let provider = BlockchainProvider::new(factory, NoopBlockchainTree::default())?;
+    let provider = BlockchainProvider::new(factory, Arc::new(NoopBlockchainTree::default()))?;
 
     let rpc_builder = RpcModuleBuilder::default()
         .with_provider(provider.clone())
diff --git a/examples/trace-transaction-cli/Cargo.toml b/examples/txpool-tracing/Cargo.toml
similarity index 88%
rename from examples/trace-transaction-cli/Cargo.toml
rename to examples/txpool-tracing/Cargo.toml
index 3f681c2de..220e5d8d5 100644
--- a/examples/trace-transaction-cli/Cargo.toml
+++ b/examples/txpool-tracing/Cargo.toml
@@ -1,5 +1,5 @@
 [package]
-name = "trace-transaction-cli"
+name = "txpool-tracing"
 version = "0.0.0"
 publish = false
 edition.workspace = true
diff --git a/examples/trace-transaction-cli/src/main.rs b/examples/txpool-tracing/src/main.rs
similarity index 96%
rename from examples/trace-transaction-cli/src/main.rs
rename to examples/txpool-tracing/src/main.rs
index ab72c2720..85a5b795a 100644
--- a/examples/trace-transaction-cli/src/main.rs
+++ b/examples/txpool-tracing/src/main.rs
@@ -3,7 +3,7 @@
 //! Run with
 //!
 //! ```not_rust
-//! cargo run --release -p trace-transaction-cli -- node --http --ws --recipients 0x....,0x....
+//! cargo run --release -p txpool-tracing -- node --http --ws --recipients 0x....,0x....
 //! ```
 //!
 //! If no recipients are specified, all transactions will be traced.
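The renamed `txpool-tracing` example is built around the transaction pool's pending-transaction listener. A minimal sketch of that pattern, assuming only the `TransactionPool` trait from `reth-transaction-pool` (the recipient filtering of the real example is elided):

```rust
use reth_transaction_pool::TransactionPool;

// Skeleton of the txpool-tracing loop: react to every transaction that is
// promoted to the pending sub-pool, e.g. to trace it against a recipient filter.
async fn watch_pending<P: TransactionPool>(pool: P) {
    let mut pending = pool.pending_transactions_listener();
    while let Some(tx_hash) = pending.recv().await {
        println!("new pending transaction: {tx_hash}");
    }
}
```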
diff --git a/testing/ef-tests/Cargo.toml b/testing/ef-tests/Cargo.toml index 3f2193227..2584c42d6 100644 --- a/testing/ef-tests/Cargo.toml +++ b/testing/ef-tests/Cargo.toml @@ -22,12 +22,12 @@ reth-provider = { workspace = true, features = ["test-utils"] } reth-stages.workspace = true reth-interfaces.workspace = true reth-revm.workspace = true -reth-node-ethereum.workspace = true +reth-evm-ethereum.workspace = true alloy-rlp.workspace = true -tokio = "1.28.1" +tokio.workspace = true walkdir = "2.3.3" -serde = "1.0.163" +serde.workspace = true serde_json.workspace = true thiserror.workspace = true rayon.workspace = true diff --git a/testing/ef-tests/src/cases/blockchain_test.rs b/testing/ef-tests/src/cases/blockchain_test.rs index 424603cb4..27f62f886 100644 --- a/testing/ef-tests/src/cases/blockchain_test.rs +++ b/testing/ef-tests/src/cases/blockchain_test.rs @@ -7,7 +7,6 @@ use crate::{ use alloy_rlp::Decodable; use rayon::iter::{ParallelBridge, ParallelIterator}; use reth_db::test_utils::{create_test_rw_db, create_test_static_files_dir}; -use reth_node_ethereum::EthEvmConfig; use reth_primitives::{BlockBody, SealedBlock, StaticFileSegment}; use reth_provider::{providers::StaticFileWriter, HashingWriter, ProviderFactory}; use reth_stages::{stages::ExecutionStage, ExecInput, Stage}; @@ -136,10 +135,11 @@ impl Case for BlockchainTestCase { // Execute the execution stage using the EVM processor factory for the test case // network. - let _ = ExecutionStage::new_with_factory(reth_revm::EvmProcessorFactory::new( - Arc::new(case.network.clone().into()), - EthEvmConfig::default(), - )) + let _ = ExecutionStage::new_with_executor( + reth_evm_ethereum::execute::EthExecutorProvider::ethereum(Arc::new( + case.network.clone().into(), + )), + ) .execute( &provider, ExecInput { target: last_block.as_ref().map(|b| b.number), checkpoint: None }, diff --git a/testing/testing-utils/Cargo.toml b/testing/testing-utils/Cargo.toml new file mode 100644 index 000000000..97a4c78df --- /dev/null +++ b/testing/testing-utils/Cargo.toml @@ -0,0 +1,17 @@ +[package] +name = "reth-testing-utils" +description = "Testing utils for reth." +version.workspace = true +edition.workspace = true +rust-version.workspace = true +license.workspace = true +homepage.workspace = true +repository.workspace = true + +[lints] +workspace = true + +[dependencies] +secp256k1.workspace = true +alloy-genesis.workspace = true +reth-primitives.workspace = true diff --git a/testing/testing-utils/src/genesis_allocator.rs b/testing/testing-utils/src/genesis_allocator.rs new file mode 100644 index 000000000..067f68343 --- /dev/null +++ b/testing/testing-utils/src/genesis_allocator.rs @@ -0,0 +1,206 @@ +//! Helps create a custom genesis alloc by making it easy to add funded accounts with known +//! signers to the genesis block. + +use alloy_genesis::GenesisAccount; +use reth_primitives::{public_key_to_address, Address, Bytes, B256, U256}; +use secp256k1::{ + rand::{thread_rng, RngCore}, + Keypair, Secp256k1, +}; +use std::{ + collections::{hash_map::Entry, BTreeMap, HashMap}, + fmt, +}; + +/// This helps create a custom genesis alloc by making it easy to add funded accounts with known +/// signers to the genesis block. +/// +/// # Example +/// ``` +/// # use reth_primitives::{Address, U256, hex, Bytes}; +/// # use reth_testing_utils::GenesisAllocator; +/// # use std::str::FromStr; +/// let mut allocator = GenesisAllocator::default(); +/// +/// // This will add a genesis account to the alloc builder, with the provided balance. 
The
+/// // signer for the account will be returned.
+/// let (_signer, _addr) = allocator.new_funded_account(U256::from(100_000_000_000_000_000u128));
+///
+/// // You can also provide code for the account.
+/// let code = Bytes::from_str("0x1234").unwrap();
+/// let (_second_signer, _second_addr) =
+///     allocator.new_funded_account_with_code(U256::from(100_000_000_000_000_000u128), code);
+///
+/// // You can also add an account with a specific address.
+/// // This will not return a signer, since the address is provided by the user and the signer
+/// // may be unknown.
+/// let addr = "0Ac1dF02185025F65202660F8167210A80dD5086".parse::<Address>
().unwrap();
+/// allocator.add_funded_account_with_address(addr, U256::from(100_000_000_000_000_000u128));
+///
+/// // Once you're done adding accounts, you can build the alloc.
+/// let alloc = allocator.build();
+/// ```
+pub struct GenesisAllocator<'a> {
+    /// The genesis alloc to be built.
+    alloc: HashMap<Address, GenesisAccount>,
+    /// The rng to use for generating key pairs.
+    rng: Box<dyn RngCore + 'a>,
+}
+
+impl<'a> GenesisAllocator<'a> {
+    /// Initialize a new alloc builder with the provided rng.
+    pub fn new_with_rng<R>(rng: &'a mut R) -> Self
+    where
+        R: RngCore,
+    {
+        Self { alloc: HashMap::default(), rng: Box::new(rng) }
+    }
+
+    /// Use the provided rng for generating key pairs.
+    pub fn with_rng<R>(mut self, rng: &'a mut R) -> Self
+    where
+        R: RngCore + std::fmt::Debug,
+    {
+        self.rng = Box::new(rng);
+        self
+    }
+
+    /// Add a funded account to the genesis alloc.
+    ///
+    /// Returns the key pair for the account and the account's address.
+    pub fn new_funded_account(&mut self, balance: U256) -> (Keypair, Address) {
+        let secp = Secp256k1::new();
+        let pair = Keypair::new(&secp, &mut self.rng);
+        let address = public_key_to_address(pair.public_key());
+
+        self.alloc.insert(address, GenesisAccount::default().with_balance(balance));
+
+        (pair, address)
+    }
+
+    /// Add a funded account to the genesis alloc with the provided code.
+    ///
+    /// Returns the key pair for the account and the account's address.
+    pub fn new_funded_account_with_code(
+        &mut self,
+        balance: U256,
+        code: Bytes,
+    ) -> (Keypair, Address) {
+        let secp = Secp256k1::new();
+        let pair = Keypair::new(&secp, &mut self.rng);
+        let address = public_key_to_address(pair.public_key());
+
+        self.alloc
+            .insert(address, GenesisAccount::default().with_balance(balance).with_code(Some(code)));
+
+        (pair, address)
+    }
+
+    /// Adds a funded account to the genesis alloc with the provided storage.
+    ///
+    /// Returns the key pair for the account and the account's address.
+    pub fn new_funded_account_with_storage(
+        &mut self,
+        balance: U256,
+        storage: BTreeMap<B256, B256>,
+    ) -> (Keypair, Address) {
+        let secp = Secp256k1::new();
+        let pair = Keypair::new(&secp, &mut self.rng);
+        let address = public_key_to_address(pair.public_key());
+
+        self.alloc.insert(
+            address,
+            GenesisAccount::default().with_balance(balance).with_storage(Some(storage)),
+        );
+
+        (pair, address)
+    }
+
+    /// Adds an account with code and storage to the genesis alloc.
+    ///
+    /// Returns the key pair for the account and the account's address.
+    pub fn new_account_with_code_and_storage(
+        &mut self,
+        code: Bytes,
+        storage: BTreeMap<B256, B256>,
+    ) -> (Keypair, Address) {
+        let secp = Secp256k1::new();
+        let pair = Keypair::new(&secp, &mut self.rng);
+        let address = public_key_to_address(pair.public_key());
+
+        self.alloc.insert(
+            address,
+            GenesisAccount::default().with_code(Some(code)).with_storage(Some(storage)),
+        );
+
+        (pair, address)
+    }
+
+    /// Adds an account with code to the genesis alloc.
+    ///
+    /// Returns the key pair for the account and the account's address.
+    pub fn new_account_with_code(&mut self, code: Bytes) -> (Keypair, Address) {
+        let secp = Secp256k1::new();
+        let pair = Keypair::new(&secp, &mut self.rng);
+        let address = public_key_to_address(pair.public_key());
+
+        self.alloc.insert(address, GenesisAccount::default().with_code(Some(code)));
+
+        (pair, address)
+    }
+
+    /// Add a funded account to the genesis alloc with the provided address.
+    ///
+    /// Neither the key pair nor the account will be returned, since the address is provided by
+    /// the user and the signer may be unknown.
+    pub fn add_funded_account_with_address(&mut self, address: Address, balance: U256) {
+        self.alloc.insert(address, GenesisAccount::default().with_balance(balance));
+    }
+
+    /// Adds the given [GenesisAccount] to the genesis alloc under a freshly generated address.
+    ///
+    /// Returns the address of the inserted account.
+    pub fn add_account(&mut self, account: GenesisAccount) -> Address {
+        let secp = Secp256k1::new();
+        let pair = Keypair::new(&secp, &mut self.rng);
+        let address = public_key_to_address(pair.public_key());
+
+        self.alloc.insert(address, account);
+
+        address
+    }
+
+    /// Gets the account for the provided address.
+    ///
+    /// If it does not exist, this returns `None`.
+    pub fn get_account(&self, address: &Address) -> Option<&GenesisAccount> {
+        self.alloc.get(address)
+    }
+
+    /// Gets a mutable version of the account for the provided address, if it exists.
+    pub fn get_account_mut(&mut self, address: &Address) -> Option<&mut GenesisAccount> {
+        self.alloc.get_mut(address)
+    }
+
+    /// Gets an [Entry] for the provided address.
+    pub fn account_entry(&mut self, address: Address) -> Entry<'_, Address, GenesisAccount> {
+        self.alloc.entry(address)
+    }
+
+    /// Build the genesis alloc.
+    pub fn build(self) -> HashMap<Address, GenesisAccount> {
+        self.alloc
+    }
+}
+
+impl Default for GenesisAllocator<'_> {
+    fn default() -> Self {
+        Self { alloc: HashMap::default(), rng: Box::new(thread_rng()) }
+    }
+}
+
+impl fmt::Debug for GenesisAllocator<'_> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.debug_struct("GenesisAllocator").field("alloc", &self.alloc).finish_non_exhaustive()
+    }
+}
diff --git a/testing/testing-utils/src/lib.rs b/testing/testing-utils/src/lib.rs
new file mode 100644
index 000000000..27b54b19e
--- /dev/null
+++ b/testing/testing-utils/src/lib.rs
@@ -0,0 +1,13 @@
+//! Testing utilities.
+
+#![doc(
+    html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png",
+    html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256",
+    issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/"
+)]
+#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
+#![cfg_attr(not(test), warn(unused_crate_dependencies))]
+
+pub mod genesis_allocator;
+
+pub use genesis_allocator::GenesisAllocator;
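To close the loop, this is roughly how `GenesisAllocator` is meant to be consumed in a test. `Genesis::extend_accounts` from `alloy-genesis` is assumed here as the splice point; a sketch, not the crate's prescribed usage:

```rust
use alloy_genesis::Genesis;
use reth_primitives::U256;
use reth_testing_utils::GenesisAllocator;

// Fund a fresh, known-signer account and splice the alloc into a test genesis.
fn test_genesis() -> Genesis {
    let mut allocator = GenesisAllocator::default();
    let (_signer, _addr) =
        allocator.new_funded_account(U256::from(1_000_000_000_000_000_000u128));
    // `_signer` can now sign test transactions from `_addr` against this genesis.
    Genesis::default().extend_accounts(allocator.build())
}
```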