diff --git a/.github/actions/setup-kurtosis/action.yml b/.github/actions/setup-kurtosis/action.yml new file mode 100644 index 00000000000..fcea1609f7a --- /dev/null +++ b/.github/actions/setup-kurtosis/action.yml @@ -0,0 +1,74 @@ + +name: "Setup Kurtosis" +description: "Setup Kurtosis CDK for tests" +runs: + using: "composite" + steps: + - name: Checkout cdk-erigon + uses: actions/checkout@v4 + with: + path: cdk-erigon + + - name: Checkout kurtosis-cdk + uses: actions/checkout@v4 + with: + repository: 0xPolygon/kurtosis-cdk + ref: v0.2.24 + path: kurtosis-cdk + + - name: Install Kurtosis CDK tools + uses: ./kurtosis-cdk/.github/actions/setup-kurtosis-cdk + + - name: Install Foundry + uses: foundry-rs/foundry-toolchain@v1 + + - name: Install polycli + shell: bash + run: | + tmp_dir=$(mktemp -d) && curl -L https://github.com/0xPolygon/polygon-cli/releases/download/v0.1.48/polycli_v0.1.48_linux_amd64.tar.gz | tar -xz -C "$tmp_dir" && mv "$tmp_dir"/* /usr/local/bin/polycli && rm -rf "$tmp_dir" + sudo chmod +x /usr/local/bin/polycli + /usr/local/bin/polycli version + + - name: Install yq + shell: bash + run: | + sudo curl -L https://github.com/mikefarah/yq/releases/download/v4.44.2/yq_linux_amd64 -o /usr/local/bin/yq + sudo chmod +x /usr/local/bin/yq + /usr/local/bin/yq --version + + - name: Build docker image + working-directory: ./cdk-erigon + shell: bash + run: docker build -t cdk-erigon:local --file Dockerfile . 
+ + - name: Remove unused flags + working-directory: ./kurtosis-cdk + shell: bash + run: | + sed -i '/zkevm.sequencer-batch-seal-time:/d' templates/cdk-erigon/config.yml + sed -i '/zkevm.sequencer-non-empty-batch-seal-time:/d' templates/cdk-erigon/config.yml + sed -i '/zkevm\.sequencer-initial-fork-id/d' ./templates/cdk-erigon/config.yml + sed -i '/sentry.drop-useless-peers:/d' templates/cdk-erigon/config.yml + sed -i '/zkevm\.pool-manager-url/d' ./templates/cdk-erigon/config.yml + sed -i '$a\zkevm.disable-virtual-counters: true' ./templates/cdk-erigon/config.yml + sed -i '/zkevm.l2-datastreamer-timeout:/d' templates/cdk-erigon/config.yml + + - name: Create params.yml overrides + working-directory: ./kurtosis-cdk + shell: bash + run: | + echo 'args:' > params.yml + echo ' cdk_erigon_node_image: cdk-erigon:local' >> params.yml + echo ' el-1-geth-lighthouse: ethpandaops/lighthouse@sha256:4902d9e4a6b6b8d4c136ea54f0e51582a32f356f3dec7194a1adee13ed2d662e' >> params.yml + /usr/local/bin/yq -i '.args.data_availability_mode = "${{ matrix.da-mode }}"' params.yml + sed -i 's/"londonBlock": [0-9]\+/"londonBlock": 0/' ./templates/cdk-erigon/chainspec.json + sed -i 's/"normalcyBlock": [0-9]\+/"normalcyBlock": 0/' ./templates/cdk-erigon/chainspec.json + sed -i 's/"shanghaiTime": [0-9]\+/"shanghaiTime": 0/' ./templates/cdk-erigon/chainspec.json + sed -i 's/"cancunTime": [0-9]\+/"cancunTime": 0/' ./templates/cdk-erigon/chainspec.json + sed -i '/"terminalTotalDifficulty"/d' ./templates/cdk-erigon/chainspec.json + + - name: Deploy Kurtosis CDK package + working-directory: ./kurtosis-cdk + shell: bash + run: | + kurtosis run --enclave cdk-v1 --args-file params.yml --image-download always . 
'{"args": {"erigon_strict_mode": false, "cdk_erigon_node_image": "cdk-erigon:local"}}' \ No newline at end of file diff --git a/.github/scripts/test_resequence.sh b/.github/scripts/test_resequence.sh index b36bc878236..d59c780a33c 100755 --- a/.github/scripts/test_resequence.sh +++ b/.github/scripts/test_resequence.sh @@ -50,7 +50,7 @@ wait_for_l1_batch() { current_batch=$(cast logs --rpc-url "$(kurtosis port print cdk-v1 el-1-geth-lighthouse rpc)" --address 0x1Fe038B54aeBf558638CA51C91bC8cCa06609e91 --from-block 0 --json | jq -r '.[] | select(.topics[0] == "0x3e54d0825ed78523037d00a81759237eb436ce774bd546993ee67a1b67b6e766") | .topics[1]' | tail -n 1 | sed 's/^0x//') current_batch=$((16#$current_batch)) elif [ "$batch_type" = "verified" ]; then - current_batch=$(cast rpc zkevm_verifiedBatchNumber --rpc-url "$(kurtosis port print cdk-v1 cdk-erigon-node-001 rpc)" | sed 's/^"//;s/"$//') + current_batch=$(cast rpc zkevm_verifiedBatchNumber --rpc-url "$(kurtosis port print cdk-v1 cdk-erigon-rpc-001 rpc)" | sed 's/^"//;s/"$//') else echo "Invalid batch type. Use 'virtual' or 'verified'." return 1 @@ -121,7 +121,7 @@ kurtosis service exec cdk-v1 cdk-erigon-sequencer-001 "nohup cdk-erigon --pprof= sleep 30 echo "Running loadtest using polycli" -/usr/local/bin/polycli loadtest --rpc-url "$(kurtosis port print cdk-v1 cdk-erigon-node-001 rpc)" --private-key "0x12d7de8621a77640c9241b2595ba78ce443d05e94090365ab3bb5e19df82c625" --verbosity 600 --requests 2000 --rate-limit 500 --mode uniswapv3 --legacy +/usr/local/bin/polycli loadtest --rpc-url "$(kurtosis port print cdk-v1 cdk-erigon-rpc-001 rpc)" --private-key "0x12d7de8621a77640c9241b2595ba78ce443d05e94090365ab3bb5e19df82c625" --verbosity 600 --requests 2000 --rate-limit 500 --mode uniswapv3 --legacy echo "Waiting for batch virtualization" if ! 
wait_for_l1_batch 600 "virtual"; then @@ -174,13 +174,13 @@ echo "Getting block hash from sequencer" sequencer_hash=$(cast block $comparison_block --rpc-url "$(kurtosis port print cdk-v1 cdk-erigon-sequencer-001 rpc)" | grep "hash" | awk '{print $2}') # wait for block to be available on sync node -if ! wait_for_l2_block_number $comparison_block "$(kurtosis port print cdk-v1 cdk-erigon-node-001 rpc)"; then +if ! wait_for_l2_block_number $comparison_block "$(kurtosis port print cdk-v1 cdk-erigon-rpc-001 rpc)"; then echo "Failed to wait for batch verification" exit 1 fi echo "Getting block hash from node" -node_hash=$(cast block $comparison_block --rpc-url "$(kurtosis port print cdk-v1 cdk-erigon-node-001 rpc)" | grep "hash" | awk '{print $2}') +node_hash=$(cast block $comparison_block --rpc-url "$(kurtosis port print cdk-v1 cdk-erigon-rpc-001 rpc)" | grep "hash" | awk '{print $2}') echo "Sequencer block hash: $sequencer_hash" echo "Node block hash: $node_hash" diff --git a/.github/workflows/ci_zkevm.yml b/.github/workflows/ci_zkevm.yml index f906bf93847..df2a8497e55 100644 --- a/.github/workflows/ci_zkevm.yml +++ b/.github/workflows/ci_zkevm.yml @@ -3,9 +3,14 @@ on: push: branches: - dev + - zkevm + - zkevm-2.60 pull_request: branches: - dev + - zkevm + - zkevm-2.60 + - stable* types: - opened - reopened @@ -17,51 +22,58 @@ concurrency: group: ${{ github.ref }} cancel-in-progress: true +env: + DOCKERHUB_USERNAME: ${{ secrets.DOCKERHUB_USERNAME }} + DOCKERHUB_TOKEN: ${{ secrets.DOCKERHUB_TOKEN }} + jobs: -# tests: -# strategy: -# matrix: -# os: [ ubuntu-22.04, macos-14-xlarge ] # list of os: https://github.com/actions/virtual-environments -# runs-on: ${{ matrix.os }} -# timeout-minutes: ${{ matrix.os == 'macos-14-xlarge' && 40 || 30 }} -# -# steps: -# - uses: actions/checkout@v3 -# - uses: actions/setup-go@v4 -# with: -# go-version: '1.21' -# - name: Install dependencies on Linux + tests: + if: false + strategy: + max-parallel: 2 + fail-fast: true + matrix: + os: [ 
ubuntu-22.04, macos-14-xlarge ] # list of os: https://github.com/actions/virtual-environments + runs-on: ${{ matrix.os }} + timeout-minutes: ${{ matrix.os == 'macos-14-xlarge' && 40 || 30 }} + + steps: + - uses: actions/checkout@v3 + - uses: actions/setup-go@v4 + with: + go-version: '1.21' + - name: Install dependencies on Linux + if: runner.os == 'Linux' + run: sudo apt update && sudo apt install build-essential + + - name: Build + run: make BUILD_TAGS=nosqlite,noboltdb,nosilkworm cdk-erigon + + - name: Reproducible build test + run: | + make cdk-erigon + shasum -a256 ./build/bin/cdk-erigon > erigon1.sha256 + make cdk-erigon + shasum -a256 ./build/bin/cdk-erigon > erigon2.sha256 + if ! cmp -s erigon1.sha256 erigon2.sha256; then + echo >&2 "Reproducible build broken"; cat erigon1.sha256; cat erigon2.sha256; exit 1 + fi + +# - name: Lint # if: runner.os == 'Linux' -# run: sudo apt update && sudo apt install build-essential -# -# - name: Build -# run: make BUILD_TAGS=nosqlite,noboltdb,nosilkworm all -# -# - name: Reproducible build test -# run: | -# make cdk-erigon -# shasum -a256 ./build/bin/cdk-erigon > erigon1.sha256 -# make cdk-erigon -# shasum -a256 ./build/bin/cdk-erigon > erigon2.sha256 -# if ! 
cmp -s erigon1.sha256 erigon2.sha256; then -# echo >&2 "Reproducible build broken"; cat erigon1.sha256; cat erigon2.sha256; exit 1 -# fi -# -## - name: Lint -## if: runner.os == 'Linux' -## uses: golangci/golangci-lint-action@v4 -## with: -## version: v1.54 -# -# - name: Test -# run: make test -# -# - name: SonarCloud Scan -# uses: SonarSource/sonarcloud-github-action@master -# if: ${{ matrix.os == 'ubuntu-20.04' }} -# env: -# GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} # Needed to get PR information, if any -# SONAR_TOKEN: ${{ secrets.SONAR_TOKEN }} +# uses: golangci/golangci-lint-action@v4 +# with: +# version: v1.54 + + - name: Test + run: make test + + - name: SonarCloud Scan + uses: SonarSource/sonarcloud-github-action@master + if: ${{ matrix.os == 'ubuntu-20.04' }} + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} # Needed to get PR information, if any + SONAR_TOKEN: ${{ secrets.SONAR_TOKEN }} kurtosis-cdk: if: true @@ -72,49 +84,9 @@ jobs: steps: - name: Checkout cdk-erigon uses: actions/checkout@v4 - with: - path: cdk-erigon - - - name: Checkout kurtosis-cdk - uses: actions/checkout@v4 - with: - repository: 0xPolygon/kurtosis-cdk - ref: v0.2.12 - path: kurtosis-cdk - - - name: Install Kurtosis CDK tools - uses: ./kurtosis-cdk/.github/actions/setup-kurtosis-cdk + - name: Setup kurtosis + uses: ./.github/actions/setup-kurtosis - - name: Install Foundry - uses: foundry-rs/foundry-toolchain@v1 - - - name: Install yq - run: | - sudo curl -L https://github.com/mikefarah/yq/releases/download/v4.44.2/yq_linux_amd64 -o /usr/local/bin/yq - sudo chmod +x /usr/local/bin/yq - /usr/local/bin/yq --version - - - name: Build docker image - working-directory: ./cdk-erigon - run: docker build -t cdk-erigon:local --file Dockerfile . 
- - - name: Remove unused flags - working-directory: ./kurtosis-cdk - run: | - sed -i '/zkevm.sequencer-batch-seal-time:/d' templates/cdk-erigon/config.yml - sed -i '/zkevm.sequencer-non-empty-batch-seal-time:/d' templates/cdk-erigon/config.yml - sed -i '/sentry.drop-useless-peers:/d' templates/cdk-erigon/config.yml - sed -i '/zkevm.l2-datastreamer-timeout:/d' templates/cdk-erigon/config.yml - name: Configure Kurtosis CDK - working-directory: ./kurtosis-cdk - run: | - /usr/local/bin/yq -i '.args.data_availability_mode = "${{ matrix.da-mode }}"' params.yml - /usr/local/bin/yq -i '.args.cdk_erigon_node_image = "cdk-erigon:local"' params.yml - - - name: Deploy Kurtosis CDK package - working-directory: ./kurtosis-cdk - run: | - kurtosis run --enclave cdk-v1 --image-download always . '{"args": {"data_availability_mode": "${{ matrix.da-mode }}", "cdk_erigon_node_image": "cdk-erigon:local"}}' - name: Run process with CPU monitoring working-directory: ./cdk-erigon @@ -136,7 +108,7 @@ - name: Monitor verified batches working-directory: ./kurtosis-cdk shell: bash - run: timeout 900s .github/scripts/monitor-verified-batches.sh --rpc-url $(kurtosis port print cdk-v1 cdk-erigon-node-001 rpc) --target 20 --timeout 900 + run: timeout 900s .github/scripts/monitor-verified-batches.sh --enclave cdk-v1 --rpc-url $(kurtosis port print cdk-v1 cdk-erigon-rpc-001 rpc) --target 20 --timeout 900 - name: Set up Docker Buildx uses: docker/setup-buildx-action@v2 @@ -146,9 +118,8 @@ kurtosis files download cdk-v1 bridge-config-artifact echo "BRIDGE_ADDRESS=$(/usr/local/bin/yq '.NetworkConfig.PolygonBridgeAddress' bridge-config-artifact/bridge-config.toml)" >> $GITHUB_ENV echo "ETH_RPC_URL=$(kurtosis port print cdk-v1 el-1-geth-lighthouse rpc)" >> $GITHUB_ENV - echo "L2_RPC_URL=$(kurtosis port print cdk-v1 cdk-erigon-node-001 rpc)" >> $GITHUB_ENV echo "BRIDGE_API_URL=$(kurtosis port print cdk-v1 zkevm-bridge-service-001 rpc)" >> $GITHUB_ENV - + echo "L2_RPC_URL=$(kurtosis port 
print cdk-v1 cdk-erigon-rpc-001 rpc)" >> $GITHUB_ENV - name: Clone bridge repository run: git clone --recurse-submodules -j8 https://github.com/0xPolygonHermez/zkevm-bridge-service.git -b develop bridge @@ -186,7 +157,7 @@ jobs: run: | mkdir -p ci_logs cd ci_logs - kurtosis service logs cdk-v1 cdk-erigon-node-001 --all > cdk-erigon-node-001.log + kurtosis service logs cdk-v1 cdk-erigon-rpc-001 --all > cdk-erigon-rpc-001.log kurtosis service logs cdk-v1 cdk-erigon-sequencer-001 --all > cdk-erigon-sequencer-001.log kurtosis service logs cdk-v1 zkevm-agglayer-001 --all > zkevm-agglayer-001.log kurtosis service logs cdk-v1 zkevm-prover-001 --all > zkevm-prover-001.log @@ -211,62 +182,12 @@ jobs: - name: Checkout kurtosis-cdk uses: actions/checkout@v4 - with: - repository: 0xPolygon/kurtosis-cdk - ref: v0.2.12 - path: kurtosis-cdk - - - name: Install Kurtosis CDK tools - uses: ./kurtosis-cdk/.github/actions/setup-kurtosis-cdk - - - name: Install Foundry - uses: foundry-rs/foundry-toolchain@v1 + - name: Setup kurtosis + uses: ./.github/actions/setup-kurtosis - - name: Install yq - run: | - sudo curl -L https://github.com/mikefarah/yq/releases/download/v4.44.2/yq_linux_amd64 -o /usr/local/bin/yq - sudo chmod +x /usr/local/bin/yq - /usr/local/bin/yq --version - - - name: Install polycli - run: | - tmp_dir=$(mktemp -d) && curl -L https://github.com/0xPolygon/polygon-cli/releases/download/v0.1.48/polycli_v0.1.48_linux_amd64.tar.gz | tar -xz -C "$tmp_dir" && mv "$tmp_dir"/* /usr/local/bin/polycli && rm -rf "$tmp_dir" - sudo chmod +x /usr/local/bin/polycli - /usr/local/bin/polycli version - - - name: Build docker image - working-directory: ./cdk-erigon - run: docker build -t cdk-erigon:local --file Dockerfile . 
- - - name: Modify cdk-erigon flags - working-directory: ./kurtosis-cdk - run: | - sed -i '/zkevm.sequencer-batch-seal-time:/d' templates/cdk-erigon/config.yml - sed -i '/zkevm.sequencer-non-empty-batch-seal-time:/d' templates/cdk-erigon/config.yml - sed -i '/zkevm\.sequencer-initial-fork-id/d' ./templates/cdk-erigon/config.yml - sed -i '/sentry.drop-useless-peers:/d' templates/cdk-erigon/config.yml - sed -i '/zkevm\.pool-manager-url/d' ./templates/cdk-erigon/config.yml - sed -i '$a\zkevm.disable-virtual-counters: true' ./templates/cdk-erigon/config.yml - sed -i '/zkevm.l2-datastreamer-timeout:/d' templates/cdk-erigon/config.yml - - - - name: Configure Kurtosis CDK - working-directory: ./kurtosis-cdk - run: | - sed -i 's/"londonBlock": [0-9]\+/"londonBlock": 0/' ./templates/cdk-erigon/chainspec.json - sed -i 's/"normalcyBlock": [0-9]\+/"normalcyBlock": 0/' ./templates/cdk-erigon/chainspec.json - sed -i 's/"shanghaiTime": [0-9]\+/"shanghaiTime": 0/' ./templates/cdk-erigon/chainspec.json - sed -i 's/"cancunTime": [0-9]\+/"cancunTime": 0/' ./templates/cdk-erigon/chainspec.json - sed -i '/"terminalTotalDifficulty"/d' ./templates/cdk-erigon/chainspec.json - - - name: Deploy Kurtosis CDK package - working-directory: ./kurtosis-cdk - run: | - kurtosis run --enclave cdk-v1 --image-download always . 
'{"args": {"erigon_strict_mode": false, "cdk_erigon_node_image": "cdk-erigon:local"}}' - - name: Dynamic gas fee tx load test working-directory: ./kurtosis-cdk - run: /usr/local/bin/polycli loadtest --rpc-url "$(kurtosis port print cdk-v1 cdk-erigon-node-001 rpc)" --private-key "0x12d7de8621a77640c9241b2595ba78ce443d05e94090365ab3bb5e19df82c625" --verbosity 700 --requests 500 --rate-limit 50 --mode uniswapv3 + run: /usr/local/bin/polycli loadtest --rpc-url "$(kurtosis port print cdk-v1 cdk-erigon-rpc-001 rpc)" --private-key "0x12d7de8621a77640c9241b2595ba78ce443d05e94090365ab3bb5e19df82c625" --verbosity 700 --requests 500 --rate-limit 50 --mode uniswapv3 --legacy - name: Upload logs uses: actions/upload-artifact@v3 @@ -280,7 +201,7 @@ jobs: run: | mkdir -p ci_logs cd ci_logs - kurtosis service logs cdk-v1 cdk-erigon-node-001 --all > cdk-erigon-node-001.log + kurtosis service logs cdk-v1 cdk-erigon-rpc-001 --all > cdk-erigon-rpc-001.log kurtosis service logs cdk-v1 cdk-erigon-sequencer-001 --all > cdk-erigon-sequencer-001.log - name: Upload logs diff --git a/.github/workflows/doc-rpc.yml b/.github/workflows/doc-rpc.yml index 2a298409112..1323977f4da 100644 --- a/.github/workflows/doc-rpc.yml +++ b/.github/workflows/doc-rpc.yml @@ -2,12 +2,12 @@ name: RPC endpoint doc on: push: branches: + - dev - zkevm - - stable* pull_request: branches: + - dev - zkevm - - stable* types: - opened - reopened diff --git a/.github/workflows/test-resequence.yml b/.github/workflows/test-resequence.yml index c1949caaf6d..cc58f71d5d9 100644 --- a/.github/workflows/test-resequence.yml +++ b/.github/workflows/test-resequence.yml @@ -2,69 +2,30 @@ name: Resequence test on: push: branches: + - dev - zkevm - - stable* pull_request: branches: + - dev - zkevm - - stable* types: - opened - reopened - synchronize - ready_for_review +env: + DOCKERHUB_USERNAME: ${{ secrets.DOCKERHUB_USERNAME }} + DOCKERHUB_TOKEN: ${{ secrets.DOCKERHUB_TOKEN }} + jobs: resequence-test: runs-on: ubuntu-latest steps: - 
name: Checkout cdk-erigon uses: actions/checkout@v4 - with: - path: cdk-erigon - - - name: Checkout kurtosis-cdk - uses: actions/checkout@v4 - with: - repository: 0xPolygon/kurtosis-cdk - ref: v0.2.12 - path: kurtosis-cdk - - - name: Install Kurtosis CDK tools - uses: ./kurtosis-cdk/.github/actions/setup-kurtosis-cdk - - - name: Install Foundry - uses: foundry-rs/foundry-toolchain@v1 - - - name: Install yq - run: | - sudo curl -L https://github.com/mikefarah/yq/releases/download/v4.44.2/yq_linux_amd64 -o /usr/local/bin/yq - sudo chmod +x /usr/local/bin/yq - /usr/local/bin/yq --version - - name: Install polycli - run: | - tmp_dir=$(mktemp -d) && curl -L https://github.com/0xPolygon/polygon-cli/releases/download/v0.1.48/polycli_v0.1.48_linux_amd64.tar.gz | tar -xz -C "$tmp_dir" && mv "$tmp_dir"/* /usr/local/bin/polycli && rm -rf "$tmp_dir" - sudo chmod +x /usr/local/bin/polycli - /usr/local/bin/polycli version - - name: Build docker image - working-directory: ./cdk-erigon - run: docker build -t cdk-erigon:local --file Dockerfile . - - - name: Remove unused flags - working-directory: ./kurtosis-cdk - run: | - sed -i '/zkevm.sequencer-batch-seal-time:/d' templates/cdk-erigon/config.yml - sed -i '/zkevm.sequencer-non-empty-batch-seal-time:/d' templates/cdk-erigon/config.yml - sed -i '/sentry.drop-useless-peers:/d' templates/cdk-erigon/config.yml - sed -i '/zkevm.pool-manager-url/d' templates/cdk-erigon/config.yml - sed -i '/zkevm.l2-datastreamer-timeout:/d' templates/cdk-erigon/config.yml - - name: Configure Kurtosis CDK - working-directory: ./kurtosis-cdk - run: | - /usr/local/bin/yq -i '.args.cdk_erigon_node_image = "cdk-erigon:local"' params.yml - - name: Deploy Kurtosis CDK package - working-directory: ./kurtosis-cdk - run: kurtosis run --enclave cdk-v1 --args-file params.yml --image-download always . 
+ - name: Setup kurtosis + uses: ./.github/actions/setup-kurtosis - name: Test resequence working-directory: ./cdk-erigon @@ -82,7 +43,7 @@ jobs: run: | mkdir -p ci_logs cd ci_logs - kurtosis service logs cdk-v1 cdk-erigon-node-001 --all > cdk-erigon-node-001.log + kurtosis service logs cdk-v1 cdk-erigon-rpc-001 --all > cdk-erigon-rpc-001.log kurtosis service logs cdk-v1 cdk-erigon-sequencer-001 --all > cdk-erigon-sequencer-001.log kurtosis service logs cdk-v1 zkevm-agglayer-001 --all > zkevm-agglayer-001.log kurtosis service logs cdk-v1 zkevm-prover-001 --all > zkevm-prover-001.log diff --git a/.github/workflows/test-unwinds.yml b/.github/workflows/test-unwinds.yml index 926498e0211..14575ef47d3 100644 --- a/.github/workflows/test-unwinds.yml +++ b/.github/workflows/test-unwinds.yml @@ -2,9 +2,9 @@ name: Unwinds tests on: pull_request: branches: + - dev - zkevm - zkevm-2.60 - - stable* workflow_dispatch: jobs: @@ -23,7 +23,7 @@ jobs: - name: Install dependencies on Linux if: runner.os == 'Linux' - run: sudo apt update && sudo apt install build-essential + run: sudo apt update && sudo apt install -y build-essential - name: Build run: | diff --git a/.gitignore b/.gitignore index afb082b0f5e..fbec92a780b 100644 --- a/.gitignore +++ b/.gitignore @@ -119,3 +119,6 @@ xlayerconfig-mainnet.yaml xlayerconfig-testnet.yaml **/cover.out **/cover.html + +datadir +zk/tests/unwinds/datastream/hermez-dynamic-integration8-datastream diff --git a/Dockerfile.lite b/Dockerfile.lite new file mode 100644 index 00000000000..47a2d261888 --- /dev/null +++ b/Dockerfile.lite @@ -0,0 +1,71 @@ +# syntax = docker/dockerfile:1.2 +FROM docker.io/library/golang:1.21-alpine3.17 AS builder + +RUN apk --no-cache add build-base linux-headers git bash ca-certificates libstdc++ + +WORKDIR /app +ADD go.mod go.mod +ADD go.sum go.sum +ADD erigon-lib/go.mod erigon-lib/go.mod +ADD erigon-lib/go.sum erigon-lib/go.sum + +RUN go mod download +ADD . . 
+ +RUN --mount=type=cache,target=/root/.cache \ + --mount=type=cache,target=/tmp/go-build \ + --mount=type=cache,target=/go/pkg/mod \ + make BUILD_TAGS=nosqlite,noboltdb,nosilkworm cdk-erigon + +FROM docker.io/library/alpine:3.17 + +# install required runtime libs, along with some helpers for debugging +RUN apk add --no-cache ca-certificates libstdc++ tzdata +RUN apk add --no-cache curl jq bind-tools + +# Setup user and group +# +# from the perspective of the container, uid=1000, gid=1000 is a sensible choice +# (mimicking Ubuntu Server), but if caller creates a .env (example in repo root), +# these defaults will get overridden when make calls docker-compose +ARG UID=1000 +ARG GID=1000 +RUN adduser -D -u $UID -g $GID erigon +USER erigon +RUN mkdir -p ~/.local/share/erigon +WORKDIR /home/erigon + +## add pre-defined run configs +ADD hermezconfig-mainnet.yaml.example mainnet.yaml +ADD hermezconfig-cardona.yaml.example cardona.yaml +ADD hermezconfig-bali.yaml.example bali.yaml + +## then give each binary its own layer +COPY --from=builder /app/build/bin/cdk-erigon /usr/local/bin/cdk-erigon + +EXPOSE 8545 \ + 8551 \ + 8546 \ + 30303 \ + 30303/udp \ + 42069 \ + 42069/udp \ + 8080 \ + 9090 \ + 6060 + +# https://github.com/opencontainers/image-spec/blob/main/annotations.md +ARG BUILD_DATE +ARG VCS_REF +ARG VERSION +LABEL org.label-schema.build-date=$BUILD_DATE \ + org.label-schema.description="Erigon ZKEVM Client" \ + org.label-schema.name="ZKEVM Erigon" \ + org.label-schema.schema-version="1.0" \ + org.label-schema.url="https://torquem.ch" \ + org.label-schema.vcs-ref=$VCS_REF \ + org.label-schema.vcs-url="https://github.com/0xPolygonHermez/cdk-erigon.git" \ + org.label-schema.vendor="Torquem" \ + org.label-schema.version=$VERSION + +ENTRYPOINT ["cdk-erigon"] diff --git a/Makefile b/Makefile index 69ed8bb762a..8faad91deac 100644 --- a/Makefile +++ b/Makefile @@ -159,6 +159,13 @@ db-tools: rm -rf vendor @echo "Run \"$(GOBIN)/mdbx_stat -h\" to get info about mdbx db file." 
+ +## test-unwind: run the unwind tests +test-unwind: + make cdk-erigon + ./zk/tests/unwinds/unwind.sh + + test-erigon-lib: @cd erigon-lib && $(MAKE) test diff --git a/cmd/rpcdaemon/README.md b/cmd/rpcdaemon/README.md index 4629b79180b..fe5e786e7aa 100644 --- a/cmd/rpcdaemon/README.md +++ b/cmd/rpcdaemon/README.md @@ -338,6 +338,7 @@ The following table shows the current implementation status of Erigon's RPC daem | debug_traceTransaction | Yes | Streaming (can handle huge results) | | debug_traceCall | Yes | Streaming (can handle huge results) | | debug_traceCallMany | Yes | Erigon Method PR#4567. | +| debug_traceBatchByNumber | Yes | Streaming (can handle huge results) | | | | | | trace_call | Yes | | | trace_callMany | Yes | | diff --git a/cmd/rpcdaemon/commands/mocks/l1_syncer_mock.go b/cmd/rpcdaemon/commands/mocks/l1_syncer_mock.go deleted file mode 100644 index 86825d378f3..00000000000 --- a/cmd/rpcdaemon/commands/mocks/l1_syncer_mock.go +++ /dev/null @@ -1,151 +0,0 @@ -// Code generated by MockGen. DO NOT EDIT. -// Source: /home/rr/Documentos/Iden3/cdk-erigon/zk/syncer/l1_syncer.go -// -// Generated by this command: -// -// mockgen -source /home/rr/Documentos/Iden3/cdk-erigon/zk/syncer/l1_syncer.go -destination /home/rr/Documentos/Iden3/cdk-erigon/cmd/rpcdaemon/commands/mock/l1_syncer_mock.go -package=mocks -// - -// Package mocks is a generated GoMock package. -package mocks - -import ( - context "context" - big "math/big" - reflect "reflect" - - common "github.com/ledgerwatch/erigon-lib/common" - ethereum "github.com/ledgerwatch/erigon" - types "github.com/ledgerwatch/erigon/core/types" - gomock "go.uber.org/mock/gomock" -) - -// MockIEtherman is a mock of IEtherman interface. -type MockIEtherman struct { - ctrl *gomock.Controller - recorder *MockIEthermanMockRecorder -} - -// MockIEthermanMockRecorder is the mock recorder for MockIEtherman. -type MockIEthermanMockRecorder struct { - mock *MockIEtherman -} - -// NewMockIEtherman creates a new mock instance. 
-func NewMockIEtherman(ctrl *gomock.Controller) *MockIEtherman { - mock := &MockIEtherman{ctrl: ctrl} - mock.recorder = &MockIEthermanMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockIEtherman) EXPECT() *MockIEthermanMockRecorder { - return m.recorder -} - -// BlockByNumber mocks base method. -func (m *MockIEtherman) BlockByNumber(ctx context.Context, blockNumber *big.Int) (*types.Block, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "BlockByNumber", ctx, blockNumber) - ret0, _ := ret[0].(*types.Block) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// BlockByNumber indicates an expected call of BlockByNumber. -func (mr *MockIEthermanMockRecorder) BlockByNumber(ctx, blockNumber any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BlockByNumber", reflect.TypeOf((*MockIEtherman)(nil).BlockByNumber), ctx, blockNumber) -} - -// CallContract mocks base method. -func (m *MockIEtherman) CallContract(ctx context.Context, msg ethereum.CallMsg, blockNumber *big.Int) ([]byte, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "CallContract", ctx, msg, blockNumber) - ret0, _ := ret[0].([]byte) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// CallContract indicates an expected call of CallContract. -func (mr *MockIEthermanMockRecorder) CallContract(ctx, msg, blockNumber any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CallContract", reflect.TypeOf((*MockIEtherman)(nil).CallContract), ctx, msg, blockNumber) -} - -// CallContract indicates an expected call of CallContract. 
-func (m *MockIEtherman) StorageAt(ctx context.Context, contract common.Address, key common.Hash, blockNumber *big.Int) ([]byte, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "StorageAt", ctx, contract, key, blockNumber) - ret0, _ := ret[0].([]byte) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// CallContract indicates an expected call of CallContract. -func (mr *MockIEthermanMockRecorder) StorageAt(ctx, contract, key, blockNumber any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StorageAt", reflect.TypeOf((*MockIEtherman)(nil).StorageAt), ctx, contract, key, blockNumber) -} - - -// FilterLogs mocks base method. -func (m *MockIEtherman) FilterLogs(ctx context.Context, query ethereum.FilterQuery) ([]types.Log, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "FilterLogs", ctx, query) - ret0, _ := ret[0].([]types.Log) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// FilterLogs indicates an expected call of FilterLogs. -func (mr *MockIEthermanMockRecorder) FilterLogs(ctx, query any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FilterLogs", reflect.TypeOf((*MockIEtherman)(nil).FilterLogs), ctx, query) -} - -// HeaderByNumber mocks base method. -func (m *MockIEtherman) HeaderByNumber(ctx context.Context, blockNumber *big.Int) (*types.Header, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "HeaderByNumber", ctx, blockNumber) - ret0, _ := ret[0].(*types.Header) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// HeaderByNumber indicates an expected call of HeaderByNumber. -func (mr *MockIEthermanMockRecorder) HeaderByNumber(ctx, blockNumber any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HeaderByNumber", reflect.TypeOf((*MockIEtherman)(nil).HeaderByNumber), ctx, blockNumber) -} - -// TransactionByHash mocks base method. 
-func (m *MockIEtherman) TransactionByHash(ctx context.Context, hash common.Hash) (types.Transaction, bool, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "TransactionByHash", ctx, hash) - ret0, _ := ret[0].(types.Transaction) - ret1, _ := ret[1].(bool) - ret2, _ := ret[2].(error) - return ret0, ret1, ret2 -} - -// TransactionByHash indicates an expected call of TransactionByHash. -func (mr *MockIEthermanMockRecorder) TransactionByHash(ctx, hash any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TransactionByHash", reflect.TypeOf((*MockIEtherman)(nil).TransactionByHash), ctx, hash) -} - -// TransactionReceipt mocks base method. -func (m *MockIEtherman) TransactionReceipt(ctx context.Context, txHash common.Hash) (*types.Receipt, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "TransactionReceipt", ctx, txHash) - ret0, _ := ret[0].(*types.Receipt) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// TransactionReceipt indicates an expected call of TransactionReceipt. 
-func (mr *MockIEthermanMockRecorder) TransactionReceipt(ctx, txHash any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TransactionReceipt", reflect.TypeOf((*MockIEtherman)(nil).TransactionReceipt), ctx, txHash) -} diff --git a/cmd/txpool/main.go b/cmd/txpool/main.go index 83315c3c7f9..82c31c7435c 100644 --- a/cmd/txpool/main.go +++ b/cmd/txpool/main.go @@ -59,7 +59,6 @@ var ( noTxGossip bool - commitEvery time.Duration // For X Layer enableWhiteList bool @@ -71,6 +70,10 @@ var ( freeGasExAddrs []string freeGasCountPerAddr uint64 freeGasLimit uint64 + + commitEvery time.Duration + purgeEvery time.Duration + purgeDistance time.Duration ) func init() { @@ -96,6 +99,8 @@ func init() { rootCmd.PersistentFlags().Uint64Var(&priceBump, "txpool.pricebump", txpoolcfg.DefaultConfig.PriceBump, "Price bump percentage to replace an already existing transaction") rootCmd.PersistentFlags().Uint64Var(&blobPriceBump, "txpool.blobpricebump", txpoolcfg.DefaultConfig.BlobPriceBump, "Price bump percentage to replace an existing blob (type-3) transaction") rootCmd.PersistentFlags().DurationVar(&commitEvery, utils.TxPoolCommitEveryFlag.Name, utils.TxPoolCommitEveryFlag.Value, utils.TxPoolCommitEveryFlag.Usage) + rootCmd.PersistentFlags().DurationVar(&purgeEvery, utils.TxpoolPurgeEveryFlag.Name, utils.TxpoolPurgeEveryFlag.Value, utils.TxpoolPurgeEveryFlag.Usage) + rootCmd.PersistentFlags().DurationVar(&purgeDistance, utils.TxpoolPurgeDistanceFlag.Name, utils.TxpoolPurgeDistanceFlag.Value, utils.TxpoolPurgeDistanceFlag.Usage) rootCmd.PersistentFlags().BoolVar(&noTxGossip, utils.TxPoolGossipDisableFlag.Name, utils.TxPoolGossipDisableFlag.Value, utils.TxPoolGossipDisableFlag.Usage) rootCmd.Flags().StringSliceVar(&traceSenders, utils.TxPoolTraceSendersFlag.Name, []string{}, utils.TxPoolTraceSendersFlag.Usage) // For X Layer @@ -165,6 +170,8 @@ func doTxpool(ctx context.Context, logger log.Logger) error { cfg.DBDir = dirs.TxPool 
cfg.CommitEvery = common2.RandomizeDuration(commitEvery) + cfg.PurgeEvery = common2.RandomizeDuration(purgeEvery) + cfg.PurgeDistance = purgeDistance cfg.PendingSubPoolLimit = pendingPoolLimit cfg.BaseFeeSubPoolLimit = baseFeePoolLimit cfg.QueuedSubPoolLimit = queuedPoolLimit diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index e99fb7cdbfb..f4cb4aaa88f 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -237,6 +237,16 @@ var ( Usage: "How often transactions should be committed to the storage", Value: txpoolcfg.DefaultConfig.CommitEvery, } + TxpoolPurgeEveryFlag = cli.DurationFlag{ + Name: "txpool.purge.every", + Usage: "How often transactions should be purged from the storage", + Value: txpoolcfg.DefaultConfig.PurgeEvery, + } + TxpoolPurgeDistanceFlag = cli.DurationFlag{ + Name: "txpool.purge.distance", + Usage: "Transactions older than this distance will be purged", + Value: txpoolcfg.DefaultConfig.PurgeDistance, + } // Miner settings MiningEnabledFlag = cli.BoolFlag{ Name: "mine", @@ -763,6 +773,16 @@ var ( Usage: "Mock the witness generation", Value: false, } + WitnessCacheEnable = cli.BoolFlag{ + Name: "zkevm.witness-cache-enable", + Usage: "Enable witness cache", + Value: false, + } + WitnessCacheLimit = cli.UintFlag{ + Name: "zkevm.witness-cache-limit", + Usage: "Amount of blocks behind the last executed one to keep witnesses for. Needs a lot of HDD space. 
Default value 10 000.", + Value: 10000, + } WitnessContractInclusion = cli.StringFlag{ Name: "zkevm.witness-contract-inclusion", Usage: "Contracts that will have all of their storage added to the witness every time", @@ -1925,6 +1945,12 @@ func setTxPool(ctx *cli.Context, fullCfg *ethconfig.Config) { // For X Layer setTxPoolXLayer(ctx, cfg) + + purgeEvery := ctx.Duration(TxpoolPurgeEveryFlag.Name) + purgeDistance := ctx.Duration(TxpoolPurgeDistanceFlag.Name) + + fullCfg.TxPool.PurgeEvery = common2.RandomizeDuration(purgeEvery) + fullCfg.TxPool.PurgeDistance = purgeDistance } func setEthash(ctx *cli.Context, datadir string, cfg *ethconfig.Config) { diff --git a/consensus/consensus.go b/consensus/consensus.go index 334b7f85a1f..7e8a6e4d5cf 100644 --- a/consensus/consensus.go +++ b/consensus/consensus.go @@ -104,6 +104,8 @@ type Reward struct { } // Engine is an algorithm agnostic consensus engine. +// +//go:generate mockgen -typed=true -destination=./engine_mock.go -package=consensus . Engine type Engine interface { EngineReader EngineWriter diff --git a/consensus/engine_mock.go b/consensus/engine_mock.go new file mode 100644 index 00000000000..0043ae89875 --- /dev/null +++ b/consensus/engine_mock.go @@ -0,0 +1,659 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/ledgerwatch/erigon/consensus (interfaces: Engine) +// +// Generated by this command: +// +// mockgen -typed=true -destination=./engine_mock.go -package=consensus . Engine +// + +// Package consensus is a generated GoMock package. +package consensus + +import ( + big "math/big" + reflect "reflect" + + chain "github.com/ledgerwatch/erigon-lib/chain" + common "github.com/ledgerwatch/erigon-lib/common" + state "github.com/ledgerwatch/erigon/core/state" + types "github.com/ledgerwatch/erigon/core/types" + rpc "github.com/ledgerwatch/erigon/rpc" + log "github.com/ledgerwatch/log/v3" + gomock "go.uber.org/mock/gomock" +) + +// MockEngine is a mock of Engine interface. 
+type MockEngine struct { + ctrl *gomock.Controller + recorder *MockEngineMockRecorder +} + +// MockEngineMockRecorder is the mock recorder for MockEngine. +type MockEngineMockRecorder struct { + mock *MockEngine +} + +// NewMockEngine creates a new mock instance. +func NewMockEngine(ctrl *gomock.Controller) *MockEngine { + mock := &MockEngine{ctrl: ctrl} + mock.recorder = &MockEngineMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockEngine) EXPECT() *MockEngineMockRecorder { + return m.recorder +} + +// APIs mocks base method. +func (m *MockEngine) APIs(arg0 ChainHeaderReader) []rpc.API { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "APIs", arg0) + ret0, _ := ret[0].([]rpc.API) + return ret0 +} + +// APIs indicates an expected call of APIs. +func (mr *MockEngineMockRecorder) APIs(arg0 any) *MockEngineAPIsCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "APIs", reflect.TypeOf((*MockEngine)(nil).APIs), arg0) + return &MockEngineAPIsCall{Call: call} +} + +// MockEngineAPIsCall wrap *gomock.Call +type MockEngineAPIsCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockEngineAPIsCall) Return(arg0 []rpc.API) *MockEngineAPIsCall { + c.Call = c.Call.Return(arg0) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockEngineAPIsCall) Do(f func(ChainHeaderReader) []rpc.API) *MockEngineAPIsCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockEngineAPIsCall) DoAndReturn(f func(ChainHeaderReader) []rpc.API) *MockEngineAPIsCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// Author mocks base method. 
+func (m *MockEngine) Author(arg0 *types.Header) (common.Address, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Author", arg0) + ret0, _ := ret[0].(common.Address) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Author indicates an expected call of Author. +func (mr *MockEngineMockRecorder) Author(arg0 any) *MockEngineAuthorCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Author", reflect.TypeOf((*MockEngine)(nil).Author), arg0) + return &MockEngineAuthorCall{Call: call} +} + +// MockEngineAuthorCall wrap *gomock.Call +type MockEngineAuthorCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockEngineAuthorCall) Return(arg0 common.Address, arg1 error) *MockEngineAuthorCall { + c.Call = c.Call.Return(arg0, arg1) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockEngineAuthorCall) Do(f func(*types.Header) (common.Address, error)) *MockEngineAuthorCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockEngineAuthorCall) DoAndReturn(f func(*types.Header) (common.Address, error)) *MockEngineAuthorCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// CalcDifficulty mocks base method. +func (m *MockEngine) CalcDifficulty(arg0 ChainHeaderReader, arg1, arg2 uint64, arg3 *big.Int, arg4 uint64, arg5, arg6 common.Hash, arg7 uint64) *big.Int { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CalcDifficulty", arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7) + ret0, _ := ret[0].(*big.Int) + return ret0 +} + +// CalcDifficulty indicates an expected call of CalcDifficulty. 
+func (mr *MockEngineMockRecorder) CalcDifficulty(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7 any) *MockEngineCalcDifficultyCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CalcDifficulty", reflect.TypeOf((*MockEngine)(nil).CalcDifficulty), arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7) + return &MockEngineCalcDifficultyCall{Call: call} +} + +// MockEngineCalcDifficultyCall wrap *gomock.Call +type MockEngineCalcDifficultyCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockEngineCalcDifficultyCall) Return(arg0 *big.Int) *MockEngineCalcDifficultyCall { + c.Call = c.Call.Return(arg0) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockEngineCalcDifficultyCall) Do(f func(ChainHeaderReader, uint64, uint64, *big.Int, uint64, common.Hash, common.Hash, uint64) *big.Int) *MockEngineCalcDifficultyCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockEngineCalcDifficultyCall) DoAndReturn(f func(ChainHeaderReader, uint64, uint64, *big.Int, uint64, common.Hash, common.Hash, uint64) *big.Int) *MockEngineCalcDifficultyCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// CalculateRewards mocks base method. +func (m *MockEngine) CalculateRewards(arg0 *chain.Config, arg1 *types.Header, arg2 []*types.Header, arg3 SystemCall) ([]Reward, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CalculateRewards", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].([]Reward) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CalculateRewards indicates an expected call of CalculateRewards. 
+func (mr *MockEngineMockRecorder) CalculateRewards(arg0, arg1, arg2, arg3 any) *MockEngineCalculateRewardsCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CalculateRewards", reflect.TypeOf((*MockEngine)(nil).CalculateRewards), arg0, arg1, arg2, arg3) + return &MockEngineCalculateRewardsCall{Call: call} +} + +// MockEngineCalculateRewardsCall wrap *gomock.Call +type MockEngineCalculateRewardsCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockEngineCalculateRewardsCall) Return(arg0 []Reward, arg1 error) *MockEngineCalculateRewardsCall { + c.Call = c.Call.Return(arg0, arg1) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockEngineCalculateRewardsCall) Do(f func(*chain.Config, *types.Header, []*types.Header, SystemCall) ([]Reward, error)) *MockEngineCalculateRewardsCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockEngineCalculateRewardsCall) DoAndReturn(f func(*chain.Config, *types.Header, []*types.Header, SystemCall) ([]Reward, error)) *MockEngineCalculateRewardsCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// Close mocks base method. +func (m *MockEngine) Close() error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Close") + ret0, _ := ret[0].(error) + return ret0 +} + +// Close indicates an expected call of Close. 
+func (mr *MockEngineMockRecorder) Close() *MockEngineCloseCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockEngine)(nil).Close)) + return &MockEngineCloseCall{Call: call} +} + +// MockEngineCloseCall wrap *gomock.Call +type MockEngineCloseCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockEngineCloseCall) Return(arg0 error) *MockEngineCloseCall { + c.Call = c.Call.Return(arg0) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockEngineCloseCall) Do(f func() error) *MockEngineCloseCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockEngineCloseCall) DoAndReturn(f func() error) *MockEngineCloseCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// Finalize mocks base method. +func (m *MockEngine) Finalize(arg0 *chain.Config, arg1 *types.Header, arg2 *state.IntraBlockState, arg3 types.Transactions, arg4 []*types.Header, arg5 types.Receipts, arg6 []*types.Withdrawal, arg7 ChainReader, arg8 SystemCall, arg9 log.Logger) (types.Transactions, types.Receipts, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Finalize", arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9) + ret0, _ := ret[0].(types.Transactions) + ret1, _ := ret[1].(types.Receipts) + ret2, _ := ret[2].(error) + return ret0, ret1, ret2 +} + +// Finalize indicates an expected call of Finalize. 
+func (mr *MockEngineMockRecorder) Finalize(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9 any) *MockEngineFinalizeCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Finalize", reflect.TypeOf((*MockEngine)(nil).Finalize), arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9) + return &MockEngineFinalizeCall{Call: call} +} + +// MockEngineFinalizeCall wrap *gomock.Call +type MockEngineFinalizeCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockEngineFinalizeCall) Return(arg0 types.Transactions, arg1 types.Receipts, arg2 error) *MockEngineFinalizeCall { + c.Call = c.Call.Return(arg0, arg1, arg2) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockEngineFinalizeCall) Do(f func(*chain.Config, *types.Header, *state.IntraBlockState, types.Transactions, []*types.Header, types.Receipts, []*types.Withdrawal, ChainReader, SystemCall, log.Logger) (types.Transactions, types.Receipts, error)) *MockEngineFinalizeCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockEngineFinalizeCall) DoAndReturn(f func(*chain.Config, *types.Header, *state.IntraBlockState, types.Transactions, []*types.Header, types.Receipts, []*types.Withdrawal, ChainReader, SystemCall, log.Logger) (types.Transactions, types.Receipts, error)) *MockEngineFinalizeCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// FinalizeAndAssemble mocks base method. 
+func (m *MockEngine) FinalizeAndAssemble(arg0 *chain.Config, arg1 *types.Header, arg2 *state.IntraBlockState, arg3 types.Transactions, arg4 []*types.Header, arg5 types.Receipts, arg6 []*types.Withdrawal, arg7 ChainReader, arg8 SystemCall, arg9 Call, arg10 log.Logger) (*types.Block, types.Transactions, types.Receipts, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "FinalizeAndAssemble", arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10) + ret0, _ := ret[0].(*types.Block) + ret1, _ := ret[1].(types.Transactions) + ret2, _ := ret[2].(types.Receipts) + ret3, _ := ret[3].(error) + return ret0, ret1, ret2, ret3 +} + +// FinalizeAndAssemble indicates an expected call of FinalizeAndAssemble. +func (mr *MockEngineMockRecorder) FinalizeAndAssemble(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10 any) *MockEngineFinalizeAndAssembleCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FinalizeAndAssemble", reflect.TypeOf((*MockEngine)(nil).FinalizeAndAssemble), arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10) + return &MockEngineFinalizeAndAssembleCall{Call: call} +} + +// MockEngineFinalizeAndAssembleCall wrap *gomock.Call +type MockEngineFinalizeAndAssembleCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockEngineFinalizeAndAssembleCall) Return(arg0 *types.Block, arg1 types.Transactions, arg2 types.Receipts, arg3 error) *MockEngineFinalizeAndAssembleCall { + c.Call = c.Call.Return(arg0, arg1, arg2, arg3) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockEngineFinalizeAndAssembleCall) Do(f func(*chain.Config, *types.Header, *state.IntraBlockState, types.Transactions, []*types.Header, types.Receipts, []*types.Withdrawal, ChainReader, SystemCall, Call, log.Logger) (*types.Block, types.Transactions, types.Receipts, error)) *MockEngineFinalizeAndAssembleCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite 
*gomock.Call.DoAndReturn +func (c *MockEngineFinalizeAndAssembleCall) DoAndReturn(f func(*chain.Config, *types.Header, *state.IntraBlockState, types.Transactions, []*types.Header, types.Receipts, []*types.Withdrawal, ChainReader, SystemCall, Call, log.Logger) (*types.Block, types.Transactions, types.Receipts, error)) *MockEngineFinalizeAndAssembleCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// GenerateSeal mocks base method. +func (m *MockEngine) GenerateSeal(arg0 ChainHeaderReader, arg1, arg2 *types.Header, arg3 Call) []byte { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GenerateSeal", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].([]byte) + return ret0 +} + +// GenerateSeal indicates an expected call of GenerateSeal. +func (mr *MockEngineMockRecorder) GenerateSeal(arg0, arg1, arg2, arg3 any) *MockEngineGenerateSealCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GenerateSeal", reflect.TypeOf((*MockEngine)(nil).GenerateSeal), arg0, arg1, arg2, arg3) + return &MockEngineGenerateSealCall{Call: call} +} + +// MockEngineGenerateSealCall wrap *gomock.Call +type MockEngineGenerateSealCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockEngineGenerateSealCall) Return(arg0 []byte) *MockEngineGenerateSealCall { + c.Call = c.Call.Return(arg0) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockEngineGenerateSealCall) Do(f func(ChainHeaderReader, *types.Header, *types.Header, Call) []byte) *MockEngineGenerateSealCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockEngineGenerateSealCall) DoAndReturn(f func(ChainHeaderReader, *types.Header, *types.Header, Call) []byte) *MockEngineGenerateSealCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// Initialize mocks base method. 
+func (m *MockEngine) Initialize(arg0 *chain.Config, arg1 ChainHeaderReader, arg2 *types.Header, arg3 *state.IntraBlockState, arg4 SysCallCustom, arg5 log.Logger) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "Initialize", arg0, arg1, arg2, arg3, arg4, arg5) +} + +// Initialize indicates an expected call of Initialize. +func (mr *MockEngineMockRecorder) Initialize(arg0, arg1, arg2, arg3, arg4, arg5 any) *MockEngineInitializeCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Initialize", reflect.TypeOf((*MockEngine)(nil).Initialize), arg0, arg1, arg2, arg3, arg4, arg5) + return &MockEngineInitializeCall{Call: call} +} + +// MockEngineInitializeCall wrap *gomock.Call +type MockEngineInitializeCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockEngineInitializeCall) Return() *MockEngineInitializeCall { + c.Call = c.Call.Return() + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockEngineInitializeCall) Do(f func(*chain.Config, ChainHeaderReader, *types.Header, *state.IntraBlockState, SysCallCustom, log.Logger)) *MockEngineInitializeCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockEngineInitializeCall) DoAndReturn(f func(*chain.Config, ChainHeaderReader, *types.Header, *state.IntraBlockState, SysCallCustom, log.Logger)) *MockEngineInitializeCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// IsServiceTransaction mocks base method. +func (m *MockEngine) IsServiceTransaction(arg0 common.Address, arg1 SystemCall) bool { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "IsServiceTransaction", arg0, arg1) + ret0, _ := ret[0].(bool) + return ret0 +} + +// IsServiceTransaction indicates an expected call of IsServiceTransaction. 
+func (mr *MockEngineMockRecorder) IsServiceTransaction(arg0, arg1 any) *MockEngineIsServiceTransactionCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsServiceTransaction", reflect.TypeOf((*MockEngine)(nil).IsServiceTransaction), arg0, arg1) + return &MockEngineIsServiceTransactionCall{Call: call} +} + +// MockEngineIsServiceTransactionCall wrap *gomock.Call +type MockEngineIsServiceTransactionCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockEngineIsServiceTransactionCall) Return(arg0 bool) *MockEngineIsServiceTransactionCall { + c.Call = c.Call.Return(arg0) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockEngineIsServiceTransactionCall) Do(f func(common.Address, SystemCall) bool) *MockEngineIsServiceTransactionCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockEngineIsServiceTransactionCall) DoAndReturn(f func(common.Address, SystemCall) bool) *MockEngineIsServiceTransactionCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// Prepare mocks base method. +func (m *MockEngine) Prepare(arg0 ChainHeaderReader, arg1 *types.Header, arg2 *state.IntraBlockState) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Prepare", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// Prepare indicates an expected call of Prepare. 
+func (mr *MockEngineMockRecorder) Prepare(arg0, arg1, arg2 any) *MockEnginePrepareCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Prepare", reflect.TypeOf((*MockEngine)(nil).Prepare), arg0, arg1, arg2) + return &MockEnginePrepareCall{Call: call} +} + +// MockEnginePrepareCall wrap *gomock.Call +type MockEnginePrepareCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockEnginePrepareCall) Return(arg0 error) *MockEnginePrepareCall { + c.Call = c.Call.Return(arg0) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockEnginePrepareCall) Do(f func(ChainHeaderReader, *types.Header, *state.IntraBlockState) error) *MockEnginePrepareCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockEnginePrepareCall) DoAndReturn(f func(ChainHeaderReader, *types.Header, *state.IntraBlockState) error) *MockEnginePrepareCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// Seal mocks base method. +func (m *MockEngine) Seal(arg0 ChainHeaderReader, arg1 *types.Block, arg2 chan<- *types.Block, arg3 <-chan struct{}) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Seal", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(error) + return ret0 +} + +// Seal indicates an expected call of Seal. 
+func (mr *MockEngineMockRecorder) Seal(arg0, arg1, arg2, arg3 any) *MockEngineSealCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Seal", reflect.TypeOf((*MockEngine)(nil).Seal), arg0, arg1, arg2, arg3) + return &MockEngineSealCall{Call: call} +} + +// MockEngineSealCall wrap *gomock.Call +type MockEngineSealCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockEngineSealCall) Return(arg0 error) *MockEngineSealCall { + c.Call = c.Call.Return(arg0) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockEngineSealCall) Do(f func(ChainHeaderReader, *types.Block, chan<- *types.Block, <-chan struct{}) error) *MockEngineSealCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockEngineSealCall) DoAndReturn(f func(ChainHeaderReader, *types.Block, chan<- *types.Block, <-chan struct{}) error) *MockEngineSealCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// SealHash mocks base method. +func (m *MockEngine) SealHash(arg0 *types.Header) common.Hash { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SealHash", arg0) + ret0, _ := ret[0].(common.Hash) + return ret0 +} + +// SealHash indicates an expected call of SealHash. 
+func (mr *MockEngineMockRecorder) SealHash(arg0 any) *MockEngineSealHashCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SealHash", reflect.TypeOf((*MockEngine)(nil).SealHash), arg0) + return &MockEngineSealHashCall{Call: call} +} + +// MockEngineSealHashCall wrap *gomock.Call +type MockEngineSealHashCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockEngineSealHashCall) Return(arg0 common.Hash) *MockEngineSealHashCall { + c.Call = c.Call.Return(arg0) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockEngineSealHashCall) Do(f func(*types.Header) common.Hash) *MockEngineSealHashCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockEngineSealHashCall) DoAndReturn(f func(*types.Header) common.Hash) *MockEngineSealHashCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// Type mocks base method. +func (m *MockEngine) Type() chain.ConsensusName { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Type") + ret0, _ := ret[0].(chain.ConsensusName) + return ret0 +} + +// Type indicates an expected call of Type. 
+func (mr *MockEngineMockRecorder) Type() *MockEngineTypeCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Type", reflect.TypeOf((*MockEngine)(nil).Type)) + return &MockEngineTypeCall{Call: call} +} + +// MockEngineTypeCall wrap *gomock.Call +type MockEngineTypeCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockEngineTypeCall) Return(arg0 chain.ConsensusName) *MockEngineTypeCall { + c.Call = c.Call.Return(arg0) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockEngineTypeCall) Do(f func() chain.ConsensusName) *MockEngineTypeCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockEngineTypeCall) DoAndReturn(f func() chain.ConsensusName) *MockEngineTypeCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// VerifyHeader mocks base method. +func (m *MockEngine) VerifyHeader(arg0 ChainHeaderReader, arg1 *types.Header, arg2 bool) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "VerifyHeader", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// VerifyHeader indicates an expected call of VerifyHeader. 
+func (mr *MockEngineMockRecorder) VerifyHeader(arg0, arg1, arg2 any) *MockEngineVerifyHeaderCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "VerifyHeader", reflect.TypeOf((*MockEngine)(nil).VerifyHeader), arg0, arg1, arg2) + return &MockEngineVerifyHeaderCall{Call: call} +} + +// MockEngineVerifyHeaderCall wrap *gomock.Call +type MockEngineVerifyHeaderCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockEngineVerifyHeaderCall) Return(arg0 error) *MockEngineVerifyHeaderCall { + c.Call = c.Call.Return(arg0) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockEngineVerifyHeaderCall) Do(f func(ChainHeaderReader, *types.Header, bool) error) *MockEngineVerifyHeaderCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockEngineVerifyHeaderCall) DoAndReturn(f func(ChainHeaderReader, *types.Header, bool) error) *MockEngineVerifyHeaderCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// VerifyUncles mocks base method. +func (m *MockEngine) VerifyUncles(arg0 ChainReader, arg1 *types.Header, arg2 []*types.Header) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "VerifyUncles", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// VerifyUncles indicates an expected call of VerifyUncles. 
+func (mr *MockEngineMockRecorder) VerifyUncles(arg0, arg1, arg2 any) *MockEngineVerifyUnclesCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "VerifyUncles", reflect.TypeOf((*MockEngine)(nil).VerifyUncles), arg0, arg1, arg2) + return &MockEngineVerifyUnclesCall{Call: call} +} + +// MockEngineVerifyUnclesCall wrap *gomock.Call +type MockEngineVerifyUnclesCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockEngineVerifyUnclesCall) Return(arg0 error) *MockEngineVerifyUnclesCall { + c.Call = c.Call.Return(arg0) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockEngineVerifyUnclesCall) Do(f func(ChainReader, *types.Header, []*types.Header) error) *MockEngineVerifyUnclesCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockEngineVerifyUnclesCall) DoAndReturn(f func(ChainReader, *types.Header, []*types.Header) error) *MockEngineVerifyUnclesCall { + c.Call = c.Call.DoAndReturn(f) + return c +} diff --git a/core/blockchain_zkevm.go b/core/blockchain_zkevm.go index ab972b7b2c6..3d49a32637b 100644 --- a/core/blockchain_zkevm.go +++ b/core/blockchain_zkevm.go @@ -87,7 +87,7 @@ func ExecuteBlockEphemerallyZk( blockContext, _, ger, l1Blockhash, err := PrepareBlockTxExecution(chainConfig, vmConfig, blockHashFunc, nil, engine, chainReader, block, ibs, roHermezDb, blockGasLimit) if err != nil { - return nil, err + return nil, fmt.Errorf("PrepareBlockTxExecution: %w", err) } blockNum := block.NumberU64() @@ -98,22 +98,28 @@ func ExecuteBlockEphemerallyZk( ibs.SetTxContext(tx.Hash(), block.Hash(), txIndex) writeTrace := false if vmConfig.Debug && vmConfig.Tracer == nil { - tracer, err := getTracer(txIndex, tx.Hash()) - if err != nil { - return nil, fmt.Errorf("could not obtain tracer: %w", err) + if vmConfig.Tracer, err = getTracer(txIndex, tx.Hash()); err != nil { + return nil, fmt.Errorf("getTracer: %w", err) } - vmConfig.Tracer = tracer 
writeTrace = true } txHash := tx.Hash() evm, effectiveGasPricePercentage, err := PrepareForTxExecution(chainConfig, vmConfig, blockContext, roHermezDb, ibs, block, &txHash, txIndex) if err != nil { - return nil, err + return nil, fmt.Errorf("PrepareForTxExecution: %w", err) } receipt, execResult, innerTxs, err := ApplyTransaction_zkevm(chainConfig, engine, evm, gp, ibs, state.NewNoopWriter(), header, tx, usedGas, effectiveGasPricePercentage, true) if err != nil { - return nil, err + if !vmConfig.StatelessExec { + return nil, fmt.Errorf("ApplyTransaction_zkevm tx %d from block %d [%v]: %w", txIndex, block.NumberU64(), tx.Hash().Hex(), err) + } + rejectedTxs = append(rejectedTxs, &RejectedTx{txIndex, err.Error()}) + } else { + includedTxs = append(includedTxs, tx) + if !vmConfig.NoReceipts { + receipts = append(receipts, receipt) + } } if writeTrace { if ftracer, ok := vmConfig.Tracer.(vm.FlushableTracer); ok { @@ -125,36 +131,25 @@ func ExecuteBlockEphemerallyZk( localReceipt := CreateReceiptForBlockInfoTree(receipt, chainConfig, blockNum, execResult) if err = ProcessReceiptForBlockExecution(receipt, roHermezDb, chainConfig, blockNum, header, tx); err != nil { - return nil, err + return nil, fmt.Errorf("ProcessReceiptForBlockExecution: %w", err) } - if err != nil { - if !vmConfig.StatelessExec { - return nil, fmt.Errorf("could not apply tx %d from block %d [%v]: %w", txIndex, block.NumberU64(), tx.Hash().Hex(), err) - } - rejectedTxs = append(rejectedTxs, &RejectedTx{txIndex, err.Error()}) - } else { - includedTxs = append(includedTxs, tx) - if !vmConfig.NoReceipts { - receipts = append(receipts, receipt) - } - // For X Layer - if !vmConfig.NoInnerTxs { - blockInnerTxs = append(blockInnerTxs, innerTxs) - } + // For X Layer + if !vmConfig.NoInnerTxs { + blockInnerTxs = append(blockInnerTxs, innerTxs) } + if !chainConfig.IsForkID7Etrog(block.NumberU64()) && !chainConfig.IsNormalcy(block.NumberU64()) { if err := ibs.ScalableSetSmtRootHash(roHermezDb); err != nil { - 
return nil, err + return nil, fmt.Errorf("ScalableSetSmtRootHash: %w", err) } } txSender, ok := tx.GetSender() if !ok { signer := types.MakeSigner(chainConfig, blockNum, block.Time()) - txSender, err = tx.Sender(*signer) - if err != nil { - return nil, err + if txSender, err = tx.Sender(*signer); err != nil { + return nil, fmt.Errorf("tx.Sender: %w", err) } } @@ -168,7 +163,7 @@ func ExecuteBlockEphemerallyZk( var l2InfoRoot *libcommon.Hash if chainConfig.IsForkID7Etrog(blockNum) { - l2InfoRoot, err = blockinfo.BuildBlockInfoTree( + if l2InfoRoot, err = blockinfo.BuildBlockInfoTree( &header.Coinbase, header.Number.Uint64(), header.Time, @@ -178,9 +173,8 @@ func ExecuteBlockEphemerallyZk( *l1Blockhash, *prevBlockRoot, &txInfos, - ) - if err != nil { - return nil, err + ); err != nil { + return nil, fmt.Errorf("BuildBlockInfoTree: %w", err) } } @@ -208,7 +202,7 @@ func ExecuteBlockEphemerallyZk( if !vmConfig.ReadOnly { txs := blockTransactions if _, _, _, err := FinalizeBlockExecution(engine, stateReader, block.Header(), txs, block.Uncles(), stateWriter, chainConfig, ibs, receipts, block.Withdrawals(), chainReader, false, log.New()); err != nil { - return nil, err + return nil, fmt.Errorf("FinalizeBlockExecution: %w", err) } } blockLogs := ibs.Logs() @@ -256,7 +250,7 @@ func PrepareBlockTxExecution( if !vmConfig.ReadOnly { if err := InitializeBlockExecution(engine, chainReader, block.Header(), chainConfig, ibs, log.Root()); err != nil { - return nil, nil, nil, nil, err + return nil, nil, nil, nil, fmt.Errorf("InitializeBlockExecution: %w", err) } } @@ -270,36 +264,36 @@ func PrepareBlockTxExecution( //[zkevm] - get the last batch number so we can check for empty batches in between it and the new one lastBatchInserted, err := roHermezDb.GetBatchNoByL2Block(blockNum - 1) if err != nil && !errors.Is(err, hermez_db.ErrorNotStored) { - return nil, nil, nil, nil, fmt.Errorf("failed to get last batch inserted: %v", err) + return nil, nil, nil, nil, 
fmt.Errorf("GetBatchNoByL2Block: %w", err) } // write batches between last block and this if they exist currentBatch, err := roHermezDb.GetBatchNoByL2Block(blockNum) if err != nil && !errors.Is(err, hermez_db.ErrorNotStored) { - return nil, nil, nil, nil, err + return nil, nil, nil, nil, fmt.Errorf("GetBatchNoByL2Block: %w", err) } //[zkevm] get batches between last block and this one // plus this blocks ger gersInBetween, err := roHermezDb.GetBatchGlobalExitRoots(lastBatchInserted, currentBatch) if err != nil { - return nil, nil, nil, nil, err + return nil, nil, nil, nil, fmt.Errorf("GetBatchGlobalExitRoots: %w", err) } blockGer, err := roHermezDb.GetBlockGlobalExitRoot(blockNum) if err != nil { - return nil, nil, nil, nil, err + return nil, nil, nil, nil, fmt.Errorf("GetBlockGlobalExitRoot: %w", err) } blockL1BlockHash, err := roHermezDb.GetBlockL1BlockHash(blockNum) if err != nil { - return nil, nil, nil, nil, err + return nil, nil, nil, nil, fmt.Errorf("GetBlockL1BlockHash: %w", err) } blockTime := block.Time() prevBlockRoot := prevBlockheader.Root l1InfoTreeIndexReused, err := roHermezDb.GetReusedL1InfoTreeIndex(blockNum) if err != nil { - return nil, nil, nil, nil, err + return nil, nil, nil, nil, fmt.Errorf("GetReusedL1InfoTreeIndex: %w", err) } ibs.SyncerPreExecuteStateSet(chainConfig, blockNum, blockTime, &prevBlockRoot, &blockGer, &blockL1BlockHash, gersInBetween, l1InfoTreeIndexReused) /////////////////////////////////////////// @@ -332,7 +326,7 @@ func ProcessReceiptForBlockExecution(receipt *types.Receipt, roHermezDb state.Re // receipt root holds the intermediate stateroot after the tx intermediateState, err := roHermezDb.GetIntermediateTxStateRoot(blockNum, tx.Hash()) if err != nil { - return err + return fmt.Errorf("GetIntermediateTxStateRoot: %w", err) } receipt.PostState = intermediateState.Bytes() } else { diff --git a/core/rawdb/accessors_chain_zkevm.go b/core/rawdb/accessors_chain_zkevm.go index f50d073eb64..e6bfe2787d0 100644 --- 
a/core/rawdb/accessors_chain_zkevm.go +++ b/core/rawdb/accessors_chain_zkevm.go @@ -6,6 +6,7 @@ import ( "fmt" "math" + "github.com/ledgerwatch/erigon-lib/common" libcommon "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/common/dbg" "github.com/ledgerwatch/erigon-lib/common/hexutility" @@ -252,3 +253,29 @@ func ReadReceipts_zkEvm(db kv.Tx, block *types.Block, senders []libcommon.Addres } return receipts } + +func ReadHeaderByNumber_zkevm(db kv.Getter, number uint64) (header *types.Header, err error) { + hash, err := ReadCanonicalHash(db, number) + if err != nil { + return nil, fmt.Errorf("ReadCanonicalHash: %w", err) + } + if hash == (common.Hash{}) { + return nil, nil + } + + return ReadHeader_zkevm(db, hash, number) +} + +// ReadHeader retrieves the block header corresponding to the hash. +func ReadHeader_zkevm(db kv.Getter, hash common.Hash, number uint64) (header *types.Header, err error) { + data := ReadHeaderRLP(db, hash, number) + if len(data) == 0 { + return nil, nil + } + + header = new(types.Header) + if err := rlp.Decode(bytes.NewReader(data), header); err != nil { + return nil, fmt.Errorf("invalid block header RLP hash: %v, err: %w", hash, err) + } + return header, nil +} diff --git a/core/rawdb/accessors_indexes_zkevm.go b/core/rawdb/accessors_indexes_zkevm.go index d4a31721196..282e05906f4 100644 --- a/core/rawdb/accessors_indexes_zkevm.go +++ b/core/rawdb/accessors_indexes_zkevm.go @@ -30,23 +30,23 @@ func WriteTxLookupEntries_zkEvm(db kv.Putter, block *types.Block) error { for _, tx := range block.Transactions() { data := block.Number().Bytes() if err := db.Put(kv.TxLookup, tx.Hash().Bytes(), data); err != nil { - return fmt.Errorf("failed to store transaction lookup entry: %W", err) + return fmt.Errorf("db.Put %s: %w", kv.TxLookup, err) } } return nil } -func TruncateTxLookupEntries_zkEvm(db kv.RwTx, fromBlockNum, toBlockNum uint64) error { +func TruncateTxLookupEntries_zkEvm(db kv.RwTx, fromBlockNum, toBlockNum 
uint64) (err error) { + var block *types.Block + for i := fromBlockNum; i <= toBlockNum; i++ { - block, err := ReadBlockByNumber(db, i) - if err != nil { - return err + if block, err = ReadBlockByNumber(db, i); err != nil { + return fmt.Errorf("ReadBlockByNumber %d: %w", i, err) } for _, tx := range block.Transactions() { if err := db.Delete(kv.TxLookup, tx.Hash().Bytes()); err != nil { - return fmt.Errorf("failed to store transaction lookup entry: %W", err) + return fmt.Errorf("db.Delete %s: %w", kv.TxLookup, err) } } } diff --git a/core/state/trie_db.go b/core/state/trie_db.go index 3a13013b83e..965562315d0 100644 --- a/core/state/trie_db.go +++ b/core/state/trie_db.go @@ -740,7 +740,7 @@ type TrieStateWriter struct { tds *TrieDbState } -func (tds *TrieDbState) TrieStateWriter() *TrieStateWriter { +func (tds *TrieDbState) NewTrieStateWriter() *TrieStateWriter { return &TrieStateWriter{tds: tds} } diff --git a/core/types/trace.go b/core/types/trace.go index 979b7fe6fbc..73d71326fab 100644 --- a/core/types/trace.go +++ b/core/types/trace.go @@ -78,7 +78,6 @@ func (t *TxnTrace) MarshalJSON() ([]byte, error) { type TxnMeta struct { ByteCode HexBytes `json:"byte_code,omitempty"` - NewTxnTrieNode HexBytes `json:"new_txn_trie_node_byte,omitempty"` NewReceiptTrieNode HexBytes `json:"new_receipt_trie_node_byte,omitempty"` GasUsed uint64 `json:"gas_used,omitempty"` } @@ -88,8 +87,6 @@ type TxnInfo struct { Meta TxnMeta `json:"meta,omitempty"` } -type BlockUsedCodeHashes []libcommon.Hash - type CombinedPreImages struct { Compact HexBytes `json:"compact,omitempty"` } diff --git a/core/vm/contracts_zkevm.go b/core/vm/contracts_zkevm.go index 895e1162820..e2ab0d1c5f3 100644 --- a/core/vm/contracts_zkevm.go +++ b/core/vm/contracts_zkevm.go @@ -300,33 +300,6 @@ func (c *bigModExp_zkevm) RequiredGas(input []byte) uint64 { input = input[:0] } - // Retrieve the operands and execute the exponentiation - var ( - base = new(big.Int).SetBytes(getData(input, 0, baseLen.Uint64())) - exp = 
new(big.Int).SetBytes(getData(input, baseLen.Uint64(), expLen.Uint64())) - mod = new(big.Int).SetBytes(getData(input, baseLen.Uint64()+expLen.Uint64(), modLen.Uint64())) - baseBitLen = base.BitLen() - expBitLen = exp.BitLen() - modBitLen = mod.BitLen() - ) - - // zk special cases - // - if mod = 0 we consume gas as normal - // - if base is 0 and mod < 8192 we consume gas as normal - // - if neither of the above are true we check for reverts and return 0 gas fee - - if modBitLen == 0 { - // consume as normal - will return 0 - } else if baseBitLen == 0 { - if modBitLen > 8192 { - return 0 - } else { - // consume as normal - will return 0 - } - } else if baseBitLen > 8192 || expBitLen > 8192 || modBitLen > 8192 { - return 0 - } - // Retrieve the head 32 bytes of exp for the adjusted exponent length var expHead *big.Int if big.NewInt(int64(len(input))).Cmp(baseLen) <= 0 { @@ -362,7 +335,16 @@ func (c *bigModExp_zkevm) RequiredGas(input []byte) uint64 { //where is x is max(length_of_MODULUS, length_of_BASE) gas = gas.Add(gas, big7) gas = gas.Div(gas, big8) + // word = ceiling(x/8) + // if gas(word) > MAX_GAS_WORD_MODEXP --> out of gas + if gas.Uint64() > 9487 { + return math.MaxUint64 + } gas.Mul(gas, gas) + // if adjExpLen > MAX_GAS_IT_MODEXP --> out of gas + if adjExpLen.Uint64() > 90000000 { + return math.MaxUint64 + } gas.Mul(gas, math.BigMax(adjExpLen, big1)) // 2. 
Different divisor (`GQUADDIVISOR`) (3) @@ -374,6 +356,21 @@ func (c *bigModExp_zkevm) RequiredGas(input []byte) uint64 { if gas.Uint64() < 200 { return 200 } + // zk special cases + // - if mod = 0 we consume gas as normal + // - if base is 0 and mod < 8192 we consume gas as normal + // - if neither of the above are true we check for reverts and return 0 gas fee + if modLen.Uint64() == 0 { + // consume as normal - will return 0 + } else if baseLen.Uint64() == 0 { + if modLen.Uint64() > 1024 { + return 0 + } else { + // consume as normal - will return 0 + } + } else if baseLen.Uint64() > 1024 || expLen.Uint64() > 1024 || modLen.Uint64() > 1024 { + return 0 + } return gas.Uint64() } gas = modexpMultComplexity(gas) @@ -383,6 +380,21 @@ func (c *bigModExp_zkevm) RequiredGas(input []byte) uint64 { if gas.BitLen() > 64 { return math.MaxUint64 } + // zk special cases + // - if mod = 0 we consume gas as normal + // - if base is 0 and mod < 8192 we consume gas as normal + // - if neither of the above are true we check for reverts and return 0 gas fee + if modLen.Uint64() == 0 { + // consume as normal - will return 0 + } else if baseLen.Uint64() == 0 { + if modLen.Uint64() > 1024 { + return 0 + } else { + // consume as normal - will return 0 + } + } else if baseLen.Uint64() > 1024 || expLen.Uint64() > 1024 || modLen.Uint64() > 1024 { + return 0 + } return gas.Uint64() } @@ -395,47 +407,58 @@ func (c *bigModExp_zkevm) Run(input []byte) ([]byte, error) { baseLen = new(big.Int).SetBytes(getData(input, 0, 32)).Uint64() expLen = new(big.Int).SetBytes(getData(input, 32, 32)).Uint64() modLen = new(big.Int).SetBytes(getData(input, 64, 32)).Uint64() - base = big.NewInt(0) - exp = big.NewInt(0) - mod = big.NewInt(0) ) - if len(input) >= 96 + int(baseLen) { - base = new(big.Int).SetBytes(getData(input, 96, uint64(baseLen))) + if modLen == 0 { + // normal execution + } else if baseLen == 0 { + if modLen > 1024 { + return nil, ErrExecutionReverted + } else { + // normal execution + } + } 
else if baseLen > 1024 || expLen > 1024 || modLen > 1024 { + return nil, ErrExecutionReverted } - if len(input) >= 96 + int(baseLen) + int(expLen) { - exp = new(big.Int).SetBytes(getData(input, 96 + uint64(baseLen), uint64(expLen))) + + var ( + base = new(big.Int).SetBytes(getData(input, 0, baseLen)) + exp = new(big.Int).SetBytes(getData(input, baseLen, expLen)) + mod = new(big.Int).SetBytes(getData(input, baseLen+expLen, modLen)) + ) + + // Extract `base`, `exp`, and `mod` with padding as needed + baseData := getData(input, 96, uint64(baseLen)) + if uint64(len(baseData)) < baseLen { + baseData = common.RightPadBytes(baseData, int(baseLen)) } - if len(input) >= 96 + int(baseLen) + int(expLen) + int(modLen) { - mod = new(big.Int).SetBytes(getData(input, 96 + uint64(baseLen) + uint64(expLen), uint64(modLen))) + base.SetBytes(baseData) + + expData := getData(input, 96+uint64(baseLen), uint64(expLen)) + if uint64(len(expData)) < expLen { + expData = common.RightPadBytes(expData, int(expLen)) } - if len(input) < 96 + int(baseLen) + int(expLen) + int(modLen) { - input = common.LeftPadBytes(input, 96 + int(baseLen) + int(expLen) + int(modLen)) + exp.SetBytes(expData) + + modData := getData(input, 96+uint64(baseLen)+uint64(expLen), uint64(modLen)) + if uint64(len(modData)) < modLen { + modData = common.RightPadBytes(modData, int(modLen)) } + mod.SetBytes(modData) // Retrieve the operands and execute the exponentiation var ( v []byte baseBitLen = base.BitLen() - expBitLen = exp.BitLen() modBitLen = mod.BitLen() ) if modBitLen == 0 { - return []byte{}, nil + return common.LeftPadBytes([]byte{}, int(modLen)), nil } if baseBitLen == 0 { - if modBitLen > 8192 { - return nil, ErrExecutionReverted - } else { - return common.LeftPadBytes([]byte{}, int(modLen)), nil - } - } - - // limit to 8192 bits for base, exp, and mod in ZK - if baseBitLen > 8192 || expBitLen > 8192 || modBitLen > 8192 { - return nil, ErrExecutionReverted + return common.LeftPadBytes([]byte{}, int(modLen)), nil 
} switch { diff --git a/core/vm/contracts_zkevm_test.go b/core/vm/contracts_zkevm_test.go index e3a8d27d3c1..890dda4f27e 100644 --- a/core/vm/contracts_zkevm_test.go +++ b/core/vm/contracts_zkevm_test.go @@ -1,69 +1,337 @@ package vm import ( - "testing" + "bytes" + "fmt" "math/big" + "testing" + + libcommon "github.com/ledgerwatch/erigon-lib/common" + "github.com/ledgerwatch/erigon/common" + "github.com/ledgerwatch/erigon/smt/pkg/utils" ) var ( - big0 = big.NewInt(0) - big10 = big.NewInt(10) - big8194 = big.NewInt(0).Lsh(big.NewInt(1), 8194) + big0 = "0x0" + big10 = "0xA" + big8194 = "0x4362992542477663717851936978935131449793056647984930769727331144758885327396477180865056938100807948895829167184628101092439483280708736254443919060310219196315625195368771958154439300912192820638787406454366552787087546170437298240485162215607545204071601010143199668800040318400106143347203621191223523809402006520781902615644021249458240059389704141174204983375715675011074785117376352222470062777399781626711300563259602464423681025754018312053305974263344188969629529771248980526071030076659596905462974889729109472300110511532180826007171046803782796673989031518735406948199987603844482062620200462245085965970061368422995866518188131145286022923313720886659881297584554541006505638064672021710493743985235686765784724749625581242661619542957739331509712669629981483974047475025439881560468087294466997834483879428025054448330826861632628266300549124108089243710259641107036642083513218529644197458275019683869291929836739053709535161089793752074107911059764290862446321738763309837359967845656970965642396469704242225935055025258110445450634513533474484631974552083514150702181347159662776937735822665260280348854141881022681248016522901983338033429758615315744308323914202315651871629411120219742486244363183380691816463891709919510110954240032816474235720019110994911047415254041975362327446394608846423841233425623767284758856151474904877925994910414614465195424699289652133941755141296099005677
668732049124312838917214149491218297488380914814491105455781163479225033689420594030284158477549798519467232755078651263112612317572716372574593363046954327278252011977691163019820245155273233720318594772928716635063672142260864628461611968481104622431492431749909867366113451950834797800608124927450376812342775355778628245384946816936107284411835819804788348306184746491185178145806483026037404075624095095286158131104834707914358927865321235573218660677744740313402566273347778120205749965245195337469060954381619617093823714895798100908738469617471419018441897508079072102306423524152309082830871768887908361541754343376381968464399410155615898622815775892344363722387853443070119859753658392742823855018245989423311254495333155625236017152069285699234655850685334112036931033403492238456474895125688405840794462989547420387584356756721765358234099291470164452850555174702270601361451880127720093591313861274188952976928112060758758641675291904003261750442609017080654382603501735404588492856909066421614327125876363226305875802350647988746022661902863171584" ) +func uint64To32Bytes(input int) []byte { + bigInt := new(big.Int).SetUint64(uint64(input)) + bytes := bigInt.Bytes() + result := make([]byte, 32) + copy(result[32-len(bytes):], bytes) + return result +} + +func uint64ToDeterminedBytes(input *big.Int, length int) []byte { + bytes := input.Bytes() + result := make([]byte, length) + copy(result[length-len(bytes):], bytes) + return result +} + +// This relies on PrecompiledContractsForkID13Durian. 
+func testPrecompiledZkevm(t *testing.T, addr string, test precompiledTest) { + p := PrecompiledContractsForkID13Durian[libcommon.HexToAddress(addr)] + in := libcommon.Hex2Bytes(test.Input) + gas := p.RequiredGas(in) + t.Run(fmt.Sprintf("%s-Gas=%d", test.Name, gas), func(t *testing.T) { + t.Parallel() + if res, _, err := RunPrecompiledContract(p, in, gas); err != nil { + t.Error(err) + } else if common.Bytes2Hex(res) != test.Expected { + t.Errorf("Expected %v, got %v", test.Expected, common.Bytes2Hex(res)) + } + if expGas := test.Gas; expGas != gas { + t.Errorf("%v: gas wrong, expected %d, got %d", test.Name, expGas, gas) + } + // Verify that the precompile did not touch the input buffer + exp := libcommon.Hex2Bytes(test.Input) + if !bytes.Equal(in, exp) { + t.Errorf("Precompiled %v modified input data", addr) + } + }) +} + +func testJsonZkevm(name, addr string, t *testing.T) { + tests, err := loadJson(name) + if err != nil { + t.Fatal(err) + } + for _, test := range tests { + testPrecompiledZkevm(t, addr, test) + } +} + func Test_ModExpZkevm_Gas(t *testing.T) { modExp := bigModExp_zkevm{enabled: true, eip2565: true} cases := map[string]struct { - base *big.Int - exp *big.Int - mod *big.Int - expected uint64 + lenBase int + lenExp int + lenMod int + base string + exp string + mod string + nonZeroGas bool + revert bool }{ - "simple test": {big10, big10, big10, 200}, - "0 mod - normal gas": {big10, big10, big0, 200}, - "base 0 - mod < 8192 - normal gas": {big0, big10, big10, 200}, - "base 0 - mod > 8192 - 0 gas": {big0, big10, big8194, 0}, - "base over 8192 - 0 gas": {big8194, big10, big10, 0}, - "exp over 8192 - 0 gas": {big10, big8194, big10, 0}, - "mod over 8192 - 0 gas": {big10, big10, big8194, 0}, + "simple test": { + 1, + 1, + 1, + big10, + big10, + big10, + true, + false, + }, + "0 mod - normal gas": { + 1, + 1, + 1, + big10, + big10, + big0, + true, + false, + }, + "base 0 - mod < 8192 - normal gas": { + 1, + 1, + 1, + big0, + big10, + big10, + true, + false, 
+ }, + "base 0 - mod > 8192 - 0 gas": { + 1, + 1, + 1234, + big0, + big10, + big8194, + false, + true, + }, + "base over 8192 - 0 gas": { + 1234, + 1, + 1, + big8194, + big10, + big10, + false, + true, + }, + "exp over 8192 - 0 gas": { + 1, + 1234, + 1, + big10, + big8194, + big10, + false, + true, + }, + "mod over 8192 - 0 gas": { + 1, + 1, + 1234, + big10, + big10, + big8194, + false, + true, + }, + // tests beyond here are taken from the test vectors here https://github.com/0xPolygonHermez/zkevm-testvectors/blob/2b70027e11a427c15994713b41ef9b6794c2f3bb/tools-inputs/data/calldata/pre-modexp.json#L787 + "pre-modexp-test-case_0": { + 64, + 32, + 32, + "0x00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000001", + "0x1111111111000000000000000000000000000000000000000000000000000000", + true, + false, + }, + "pre-modexp_0": { + 32, + 32, + 32, + "0x0000000000000000000000000000000000000000000000000000000000000007", + "0x0000000000000000000000000000000000000000000000000000000000000008", + "0x0000000000000000000000000000000000000000000000000000000000000009", + true, + false, + }, + "pre-modexp_1": { + 64, + 32, + 32, + "0x00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000001", + "0x0000000000000000000000000000000000000000000000000000000000000009", + true, + false, + }, + "pre-modexp_2": { + 64, + 32, + 34, + "0x00000000000000000000000000000000000000000000000000000000000001110000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000001000", + "0x00000000000000000000000000000000000000000000000000000000000000ffffff", + true, + false, + }, + "pre-modexp_3": { + 1025, + 32, + 32, + big8194, + "0x1000", + 
"0x0000000000000000000000000000000000000000000000000000000000ffffff0000000000000000000000000000000000000000000000000000000000000000", + false, + true, + }, + "pre-modexp_4": { + 32, + 1025, + 32, + "0x0000000000000000000000000000000000000000000000000000000000000001", + big8194, + "0x0000000000000000000000000000000000000000000000000000000000000001", + false, + true, + }, + "pre-modexp_5": { + 32, + 32, + 1025, + "0xf000000000000000000000000000000000000000000000000000000000000000", + "0xf000000000000000000000000000000000000000000000000000000000000010", + big8194, + false, + true, + }, + "pre-modexp_6": { + 32, + 32, + 32, + "0xf000000000000000000000000000000000000000000000000000000000000000", + "0xf000000000000000000000000000000000000000000000000000000000000010", + "0xf000000000000000000000000000000000000000000000000000000000000055", + true, + false, + }, + "pre-modexp_7": { + 32, + 32, + 32, + "0x0000000000000000000000000000000000000000000000000000000000000020", + "0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + "0xf000000000000000000000000000000000000000000000000000000000000055", + true, + false, + }, + "pre-modexp_8": { + 128, + 32, + 128, + "0x000000000000000000000000000000000000000000000000000000000000006400000000000000000000000000000000000000000000000000000000002b32af000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004339f6e1061a", + "0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + "0x0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001eb07e0ea000000000000000000000000000000000000000000000000000000056101669d", + true, + false, + }, + "pre-modexp_9": { + 64, + 32, + 34, + "0x10000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + 
"0x0000000000000000000000000000000000000000000000000000000000000001", + "0x00000000000000000000000000000000000000000000000000000000000000000009", + true, + false, + }, + "pre-modexp_10": { + 64, + 32, + 34, + "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000111", + "0x0000000000000000000000000000000000000000000000000000000000001000", + "0x00000000000000000000000000000000000000000000000000000000000000ffffff", + true, + false, + }, + "pre-modexp_11": { + 1, + 32, + 32, + "0x3", + "0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2e", + "0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f", + true, + false, + }, + "pre-modexp_12": { + 0, + 32, + 32, + "0x0", + "0x0000000000000000000000000000000000000000000000000000000000000001", + "0x0000000000000000000000000000000000000000000000000000000000000009", + true, + false, + }, } for name, test := range cases { t.Run(name, func(t *testing.T) { input := make([]byte, 0) - base := len(test.base.Bytes()) - exp := len(test.exp.Bytes()) - mod := len(test.mod.Bytes()) + bigBase := utils.ConvertHexToBigInt(test.base) + bigExp := utils.ConvertHexToBigInt(test.exp) + bigMod := utils.ConvertHexToBigInt(test.mod) - input = append(input, uint64To32Bytes(base)...) - input = append(input, uint64To32Bytes(exp)...) - input = append(input, uint64To32Bytes(mod)...) - input = append(input, uint64ToDeterminedBytes(test.base, base)...) - input = append(input, uint64ToDeterminedBytes(test.exp, exp)...) - input = append(input, uint64ToDeterminedBytes(test.mod, mod)...) + base := len(bigBase.Bytes()) + exp := len(bigExp.Bytes()) + mod := len(bigMod.Bytes()) + + input = append(input, uint64To32Bytes(test.lenBase)...) + input = append(input, uint64To32Bytes(test.lenExp)...) + input = append(input, uint64To32Bytes(test.lenMod)...) + input = append(input, uint64ToDeterminedBytes(bigBase, base)...) 
+ input = append(input, uint64ToDeterminedBytes(bigExp, exp)...) + input = append(input, uint64ToDeterminedBytes(bigMod, mod)...) gas := modExp.RequiredGas(input) + if test.nonZeroGas && gas == 0 { + t.Errorf("Expected non-zero gas") + } else if !test.nonZeroGas && gas != 0 { + t.Errorf("Expected zero gas") + } - if gas != test.expected { - t.Errorf("Expected %d, got %d", test.expected, gas) + _, err := modExp.Run(input) + if test.revert && err == nil { + t.Errorf("Expected revert") + } else if !test.revert && err != nil { + t.Errorf("Unexpected revert: %s", err) } }) } } -func uint64To32Bytes(input int) []byte { - bigInt := new(big.Int).SetUint64(uint64(input)) - bytes := bigInt.Bytes() - result := make([]byte, 32) - copy(result[32-len(bytes):], bytes) - return result +func TestP256VerifyZkevm(t *testing.T) { + testJsonZkevm("p256Verify", "0x0000000000000000000000000000000000000100", t) } -func uint64ToDeterminedBytes(input *big.Int, length int) []byte { - bytes := input.Bytes() - result := make([]byte, length) - copy(result[length-len(bytes):], bytes) - return result +// EIP2565 is enabled by default. 
+func TestBigModExpZkevm(t *testing.T) { + testJsonZkevm("modexp_eip2565", "0x0000000000000000000000000000000000000005", t) } diff --git a/core/vm/evm_zkevm.go b/core/vm/evm_zkevm.go index 288730f706f..2d18dcef8dc 100644 --- a/core/vm/evm_zkevm.go +++ b/core/vm/evm_zkevm.go @@ -278,6 +278,14 @@ func (evm *EVM) call_zkevm(typ OpCode, caller ContractRef, addr libcommon.Addres var code []byte if !isPrecompile { code = evm.intraBlockState.GetCode(addr) + + // zk - up to fork 10 we cannot handle a contract code that ends with just a push and nothing to push to the stack + // so check for this scenario + if !evm.chainConfig.IsForkID10(evm.Context.BlockNumber) { + if len(code) > 0 && code[len(code)-1] == byte(PUSH1) { + return nil, gas, ErrInvalidCode + } + } } snapshot := evm.intraBlockState.Snapshot() diff --git a/core/vm/testdata/precompiles/modexp_eip2565.json b/core/vm/testdata/precompiles/modexp_eip2565.json index c55441439eb..cafc908d24f 100644 --- a/core/vm/testdata/precompiles/modexp_eip2565.json +++ b/core/vm/testdata/precompiles/modexp_eip2565.json @@ -13,6 +13,41 @@ "Gas": 1360, "NoBenchmark": false }, + { + "Input": "000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000000fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f", + "Expected": "", + "Name": "return_empty_byte_slice", + "Gas": 200, + "NoBenchmark": false + }, + { + "Input": "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000002003ffff80", + "Expected": "3b01b01ac41f2d6e917c6d6a221ce793802469026d9ab7578fa2e79e4da6aaab", + "Name": "right_padding_mod", + "Gas": 200, + "NoBenchmark": false + }, + { + "Input": 
"00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000002003ffff800000000000000000000000000000000000000000000000000000000000000007", + "Expected": "3b01b01ac41f2d6e917c6d6a221ce793802469026d9ab7578fa2e79e4da6aaab", + "Name": "right_padding_mod_with_excess_data", + "Gas": 200, + "NoBenchmark": false + }, + { + "Input": "00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000002003ffff80", + "Expected": "0000000000000000000000000000000000000000000000000000000000000000", + "Name": "zero_base_right_pad", + "Gas": 200, + "NoBenchmark": false + }, + { + "Input": "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000002003ffff8000000000000000000000000000000000000000000000000000000000000000", + "Expected": "3b01b01ac41f2d6e917c6d6a221ce793802469026d9ab7578fa2e79e4da6aaab", + "Name": "eip_example_3", + "Gas": 200, + "NoBenchmark": false + }, { "Input": "000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000040e09ad9675465c53a109fac66a445c91b292d2bb2c5268addb30cd82f80fcb0033ff97c80a5fc6f39193ae969c6ede6710a6b7ac27078a06d90ef1c72e5c85fb502fc9e1f6beb81516545975218075ec2af118cd8798df6e08a147c60fd6095ac2bb02c2908cf4dd7c81f11c289e4bce98f3553768f392a80ce22bf5c4f4a248c6b", "Expected": "60008f1614cc01dcfb6bfb09c625cf90b47d4468db81b5f8b7a39d42f332eab9b2da8f2d95311648a8f243f4bb13cfb3d8f7f2a3c014122ebb3ed41b02783adc", diff --git a/core/vm/zk_batch_counters.go b/core/vm/zk_batch_counters.go index b40da0a02c1..83b2c36ed96 100644 --- 
a/core/vm/zk_batch_counters.go +++ b/core/vm/zk_batch_counters.go @@ -142,6 +142,10 @@ func (bcc *BatchCounterCollector) processBatchLevelData() error { // CheckForOverflow returns true in the case that any counter has less than 0 remaining func (bcc *BatchCounterCollector) CheckForOverflow(verifyMerkleProof bool) (bool, error) { + // unlimited counters shouldn't overflow + if bcc.unlimitedCounters { + return false, nil + } combined, err := bcc.CombineCollectors(verifyMerkleProof) if err != nil { return false, err diff --git a/docs/endpoints/Makefile b/docs/endpoints/Makefile index d9af1597ee3..271a7c96569 100644 --- a/docs/endpoints/Makefile +++ b/docs/endpoints/Makefile @@ -1,11 +1,11 @@ -DOC_NAME:="endpoints.md" +DOC_NAME:=endpoints.md .PHONY: gen-doc -gen-doc: +gen-doc: go run main.go $(DOC_NAME) .PHONY: check-doc -check-doc: +check-doc: go run main.go tmp$(DOC_NAME) cmp -s ./$(DOC_NAME) ./tmp$(DOC_NAME); \ RETVAL=$$?; \ diff --git a/docs/endpoints/endpoints.md b/docs/endpoints/endpoints.md index e8756c070a7..0101b931f89 100644 --- a/docs/endpoints/endpoints.md +++ b/docs/endpoints/endpoints.md @@ -41,6 +41,7 @@ If the endpoint is not in the list below, it means this specific endpoint is not - debug_getRawBlock - debug_getRawHeader - debug_storageRangeAt +- debug_traceBatchByNumber - debug_traceBlockByHash - debug_traceBlockByNumber - debug_traceCall @@ -198,6 +199,7 @@ If the endpoint is not in the list below, it means this specific endpoint is not - zkevm_getFullBlockByHash - zkevm_getFullBlockByNumber - zkevm_getL2BlockInfoTree +- zkevm_getLatestDataStreamBlock - zkevm_getLatestGlobalExitRoot - zkevm_getProverInput - zkevm_getRollupAddress diff --git a/erigon-lib/direct/sentry_client_mock.go b/erigon-lib/direct/sentry_client_mock.go index 48074023d41..3cf18f11298 100644 --- a/erigon-lib/direct/sentry_client_mock.go +++ b/erigon-lib/direct/sentry_client_mock.go @@ -10,14 +10,14 @@ package direct import ( - "context" - "reflect" - - 
"github.com/ledgerwatch/erigon-lib/gointerfaces/sentry" - "github.com/ledgerwatch/erigon-lib/gointerfaces/types" - "go.uber.org/mock/gomock" - "google.golang.org/grpc" - "google.golang.org/protobuf/types/known/emptypb" + context "context" + reflect "reflect" + + sentry "github.com/ledgerwatch/erigon-lib/gointerfaces/sentry" + types "github.com/ledgerwatch/erigon-lib/gointerfaces/types" + gomock "go.uber.org/mock/gomock" + grpc "google.golang.org/grpc" + emptypb "google.golang.org/protobuf/types/known/emptypb" ) // MockSentryClient is a mock of SentryClient interface. diff --git a/erigon-lib/kv/kvcache/cache.go b/erigon-lib/kv/kvcache/cache.go index 700029a4fa1..9159f0c5121 100644 --- a/erigon-lib/kv/kvcache/cache.go +++ b/erigon-lib/kv/kvcache/cache.go @@ -48,6 +48,7 @@ type CacheValidationResult struct { CodeKeysOutOfSync [][]byte } +//go:generate mockgen -typed=true -destination=./mocks/cache_mock.go -package=mocks . Cache type Cache interface { // View - returns CacheView consistent with givent kv.Tx View(ctx context.Context, tx kv.Tx) (CacheView, error) diff --git a/erigon-lib/kv/kvcache/mocks/cache_mock.go b/erigon-lib/kv/kvcache/mocks/cache_mock.go new file mode 100644 index 00000000000..8c408571d05 --- /dev/null +++ b/erigon-lib/kv/kvcache/mocks/cache_mock.go @@ -0,0 +1,195 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/ledgerwatch/erigon-lib/kv/kvcache (interfaces: Cache) +// +// Generated by this command: +// +// mockgen -typed=true -destination=./mocks/cache_mock.go -package=mocks . Cache +// + +// Package mocks is a generated GoMock package. +package mocks + +import ( + context "context" + reflect "reflect" + + remote "github.com/ledgerwatch/erigon-lib/gointerfaces/remote" + kv "github.com/ledgerwatch/erigon-lib/kv" + kvcache "github.com/ledgerwatch/erigon-lib/kv/kvcache" + gomock "go.uber.org/mock/gomock" +) + +// MockCache is a mock of Cache interface. 
+type MockCache struct { + ctrl *gomock.Controller + recorder *MockCacheMockRecorder +} + +// MockCacheMockRecorder is the mock recorder for MockCache. +type MockCacheMockRecorder struct { + mock *MockCache +} + +// NewMockCache creates a new mock instance. +func NewMockCache(ctrl *gomock.Controller) *MockCache { + mock := &MockCache{ctrl: ctrl} + mock.recorder = &MockCacheMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockCache) EXPECT() *MockCacheMockRecorder { + return m.recorder +} + +// Len mocks base method. +func (m *MockCache) Len() int { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Len") + ret0, _ := ret[0].(int) + return ret0 +} + +// Len indicates an expected call of Len. +func (mr *MockCacheMockRecorder) Len() *MockCacheLenCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Len", reflect.TypeOf((*MockCache)(nil).Len)) + return &MockCacheLenCall{Call: call} +} + +// MockCacheLenCall wrap *gomock.Call +type MockCacheLenCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockCacheLenCall) Return(arg0 int) *MockCacheLenCall { + c.Call = c.Call.Return(arg0) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockCacheLenCall) Do(f func() int) *MockCacheLenCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockCacheLenCall) DoAndReturn(f func() int) *MockCacheLenCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// OnNewBlock mocks base method. +func (m *MockCache) OnNewBlock(arg0 *remote.StateChangeBatch) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "OnNewBlock", arg0) +} + +// OnNewBlock indicates an expected call of OnNewBlock. 
+func (mr *MockCacheMockRecorder) OnNewBlock(arg0 any) *MockCacheOnNewBlockCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "OnNewBlock", reflect.TypeOf((*MockCache)(nil).OnNewBlock), arg0) + return &MockCacheOnNewBlockCall{Call: call} +} + +// MockCacheOnNewBlockCall wrap *gomock.Call +type MockCacheOnNewBlockCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockCacheOnNewBlockCall) Return() *MockCacheOnNewBlockCall { + c.Call = c.Call.Return() + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockCacheOnNewBlockCall) Do(f func(*remote.StateChangeBatch)) *MockCacheOnNewBlockCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockCacheOnNewBlockCall) DoAndReturn(f func(*remote.StateChangeBatch)) *MockCacheOnNewBlockCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// ValidateCurrentRoot mocks base method. +func (m *MockCache) ValidateCurrentRoot(arg0 context.Context, arg1 kv.Tx) (*kvcache.CacheValidationResult, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ValidateCurrentRoot", arg0, arg1) + ret0, _ := ret[0].(*kvcache.CacheValidationResult) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ValidateCurrentRoot indicates an expected call of ValidateCurrentRoot. 
+func (mr *MockCacheMockRecorder) ValidateCurrentRoot(arg0, arg1 any) *MockCacheValidateCurrentRootCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidateCurrentRoot", reflect.TypeOf((*MockCache)(nil).ValidateCurrentRoot), arg0, arg1) + return &MockCacheValidateCurrentRootCall{Call: call} +} + +// MockCacheValidateCurrentRootCall wrap *gomock.Call +type MockCacheValidateCurrentRootCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockCacheValidateCurrentRootCall) Return(arg0 *kvcache.CacheValidationResult, arg1 error) *MockCacheValidateCurrentRootCall { + c.Call = c.Call.Return(arg0, arg1) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockCacheValidateCurrentRootCall) Do(f func(context.Context, kv.Tx) (*kvcache.CacheValidationResult, error)) *MockCacheValidateCurrentRootCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockCacheValidateCurrentRootCall) DoAndReturn(f func(context.Context, kv.Tx) (*kvcache.CacheValidationResult, error)) *MockCacheValidateCurrentRootCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// View mocks base method. +func (m *MockCache) View(arg0 context.Context, arg1 kv.Tx) (kvcache.CacheView, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "View", arg0, arg1) + ret0, _ := ret[0].(kvcache.CacheView) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// View indicates an expected call of View. 
+func (mr *MockCacheMockRecorder) View(arg0, arg1 any) *MockCacheViewCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "View", reflect.TypeOf((*MockCache)(nil).View), arg0, arg1) + return &MockCacheViewCall{Call: call} +} + +// MockCacheViewCall wrap *gomock.Call +type MockCacheViewCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockCacheViewCall) Return(arg0 kvcache.CacheView, arg1 error) *MockCacheViewCall { + c.Call = c.Call.Return(arg0, arg1) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockCacheViewCall) Do(f func(context.Context, kv.Tx) (kvcache.CacheView, error)) *MockCacheViewCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockCacheViewCall) DoAndReturn(f func(context.Context, kv.Tx) (kvcache.CacheView, error)) *MockCacheViewCall { + c.Call = c.Call.DoAndReturn(f) + return c +} diff --git a/erigon-lib/kv/tables.go b/erigon-lib/kv/tables.go index ce8baaa5b8b..e9aebf625fe 100644 --- a/erigon-lib/kv/tables.go +++ b/erigon-lib/kv/tables.go @@ -547,6 +547,7 @@ const ( TableHashKey = "HermezSmtHashKey" TablePoolLimbo = "PoolLimbo" BATCH_ENDS = "batch_ends" + WITNESS_CACHE = "witness_cache" //Diagnostics tables DiagSystemInfo = "DiagSystemInfo" DiagSyncStages = "DiagSyncStages" @@ -791,6 +792,7 @@ var ChaindataTables = []string{ TableHashKey, TablePoolLimbo, BATCH_ENDS, + WITNESS_CACHE, } const ( diff --git a/erigon-lib/txpool/txpoolcfg/txpoolcfg.go b/erigon-lib/txpool/txpoolcfg/txpoolcfg.go index 54f5e0dfa9a..0ee6d97bed5 100644 --- a/erigon-lib/txpool/txpoolcfg/txpoolcfg.go +++ b/erigon-lib/txpool/txpoolcfg/txpoolcfg.go @@ -52,6 +52,7 @@ type Config struct { ProcessRemoteTxsEvery time.Duration CommitEvery time.Duration LogEvery time.Duration + PurgeEvery time.Duration //txpool db MdbxPageSize datasize.ByteSize @@ -59,6 +60,8 @@ type Config struct { MdbxGrowthStep datasize.ByteSize NoGossip bool // this mode doesn't 
broadcast any txs, and if receive remote-txn - skip it + + PurgeDistance time.Duration } var DefaultConfig = Config{ @@ -66,6 +69,8 @@ var DefaultConfig = Config{ ProcessRemoteTxsEvery: 100 * time.Millisecond, CommitEvery: 15 * time.Second, LogEvery: 30 * time.Second, + PurgeEvery: 1 * time.Minute, + PurgeDistance: 24 * time.Hour, PendingSubPoolLimit: 10_000, BaseFeeSubPoolLimit: 10_000, diff --git a/eth/ethconfig/config_zkevm.go b/eth/ethconfig/config_zkevm.go index 76f0c796f75..f1233d4ce5f 100644 --- a/eth/ethconfig/config_zkevm.go +++ b/eth/ethconfig/config_zkevm.go @@ -97,6 +97,8 @@ type Zk struct { BadBatches []uint64 SealBatchImmediatelyOnOverflow bool MockWitnessGeneration bool + WitnessCacheEnabled bool + WitnessCacheLimit uint64 WitnessContractInclusion []common.Address } diff --git a/eth/ethconfig/tx_pool.go b/eth/ethconfig/tx_pool.go index 05444d04569..454739446e0 100644 --- a/eth/ethconfig/tx_pool.go +++ b/eth/ethconfig/tx_pool.go @@ -107,6 +107,8 @@ var DefaultTxPool2Config = func(fullCfg *Config) txpoolcfg.Config { cfg.CommitEvery = 5 * time.Minute cfg.TracedSenders = pool1Cfg.TracedSenders cfg.CommitEvery = pool1Cfg.CommitEvery + cfg.PurgeEvery = fullCfg.TxPool.PurgeEvery + cfg.PurgeDistance = fullCfg.TxPool.PurgeDistance return cfg } diff --git a/eth/stagedsync/stage_execute_zkevm.go b/eth/stagedsync/stage_execute_zkevm.go index 41a6a533a58..f57d69351bc 100644 --- a/eth/stagedsync/stage_execute_zkevm.go +++ b/eth/stagedsync/stage_execute_zkevm.go @@ -39,7 +39,7 @@ import ( func SpawnExecuteBlocksStageZk(s *StageState, u Unwinder, tx kv.RwTx, toBlock uint64, ctx context.Context, cfg ExecuteBlockCfg, initialCycle bool) (err error) { if cfg.historyV3 { if err = ExecBlockV3(s, u, wrap.TxContainer{Tx: tx}, toBlock, ctx, cfg, initialCycle, log.New()); err != nil { - return err + return fmt.Errorf("ExecBlockV3: %w", err) } return nil } @@ -59,16 +59,15 @@ func SpawnExecuteBlocksStageZk(s *StageState, u Unwinder, tx kv.RwTx, toBlock ui quit := ctx.Done() 
useExternalTx := tx != nil if !useExternalTx { - tx, err = cfg.db.BeginRw(context.Background()) - if err != nil { - return err + if tx, err = cfg.db.BeginRw(context.Background()); err != nil { + return fmt.Errorf("beginRw: %w", err) } defer tx.Rollback() } nextStageProgress, err := stages.GetStageProgress(tx, stages.HashState) if err != nil { - return err + return fmt.Errorf("getStageProgress: %w", err) } nextStagesExpectData := nextStageProgress > 0 // Incremental move of next stages depend on fully written ChangeSets, Receipts, CallTraceSet @@ -87,19 +86,19 @@ func SpawnExecuteBlocksStageZk(s *StageState, u Unwinder, tx kv.RwTx, toBlock ui }() if err := utils.UpdateZkEVMBlockCfg(cfg.chainConfig, hermezDb, s.LogPrefix()); err != nil { - return err + return fmt.Errorf("UpdateZkEVMBlockCfg: %w", err) } eridb := erigon_db.NewErigonDb(tx) prevBlockRoot, prevBlockHash, err := getBlockHashValues(cfg, ctx, tx, s.BlockNumber) if err != nil { - return err + return fmt.Errorf("getBlockHashValues: %w", err) } to, total, err := getExecRange(cfg, tx, s.BlockNumber, toBlock, s.LogPrefix()) if err != nil { - return err + return fmt.Errorf("getExecRange: %w", err) } log.Info(fmt.Sprintf("[%s] Blocks execution", s.LogPrefix()), "from", s.BlockNumber, "to", to) @@ -126,7 +125,7 @@ Loop: //fetch values pre execute datastreamBlockHash, block, senders, err := getPreexecuteValues(cfg, ctx, tx, blockNum, prevBlockHash) if err != nil { - stoppedErr = err + stoppedErr = fmt.Errorf("getPreexecuteValues: %w", err) break } @@ -145,7 +144,7 @@ Loop: cfg.hd.ReportBadHeaderPoS(datastreamBlockHash, block.ParentHash()) } if cfg.badBlockHalt { - return err + return fmt.Errorf("executeBlockZk: %w", err) } } u.UnwindTo(blockNum-1, UnwindReason{Block: &datastreamBlockHash}) @@ -154,7 +153,7 @@ Loop: if execRs.BlockInfoTree != nil { if err = hermezDb.WriteBlockInfoRoot(blockNum, *execRs.BlockInfoTree); err != nil { - return err + return fmt.Errorf("WriteBlockInfoRoot: %w", err) } } @@ -176,20 +175,19 
@@ Loop: log.Info("Committed State", "gas reached", currentStateGas, "gasTarget", gasState) currentStateGas = 0 if err = s.Update(batch, stageProgress); err != nil { - return err + return fmt.Errorf("s.Update: %w", err) } if err = batch.Flush(ctx, tx); err != nil { - return err + return fmt.Errorf("batch.Flush: %w", err) } if !useExternalTx { if err = tx.Commit(); err != nil { - return err + return fmt.Errorf("tx.Commit: %w", err) } tx, err = cfg.db.BeginRw(context.Background()) if err != nil { - return err + return fmt.Errorf("cfg.db.BeginRw: %w", err) } - // TODO: This creates stacked up deferrals defer tx.Rollback() eridb = erigon_db.NewErigonDb(tx) logger.SetTx(tx) @@ -200,41 +198,40 @@ Loop: //commit values post execute if err := postExecuteCommitValues(s.LogPrefix(), cfg, tx, eridb, batch, datastreamBlockHash, block, senders); err != nil { - return err + return fmt.Errorf("postExecuteCommitValues: %w", err) } } if err = s.Update(batch, stageProgress); err != nil { - return err + return fmt.Errorf("s.Update: %w", err) } // we need to artificially update the headers stage here as well to ensure that notifications // can fire at the end of the stage loop and inform RPC subscriptions of new blocks for example if err = stages.SaveStageProgress(tx, stages.Headers, stageProgress); err != nil { - return err + return fmt.Errorf("SaveStageProgress: %w", err) } if err = batch.Flush(ctx, tx); err != nil { - return fmt.Errorf("batch commit: %w", err) + return fmt.Errorf("batch.Flush: %w", err) } - _, err = rawdb.IncrementStateVersionByBlockNumberIfNeeded(tx, stageProgress) // stageProgress is latest processsed block number - if err != nil { - return fmt.Errorf("writing plain state version: %w", err) + // stageProgress is latest processsed block number + if _, err = rawdb.IncrementStateVersionByBlockNumberIfNeeded(tx, stageProgress); err != nil { + return fmt.Errorf("IncrementStateVersionByBlockNumberIfNeeded: %w", err) } if !useExternalTx { log.Info(fmt.Sprintf("[%s] 
Commiting DB transaction...", s.LogPrefix()), "block", stageProgress) if err = tx.Commit(); err != nil { - return err + return fmt.Errorf("tx.Commit: %w", err) } } log.Info(fmt.Sprintf("[%s] Completed on", s.LogPrefix()), "block", stageProgress) - err = stoppedErr - return err + return stoppedErr } // returns the block's blockHash and header stateroot @@ -256,7 +253,7 @@ func getExecRange(cfg ExecuteBlockCfg, tx kv.RwTx, stageProgress, toBlock uint64 if cfg.zk.DebugLimit > 0 { prevStageProgress, err := stages.GetStageProgress(tx, stages.Senders) if err != nil { - return 0, 0, err + return 0, 0, fmt.Errorf("getStageProgress: %w", err) } to := prevStageProgress if cfg.zk.DebugLimit < to { @@ -268,11 +265,11 @@ func getExecRange(cfg ExecuteBlockCfg, tx kv.RwTx, stageProgress, toBlock uint64 shouldShortCircuit, noProgressTo, err := utils.ShouldShortCircuitExecution(tx, logPrefix, cfg.zk.L2ShortCircuitToVerifiedBatch) if err != nil { - return 0, 0, err + return 0, 0, fmt.Errorf("ShouldShortCircuitExecution: %w", err) } prevStageProgress, err := stages.GetStageProgress(tx, stages.Senders) if err != nil { - return 0, 0, err + return 0, 0, fmt.Errorf("getStageProgress: %w", err) } // skip if no progress @@ -298,12 +295,12 @@ func getExecRange(cfg ExecuteBlockCfg, tx kv.RwTx, stageProgress, toBlock uint64 func getPreexecuteValues(cfg ExecuteBlockCfg, ctx context.Context, tx kv.RwTx, blockNum uint64, prevBlockHash common.Hash) (common.Hash, *types.Block, []common.Address, error) { preExecuteHeaderHash, err := rawdb.ReadCanonicalHash(tx, blockNum) if err != nil { - return common.Hash{}, nil, nil, err + return common.Hash{}, nil, nil, fmt.Errorf("ReadCanonicalHash: %w", err) } block, senders, err := cfg.blockReader.BlockWithSenders(ctx, tx, preExecuteHeaderHash, blockNum) if err != nil { - return common.Hash{}, nil, nil, err + return common.Hash{}, nil, nil, fmt.Errorf("BlockWithSenders: %w", err) } if block == nil { @@ -315,7 +312,7 @@ func getPreexecuteValues(cfg 
ExecuteBlockCfg, ctx context.Context, tx kv.RwTx, b if cfg.chainConfig.IsLondon(blockNum) { parentHeader, err := cfg.blockReader.Header(ctx, tx, prevBlockHash, blockNum-1) if err != nil { - return common.Hash{}, nil, nil, err + return common.Hash{}, nil, nil, fmt.Errorf("cfg.blockReader.Header: %w", err) } block.HeaderNoCopy().BaseFee = misc.CalcBaseFeeZk(cfg.chainConfig, parentHeader) } @@ -343,29 +340,29 @@ func postExecuteCommitValues( log.Warn(fmt.Sprintf("[%s] Blockhash mismatch", logPrefix), "blockNumber", blockNum, "datastreamBlockHash", datastreamBlockHash, "calculatedBlockHash", blockHash) } if err := rawdbZk.DeleteSenders(tx, datastreamBlockHash, blockNum); err != nil { - return fmt.Errorf("failed to delete senders: %v", err) + return fmt.Errorf("DeleteSenders: %w", err) } if err := rawdbZk.DeleteHeader(tx, datastreamBlockHash, blockNum); err != nil { - return fmt.Errorf("failed to delete header: %v", err) + return fmt.Errorf("DeleteHeader: %w", err) } bodyForStorage, err := rawdb.ReadBodyForStorageByKey(tx, dbutils.BlockBodyKey(blockNum, datastreamBlockHash)) if err != nil { - return err + return fmt.Errorf("ReadBodyForStorageByKey: %w", err) } if err := rawdb.DeleteBodyAndTransactions(tx, blockNum, datastreamBlockHash); err != nil { - return err + return fmt.Errorf("DeleteBodyAndTransactions: %w", err) } if err := rawdb.WriteBodyAndTransactions(tx, blockHash, blockNum, block.Transactions(), bodyForStorage); err != nil { - return err + return fmt.Errorf("WriteBodyAndTransactions: %w", err) } // [zkevm] senders were saved in stage_senders for headerHashes based on incomplete headers // in stage execute we complete the headers and senders should be moved to the correct headerHash // also we should delete other data based on the old hash, since it is unaccessable now if err := rawdb.WriteSenders(tx, blockHash, blockNum, senders); err != nil { - return fmt.Errorf("failed to write senders: %v", err) + return fmt.Errorf("failed to write senders: %w", err) } } 
@@ -390,13 +387,13 @@ func postExecuteCommitValues( later. */ if err := rawdb.WriteHeader_zkEvm(tx, header); err != nil { - return fmt.Errorf("failed to write header: %v", err) + return fmt.Errorf("WriteHeader_zkEvm: %w", err) } if err := rawdb.WriteHeadHeaderHash(tx, blockHash); err != nil { - return err + return fmt.Errorf("WriteHeadHeaderHash: %w", err) } if err := rawdb.WriteCanonicalHash(tx, blockHash, blockNum); err != nil { - return fmt.Errorf("failed to write header: %v", err) + return fmt.Errorf("WriteCanonicalHash: %w", err) } // if err := eridb.WriteBody(block.Number(), blockHash, block.Transactions()); err != nil { // return fmt.Errorf("failed to write body: %v", err) @@ -404,7 +401,7 @@ func postExecuteCommitValues( // write the new block lookup entries if err := rawdb.WriteTxLookupEntries_zkEvm(tx, block); err != nil { - return fmt.Errorf("failed to write tx lookup entries: %v", err) + return fmt.Errorf("WriteTxLookupEntries_zkEvm: %w", err) } return nil @@ -425,12 +422,12 @@ func executeBlockZk( stateStream bool, roHermezDb state.ReadOnlyHermezDb, woHermezDb *hermez_db.HermezDb, -) (*core.EphemeralExecResultZk, error) { +) (execRs *core.EphemeralExecResultZk, err error) { blockNum := block.NumberU64() stateReader, stateWriter, err := newStateReaderWriter(batch, tx, block, writeChangesets, cfg.accumulator, cfg.blockReader, stateStream) if err != nil { - return nil, err + return nil, fmt.Errorf("newStateReaderWriter: %w", err) } // where the magic happens @@ -449,20 +446,19 @@ func executeBlockZk( vmConfig.Tracer = callTracer getHashFn := core.GetHashFn(block.Header(), getHeader) - execRs, err := core.ExecuteBlockEphemerallyZk(cfg.chainConfig, &vmConfig, getHashFn, cfg.engine, block, stateReader, stateWriter, ChainReaderImpl{config: cfg.chainConfig, tx: tx, blockReader: cfg.blockReader}, getTracer, roHermezDb, prevBlockRoot) - if err != nil { - return nil, err + if execRs, err = core.ExecuteBlockEphemerallyZk(cfg.chainConfig, &vmConfig, getHashFn, 
cfg.engine, block, stateReader, stateWriter, ChainReaderImpl{config: cfg.chainConfig, tx: tx, blockReader: cfg.blockReader}, getTracer, roHermezDb, prevBlockRoot); err != nil { + return nil, fmt.Errorf("ExecuteBlockEphemerallyZk: %w", err) } if writeReceipts { if err := rawdb.AppendReceipts(tx, blockNum, execRs.Receipts); err != nil { - return nil, err + return nil, fmt.Errorf("AppendReceipts: %w", err) } stateSyncReceipt := execRs.StateSyncReceipt if stateSyncReceipt != nil && stateSyncReceipt.Status == types.ReceiptStatusSuccessful { if err := rawdb.WriteBorReceipt(tx, block.NumberU64(), stateSyncReceipt); err != nil { - return nil, err + return nil, fmt.Errorf("WriteBorReceipt: %w", err) } } } @@ -481,7 +477,7 @@ func executeBlockZk( } if writeCallTraces { if err := callTracer.WriteToDb(tx, block, *cfg.vmConfig); err != nil { - return nil, err + return nil, fmt.Errorf("WriteToDb: %w", err) } } return execRs, nil @@ -493,9 +489,8 @@ func UnwindExecutionStageZk(u *UnwindState, s *StageState, tx kv.RwTx, ctx conte } useExternalTx := tx != nil if !useExternalTx { - tx, err = cfg.db.BeginRw(context.Background()) - if err != nil { - return err + if tx, err = cfg.db.BeginRw(context.Background()); err != nil { + return fmt.Errorf("beginRw: %w", err) } defer tx.Rollback() } @@ -503,24 +498,24 @@ func UnwindExecutionStageZk(u *UnwindState, s *StageState, tx kv.RwTx, ctx conte logger := log.New() if err = unwindExecutionStage(u, s, wrap.TxContainer{Tx: tx}, ctx, cfg, initialCycle, logger); err != nil { - return err + return fmt.Errorf("unwindExecutionStage: %w", err) } if err = UnwindExecutionStageDbWrites(ctx, u, s, tx); err != nil { - return err + return fmt.Errorf("UnwindExecutionStageDbWrites: %w", err) } // update the headers stage as we mark progress there as part of execution if err = stages.SaveStageProgress(tx, stages.Headers, u.UnwindPoint); err != nil { - return err + return fmt.Errorf("SaveStageProgress: %w", err) } if err = u.Done(tx); err != nil { - return 
err + return fmt.Errorf("u.Done: %w", err) } if !useExternalTx { if err = tx.Commit(); err != nil { - return err + return fmt.Errorf("tx.Commit: %w", err) } } return nil @@ -533,9 +528,8 @@ func UnwindExecutionStageErigon(u *UnwindState, s *StageState, tx kv.RwTx, ctx c func PruneExecutionStageZk(s *PruneState, tx kv.RwTx, cfg ExecuteBlockCfg, ctx context.Context, initialCycle bool) (err error) { useExternalTx := tx != nil if !useExternalTx { - tx, err = cfg.db.BeginRw(ctx) - if err != nil { - return err + if tx, err = cfg.db.BeginRw(ctx); err != nil { + return fmt.Errorf("beginRw: %w", err) } defer tx.Rollback() } @@ -547,48 +541,43 @@ func PruneExecutionStageZk(s *PruneState, tx kv.RwTx, cfg ExecuteBlockCfg, ctx c cfg.agg.SetTx(tx) if initialCycle { if err = cfg.agg.Prune(ctx, config3.HistoryV3AggregationStep/10); err != nil { // prune part of retired data, before commit - return err + return fmt.Errorf("cfg.agg.prune: %w", err) } } else { if err = cfg.agg.PruneWithTiemout(ctx, 1*time.Second); err != nil { // prune part of retired data, before commit - return err + return fmt.Errorf("cfg.agg.PruneWithTiemout: %w", err) } } } else { if cfg.prune.History.Enabled() { if err = rawdb.PruneTableDupSort(tx, kv.AccountChangeSet, s.LogPrefix(), cfg.prune.History.PruneTo(s.ForwardProgress), logEvery, ctx); err != nil { - return err + return fmt.Errorf("PruneTableDupSort: %w", err) } if err = rawdb.PruneTableDupSort(tx, kv.StorageChangeSet, s.LogPrefix(), cfg.prune.History.PruneTo(s.ForwardProgress), logEvery, ctx); err != nil { - return err + return fmt.Errorf("PruneTableDupSort: %w", err) } } if cfg.prune.Receipts.Enabled() { - if err = rawdb.PruneTable(tx, kv.Receipts, cfg.prune.Receipts.PruneTo(s.ForwardProgress), ctx, math.MaxInt32); err != nil { - return err - } - if err = rawdb.PruneTable(tx, kv.BorReceipts, cfg.prune.Receipts.PruneTo(s.ForwardProgress), ctx, math.MaxUint32); err != nil { - return err - } - // LogIndex.Prune will read everything what not pruned here 
- if err = rawdb.PruneTable(tx, kv.Log, cfg.prune.Receipts.PruneTo(s.ForwardProgress), ctx, math.MaxInt32); err != nil { - return err + for _, table := range []string{kv.Receipts, kv.BorReceipts, kv.Log} { + if err = rawdb.PruneTable(tx, table, cfg.prune.Receipts.PruneTo(s.ForwardProgress), ctx, math.MaxInt32); err != nil { + return fmt.Errorf("rawdb.PruneTable %s: %w", table, err) + } } } if cfg.prune.CallTraces.Enabled() { if err = rawdb.PruneTableDupSort(tx, kv.CallTraceSet, s.LogPrefix(), cfg.prune.CallTraces.PruneTo(s.ForwardProgress), logEvery, ctx); err != nil { - return err + return fmt.Errorf("PruneTableDupSort: %w", err) } } } if err = s.Done(tx); err != nil { - return err + return fmt.Errorf("s.Done: %w", err) } if !useExternalTx { if err = tx.Commit(); err != nil { - return err + return fmt.Errorf("tx.Commit: %w", err) } } return nil @@ -599,9 +588,11 @@ func UnwindExecutionStageDbWrites(ctx context.Context, u *UnwindState, s *StageS // TODO: check for other missing value like - WriteHeader_zkEvm, WriteHeadHeaderHash, WriteCanonicalHash, WriteBody, WriteSenders, WriteTxLookupEntries_zkEvm hash, err := rawdb.ReadCanonicalHash(tx, u.UnwindPoint) if err != nil { - return err + return fmt.Errorf("ReadCanonicalHash: %w", err) + } + if err := rawdb.WriteHeadHeaderHash(tx, hash); err != nil { + return fmt.Errorf("WriteHeadHeaderHash: %w", err) } - rawdb.WriteHeadHeaderHash(tx, hash) /* unwind EffectiveGasPricePercentage here although it is written in stage batches (RPC) or stage execute (Sequencer) @@ -612,34 +603,34 @@ func UnwindExecutionStageDbWrites(ctx context.Context, u *UnwindState, s *StageS transactions, err := eriDb.GetBodyTransactions(u.UnwindPoint+1, s.BlockNumber) if err != nil { - return fmt.Errorf("get body transactions error: %v", err) + return fmt.Errorf("GetBodyTransactions: %w", err) } transactionHashes := make([]common.Hash, 0, len(*transactions)) for _, tx := range *transactions { transactionHashes = append(transactionHashes, tx.Hash()) } 
if err := hermezDb.DeleteEffectiveGasPricePercentages(&transactionHashes); err != nil { - return fmt.Errorf("delete effective gas price percentages error: %v", err) + return fmt.Errorf("DeleteEffectiveGasPricePercentages: %w", err) } if err = rawdbZk.TruncateSenders(tx, u.UnwindPoint+1, s.BlockNumber); err != nil { - return fmt.Errorf("delete senders: %w", err) + return fmt.Errorf("TruncateSenders: %w", err) } if err = rawdb.TruncateTxLookupEntries_zkEvm(tx, u.UnwindPoint+1, s.BlockNumber); err != nil { return fmt.Errorf("delete tx lookup entires: %w", err) } if err = rawdb.TruncateBlocks(ctx, tx, u.UnwindPoint+1); err != nil { - return fmt.Errorf("delete blocks: %w", err) + return fmt.Errorf("TruncateBlocks: %w", err) } if err = rawdb.TruncateCanonicalHash(tx, u.UnwindPoint+1, true); err != nil { - return fmt.Errorf("delete cannonical hash with headers: %w", err) + return fmt.Errorf("TruncateCanonicalHash: %w", err) } if err = rawdb.TruncateStateVersion(tx, u.UnwindPoint+1); err != nil { - return err + return fmt.Errorf("TruncateStateVersion: %w", err) } if err = hermezDb.DeleteBlockInfoRoots(u.UnwindPoint+1, s.BlockNumber); err != nil { - return fmt.Errorf("delete block info roots: %w", err) + return fmt.Errorf("DeleteBlockInfoRoots: %w", err) } return nil diff --git a/eth/stagedsync/stages/stages_zk.go b/eth/stagedsync/stages/stages_zk.go index 4ac4583fa82..42936bdb615 100644 --- a/eth/stagedsync/stages/stages_zk.go +++ b/eth/stagedsync/stages/stages_zk.go @@ -31,4 +31,5 @@ var ( // HighestUsedL1InfoIndex SyncStage = "HighestUsedL1InfoTree" SequenceExecutorVerify SyncStage = "SequenceExecutorVerify" L1BlockSync SyncStage = "L1BlockSync" + Witness SyncStage = "Witness" ) diff --git a/eth/tracers/native/zero.go b/eth/tracers/native/zero.go index 3593a38eea1..e5228bfec16 100644 --- a/eth/tracers/native/zero.go +++ b/eth/tracers/native/zero.go @@ -27,7 +27,8 @@ func init() { } type zeroTracer struct { - noopTracer // stub struct to mock not used interface methods + 
noopTracer // stub struct to mock not used interface methods + env *vm.EVM tx types.TxnInfo gasLimit uint64 // Amount of gas bought for the whole tx @@ -39,7 +40,7 @@ type zeroTracer struct { addrOpCodes map[libcommon.Address]map[vm.OpCode]struct{} } -func newZeroTracer(ctx *tracers.Context, cfg json.RawMessage) (tracers.Tracer, error) { +func newZeroTracer(ctx *tracers.Context, _ json.RawMessage) (tracers.Tracer, error) { return &zeroTracer{ tx: types.TxnInfo{ Traces: make(map[libcommon.Address]*types.TxnTrace), @@ -72,19 +73,22 @@ func (t *zeroTracer) CaptureStart(env *vm.EVM, from libcommon.Address, to libcom } } + receiverTxTrace := t.tx.Traces[to] + senderTxTrace := t.tx.Traces[from] + // The recipient balance includes the value transferred. - toBal := new(big.Int).Sub(t.tx.Traces[to].Balance.ToBig(), value.ToBig()) - t.tx.Traces[to].Balance = uint256.MustFromBig(toBal) + toBal := new(big.Int).Sub(receiverTxTrace.Balance.ToBig(), value.ToBig()) + receiverTxTrace.Balance = uint256.MustFromBig(toBal) // The sender balance is after reducing: value and gasLimit. // We need to re-add them to get the pre-tx balance. 
- fromBal := new(big.Int).Set(t.tx.Traces[from].Balance.ToBig()) + fromBal := new(big.Int).Set(senderTxTrace.Balance.ToBig()) gasPrice := env.TxContext.GasPrice consumedGas := new(big.Int).Mul(gasPrice.ToBig(), new(big.Int).SetUint64(t.gasLimit)) fromBal.Add(fromBal, new(big.Int).Add(value.ToBig(), consumedGas)) - t.tx.Traces[from].Balance = uint256.MustFromBig(fromBal) - if t.tx.Traces[from].Nonce.Cmp(uint256.NewInt(0)) > 0 { - t.tx.Traces[from].Nonce.Sub(t.tx.Traces[from].Nonce, uint256.NewInt(1)) + senderTxTrace.Balance = uint256.MustFromBig(fromBal) + if senderTxTrace.Nonce.Cmp(uint256.NewInt(0)) > 0 { + senderTxTrace.Nonce.Sub(senderTxTrace.Nonce, uint256.NewInt(1)) } } @@ -291,24 +295,22 @@ func (t *zeroTracer) CaptureTxEnd(restGas uint64) { // Set the receipt logs and create a bloom for filtering receipt.Logs = t.env.IntraBlockState().GetLogs(t.ctx.Txn.Hash()) receipt.Bloom = types.CreateBloom(types.Receipts{receipt}) - receipt.BlockNumber = big.NewInt(0).SetUint64(t.ctx.BlockNum) + receipt.BlockNumber = new(big.Int).SetUint64(t.ctx.BlockNum) receipt.TransactionIndex = uint(t.ctx.TxIndex) receiptBuffer := &bytes.Buffer{} - encodeErr := receipt.EncodeRLP(receiptBuffer) - - if encodeErr != nil { - log.Error("failed to encode receipt", "err", encodeErr) + err := receipt.EncodeRLP(receiptBuffer) + if err != nil { + log.Error("failed to encode receipt", "err", err) return } t.tx.Meta.NewReceiptTrieNode = receiptBuffer.Bytes() txBuffer := &bytes.Buffer{} - encodeErr = t.ctx.Txn.MarshalBinary(txBuffer) - - if encodeErr != nil { - log.Error("failed to encode transaction", "err", encodeErr) + err = t.ctx.Txn.MarshalBinary(txBuffer) + if err != nil { + log.Error("failed to encode transaction", "err", err) return } @@ -326,10 +328,7 @@ func (t *zeroTracer) CaptureEnd(output []byte, gasUsed uint64, err error) { // GetResult returns the json-encoded nested list of call traces, and any // error arising from the encoding or forceful termination (via `Stop`). 
func (t *zeroTracer) GetResult() (json.RawMessage, error) { - var res []byte - var err error - res, err = json.Marshal(t.tx) - + res, err := json.Marshal(t.tx) if err != nil { return nil, err } diff --git a/go.sum b/go.sum index fa1bdb27b5d..04a112eb862 100644 --- a/go.sum +++ b/go.sum @@ -53,10 +53,6 @@ filippo.io/edwards25519 v1.0.0-rc.1/go.mod h1:N1IkdkCkiLB6tki+MYJoSx2JTY9NUlxZE7 gfx.cafe/util/go/generic v0.0.0-20230721185457-c559e86c829c h1:alCfDKmPC0EC0KGlZWrNF0hilVWBkzMz+aAYTJ/2hY4= gfx.cafe/util/go/generic v0.0.0-20230721185457-c559e86c829c/go.mod h1:WvSX4JsCRBuIXj0FRBFX9YLg+2SoL3w8Ww19uZO9yNE= git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg= -github.com/0xPolygonHermez/zkevm-data-streamer v0.2.5 h1:p0epAhai44c34G+nzX0CZ67q3vkJtOXlO07lbhAEe9g= -github.com/0xPolygonHermez/zkevm-data-streamer v0.2.5/go.mod h1:RC6ouyNsUtJrv5aGPcM6Dm5xhXN209tRSzcsJsaOtZI= -github.com/0xPolygonHermez/zkevm-data-streamer v0.2.6 h1:BSO1uu6dmLQ5kKb3uyDvsUxbnIoyumKvlwr0OtpTYMo= -github.com/0xPolygonHermez/zkevm-data-streamer v0.2.6/go.mod h1:RC6ouyNsUtJrv5aGPcM6Dm5xhXN209tRSzcsJsaOtZI= github.com/0xPolygonHermez/zkevm-data-streamer v0.2.7 h1:73sYxRQ9cOmtYBEyHePgEwrVULR+YruSQxVXCt/SmzU= github.com/0xPolygonHermez/zkevm-data-streamer v0.2.7/go.mod h1:7nM7Ihk+fTG1TQPwdZoGOYd3wprqqyIyjtS514uHzWE= github.com/99designs/gqlgen v0.17.40 h1:/l8JcEVQ93wqIfmH9VS1jsAkwm6eAF1NwQn3N+SDqBY= @@ -351,7 +347,6 @@ github.com/go-chi/chi/v5 v5.0.12 h1:9euLV5sTrTNTRUU9POmDUvfxyj6LAABLUcEWO+JJb4s= github.com/go-chi/chi/v5 v5.0.12/go.mod h1:DslCQbL2OYiznFReuXYUmQ2hGd1aDpCnlMNITLSKoi8= github.com/go-chi/cors v1.2.1 h1:xEC8UT3Rlp2QuWNEr4Fs/c2EAGVKBwy/1vHx3bppil4= github.com/go-chi/cors v1.2.1/go.mod h1:sSbTewc+6wYHBBCW7ytsFSn836hqM7JxpglAy2Vzc58= -github.com/go-delve/delve v1.21.2 h1:eaS+ziJo+660mi3D2q/VP8RxW5GcF4Y1zyKSi82alsU= github.com/go-delve/delve v1.21.2/go.mod h1:FgTAiRUe43RS5EexL06RPyMtP8AMZVL/t9Qqgy3qUe4= github.com/go-errors/errors 
v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= @@ -542,8 +537,6 @@ github.com/hermeznetwork/tracerr v0.3.2/go.mod h1:nsWC1+tc4qUEbUGRv4DcPJJTjLsedl github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUqJM= github.com/hexops/gotextdiff v1.0.3/go.mod h1:pSWU5MAI3yDq+fZBTazCSJysOMbxWL1BSow5/V2vxeg= github.com/holiman/uint256 v1.2.0/go.mod h1:y4ga/t+u+Xwd7CpDgZESaRcWy0I7XMlTMA25ApIH5Jw= -github.com/holiman/uint256 v1.2.4 h1:jUc4Nk8fm9jZabQuqr2JzednajVmBpC+oiTiXZJEApU= -github.com/holiman/uint256 v1.2.4/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXeiRV4ng7E= github.com/holiman/uint256 v1.3.1 h1:JfTzmih28bittyHM8z360dCjIA9dbPIBlcTI6lmctQs= github.com/holiman/uint256 v1.3.1/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXeiRV4ng7E= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= @@ -1466,8 +1459,6 @@ golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o= -golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y= golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= @@ -1703,8 +1694,6 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= 
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= -google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= diff --git a/smt/pkg/db/mdbx.go b/smt/pkg/db/mdbx.go index adca963eaac..c3c642a1037 100644 --- a/smt/pkg/db/mdbx.go +++ b/smt/pkg/db/mdbx.go @@ -252,7 +252,7 @@ func (m *EriRoDb) GetKeySource(key utils.NodeKey) ([]byte, error) { } if data == nil { - return nil, fmt.Errorf("key %x not found", keyConc.Bytes()) + return nil, ErrNotFound } return data, nil diff --git a/smt/pkg/db/mem-db.go b/smt/pkg/db/mem-db.go index 949f267b402..bd45994628a 100644 --- a/smt/pkg/db/mem-db.go +++ b/smt/pkg/db/mem-db.go @@ -9,6 +9,10 @@ import ( "github.com/ledgerwatch/erigon/smt/pkg/utils" ) +var ( + ErrNotFound = fmt.Errorf("key not found") +) + type MemDb struct { Db map[string][]string DbAccVal map[string][]string @@ -184,7 +188,7 @@ func (m *MemDb) GetKeySource(key utils.NodeKey) ([]byte, error) { s, ok := m.DbKeySource[keyConc.String()] if !ok { - return nil, fmt.Errorf("key not found") + return nil, ErrNotFound } return s, nil @@ -224,7 +228,7 @@ func (m *MemDb) GetHashKey(key utils.NodeKey) (utils.NodeKey, error) { s, ok := m.DbHashKey[k] if !ok { - return utils.NodeKey{}, fmt.Errorf("key not found") + return utils.NodeKey{}, ErrNotFound } nv := big.NewInt(0).SetBytes(s) @@ -243,7 +247,7 @@ func (m *MemDb) GetCode(codeHash []byte) ([]byte, error) { s, ok := m.DbCode["0x"+hex.EncodeToString(codeHash)] if !ok { - return nil, fmt.Errorf("key not found") + return nil, ErrNotFound } return s, nil diff --git a/smt/pkg/smt/smt.go b/smt/pkg/smt/smt.go index 
50d0221916d..6d541cc8914 100644 --- a/smt/pkg/smt/smt.go +++ b/smt/pkg/smt/smt.go @@ -503,7 +503,9 @@ func (s *SMT) insert(k utils.NodeKey, v utils.NodeValue8, newValH [4]uint64, old utils.RemoveOver(siblings, level+1) - s.updateDepth(len(siblings)) + if err := s.updateDepth(len(siblings)); err != nil { + return nil, fmt.Errorf("updateDepth: %w", err) + } for level >= 0 { hashValueIn, err := utils.NodeValue8FromBigIntArray(siblings[level][0:8]) @@ -639,7 +641,7 @@ func (s *SMT) CheckOrphanedNodes(ctx context.Context) int { return len(orphanedNodes) } -func (s *SMT) updateDepth(newDepth int) { +func (s *SMT) updateDepth(newDepth int) error { oldDepth, err := s.Db.GetDepth() if err != nil { oldDepth = 0 @@ -652,8 +654,11 @@ func (s *SMT) updateDepth(newDepth int) { newDepthAsByte := byte(newDepth & 0xFF) if oldDepth < newDepthAsByte { - _ = s.Db.SetDepth(newDepthAsByte) + if err := s.Db.SetDepth(newDepthAsByte); err != nil { + return fmt.Errorf("s.Db.SetDepth: %w", err) + } } + return nil } /* @@ -713,7 +718,6 @@ func (s *RoSMT) traverse(ctx context.Context, node *big.Int, action TraverseActi childPrefix[len(prefix)] = byte(i) err := s.traverse(ctx, child.ToBigInt(), action, childPrefix) if err != nil { - fmt.Println(err) return err } } diff --git a/smt/pkg/smt/smt_batch.go b/smt/pkg/smt/smt_batch.go index e7676665918..85b33030dd3 100644 --- a/smt/pkg/smt/smt_batch.go +++ b/smt/pkg/smt/smt_batch.go @@ -24,77 +24,64 @@ func NewInsertBatchConfig(ctx context.Context, logPrefix string, shouldPrintProg } } -func (s *SMT) InsertBatch(cfg InsertBatchConfig, nodeKeys []*utils.NodeKey, nodeValues []*utils.NodeValue8, nodeValuesHashes []*[4]uint64, rootNodeHash *utils.NodeKey) (*SMTResponse, error) { - s.clearUpMutex.Lock() - defer s.clearUpMutex.Unlock() - - var maxInsertingNodePathLevel = 0 - var size int = len(nodeKeys) - var err error - var smtBatchNodeRoot *smtBatchNode - nodeHashesForDelete := make(map[uint64]map[uint64]map[uint64]map[uint64]*utils.NodeKey) - - var 
progressChanPre chan uint64 - var stopProgressPrinterPre func() - if cfg.shouldPrintProgress { - progressChanPre, stopProgressPrinterPre = zk.ProgressPrinter(fmt.Sprintf("[%s] SMT incremental progress (pre-process)", cfg.logPrefix), uint64(4), false) +func getProgressPrinterPre(logPrefix string, progressType string, size uint64, shouldPrintProgress bool) (progressChanPre *chan uint64, stopProgressPrinterPre func()) { + var newChan chan uint64 + if shouldPrintProgress { + newChan, stopProgressPrinterPre = zk.ProgressPrinter(fmt.Sprintf("[%s] SMT incremental progress (%s)", logPrefix, progressType), size, false) } else { - progressChanPre = make(chan uint64, 100) + newChan = make(chan uint64, size) var once sync.Once stopProgressPrinterPre = func() { - once.Do(func() { close(progressChanPre) }) + once.Do(func() { close(newChan) }) } } - defer stopProgressPrinterPre() - if err = validateDataLengths(nodeKeys, nodeValues, &nodeValuesHashes); err != nil { - return nil, err - } - progressChanPre <- uint64(1) - - if err = removeDuplicateEntriesByKeys(&size, &nodeKeys, &nodeValues, &nodeValuesHashes); err != nil { - return nil, err - } - progressChanPre <- uint64(1) - - if err = calculateNodeValueHashesIfMissing(nodeValues, &nodeValuesHashes); err != nil { - return nil, err - } - progressChanPre <- uint64(1) + return &newChan, stopProgressPrinterPre +} - if err = calculateRootNodeHashIfNil(s, &rootNodeHash); err != nil { - return nil, err - } - progressChanPre <- uint64(1) - stopProgressPrinterPre() - var progressChan chan uint64 - var stopProgressPrinter func() - if cfg.shouldPrintProgress { - progressChan, stopProgressPrinter = zk.ProgressPrinter(fmt.Sprintf("[%s] SMT incremental progress (process)", cfg.logPrefix), uint64(size), false) - } else { - progressChan = make(chan uint64) - var once sync.Once +func (s *SMT) InsertBatch(cfg InsertBatchConfig, nodeKeys []*utils.NodeKey, nodeValues []*utils.NodeValue8, nodeValuesHashes []*[4]uint64, rootNodeHash *utils.NodeKey) (r 
*SMTResponse, err error) { + s.clearUpMutex.Lock() + defer s.clearUpMutex.Unlock() - stopProgressPrinter = func() { - once.Do(func() { close(progressChan) }) - } - } + var ( + maxInsertingNodePathLevel = 0 + size = len(nodeKeys) + smtBatchNodeRoot *smtBatchNode + nodeHashesForDelete = make(map[uint64]map[uint64]map[uint64]map[uint64]*utils.NodeKey) + ) + + //BE CAREFUL: modifies the arrays + if err := s.preprocessBatchedNodeValues( + cfg.logPrefix, + cfg.shouldPrintProgress, + &nodeKeys, + &nodeValues, + &nodeValuesHashes, + &rootNodeHash, + ); err != nil { + return nil, fmt.Errorf("preprocessBatchedNodeValues: %w", err) + } + + //DO NOT MOVE ABOVE PREPROCESS + size = len(nodeKeys) + + progressChan, stopProgressPrinter := getProgressPrinterPre(cfg.logPrefix, "process", uint64(size), cfg.shouldPrintProgress) defer stopProgressPrinter() for i := 0; i < size; i++ { select { case <-cfg.ctx.Done(): - return nil, fmt.Errorf(fmt.Sprintf("[%s] Context done", cfg.logPrefix)) - case progressChan <- uint64(1): + return nil, fmt.Errorf("context done") + case *progressChan <- uint64(1): default: } insertingNodeKey := nodeKeys[i] insertingNodeValue := nodeValues[i] insertingNodeValueHash := nodeValuesHashes[i] - - insertingNodePathLevel, insertingNodePath, insertingPointerToSmtBatchNode, visitedNodeHashes, err := findInsertingPoint(s, insertingNodeKey, rootNodeHash, &smtBatchNodeRoot, insertingNodeValue.IsZero()) + insertingNodePath := insertingNodeKey.GetPath() + insertingNodePathLevel, insertingPointerToSmtBatchNode, visitedNodeHashes, err := s.findInsertingPoint(insertingNodePath, rootNodeHash, &smtBatchNodeRoot, insertingNodeValue.IsZero()) if err != nil { return nil, err } @@ -182,68 +169,22 @@ func (s *SMT) InsertBatch(cfg InsertBatchConfig, nodeKeys []*utils.NodeKey, node } } select { - case progressChan <- uint64(1): + case *progressChan <- uint64(1): default: } stopProgressPrinter() - s.updateDepth(maxInsertingNodePathLevel) - - totalDeleteOps := 
len(nodeHashesForDelete) - - var progressChanDel chan uint64 - var stopProgressPrinterDel func() - if cfg.shouldPrintProgress { - progressChanDel, stopProgressPrinterDel = zk.ProgressPrinter(fmt.Sprintf("[%s] SMT incremental progress (deletes)", cfg.logPrefix), uint64(totalDeleteOps), false) - } else { - progressChanDel = make(chan uint64, 100) - var once sync.Once - - stopProgressPrinterDel = func() { - once.Do(func() { close(progressChanDel) }) - } - } - defer stopProgressPrinterDel() - for _, mapLevel0 := range nodeHashesForDelete { - progressChanDel <- uint64(1) - for _, mapLevel1 := range mapLevel0 { - for _, mapLevel2 := range mapLevel1 { - for _, nodeHash := range mapLevel2 { - s.Db.DeleteByNodeKey(*nodeHash) - s.Db.DeleteHashKey(*nodeHash) - } - } - } + if err := s.updateDepth(maxInsertingNodePathLevel); err != nil { + return nil, fmt.Errorf("updateDepth: %w", err) } - stopProgressPrinterDel() - - totalFinalizeOps := len(nodeValues) - var progressChanFin chan uint64 - var stopProgressPrinterFin func() - if cfg.shouldPrintProgress { - progressChanFin, stopProgressPrinterFin = zk.ProgressPrinter(fmt.Sprintf("[%s] SMT incremental progress (finalize)", cfg.logPrefix), uint64(totalFinalizeOps), false) - } else { - progressChanFin = make(chan uint64, 100) - var once sync.Once - stopProgressPrinterFin = func() { - once.Do(func() { close(progressChanFin) }) - } + if err := s.deleteBatchedNodeValues(cfg.logPrefix, nodeHashesForDelete); err != nil { + return nil, fmt.Errorf("deleteBatchedNodeValues: %w", err) } - defer stopProgressPrinterFin() - for i, nodeValue := range nodeValues { - select { - case progressChanFin <- uint64(1): - default: - } - if !nodeValue.IsZero() { - err = s.hashSaveByPointers(nodeValue.ToUintArrayByPointer(), &utils.BranchCapacity, nodeValuesHashes[i]) - if err != nil { - return nil, err - } - } + + if err := s.saveBatchedNodeValues(cfg.logPrefix, nodeValues, nodeValuesHashes); err != nil { + return nil, fmt.Errorf("saveBatchedNodeValues: 
%w", err) } - stopProgressPrinterFin() if smtBatchNodeRoot == nil { rootNodeHash = &utils.NodeKey{0, 0, 0, 0} @@ -274,7 +215,97 @@ func (s *SMT) InsertBatch(cfg InsertBatchConfig, nodeKeys []*utils.NodeKey, node }, nil } -func validateDataLengths(nodeKeys []*utils.NodeKey, nodeValues []*utils.NodeValue8, nodeValuesHashes *[]*[4]uint64) error { +// returns the new size of the values batch after removing duplicate entries +func (s *SMT) preprocessBatchedNodeValues( + logPrefix string, + shouldPrintProgress bool, + nodeKeys *[]*utils.NodeKey, + nodeValues *[]*utils.NodeValue8, + nodeValuesHashes *[]*[4]uint64, + rootNodeHash **utils.NodeKey, +) error { + progressChanPre, stopProgressPrinterPre := getProgressPrinterPre(logPrefix, "pre-process", 4, shouldPrintProgress) + defer stopProgressPrinterPre() + + if err := validateDataLengths(*nodeKeys, *nodeValues, nodeValuesHashes); err != nil { + return fmt.Errorf("validateDataLengths: %w", err) + } + *progressChanPre <- uint64(1) + + if err := removeDuplicateEntriesByKeys(nodeKeys, nodeValues, nodeValuesHashes); err != nil { + return fmt.Errorf("removeDuplicateEntriesByKeys: %w", err) + } + *progressChanPre <- uint64(1) + + if err := calculateNodeValueHashesIfMissing(*nodeValues, nodeValuesHashes); err != nil { + return fmt.Errorf("calculateNodeValueHashesIfMissing: %w", err) + } + *progressChanPre <- uint64(1) + + if err := calculateRootNodeHashIfNil(s, rootNodeHash); err != nil { + return fmt.Errorf("calculateRootNodeHashIfNil: %w", err) + } + *progressChanPre <- uint64(1) + stopProgressPrinterPre() + + return nil +} + +func (s *SMT) deleteBatchedNodeValues( + logPrefix string, + nodeHashesForDelete map[uint64]map[uint64]map[uint64]map[uint64]*utils.NodeKey, +) error { + progressChanDel, stopProgressPrinterDel := getProgressPrinterPre(logPrefix, "deletes", uint64(len(nodeHashesForDelete)), false) + defer stopProgressPrinterDel() + + for _, mapLevel0 := range nodeHashesForDelete { + *progressChanDel <- uint64(1) + for _, 
mapLevel1 := range mapLevel0 { + for _, mapLevel2 := range mapLevel1 { + for _, nodeHash := range mapLevel2 { + if err := s.Db.DeleteByNodeKey(*nodeHash); err != nil { + return fmt.Errorf("DeleteByNodeKey: %w", err) + } + if err := s.Db.DeleteHashKey(*nodeHash); err != nil { + return fmt.Errorf("DeleteHashKey: %w", err) + } + } + } + } + } + stopProgressPrinterDel() + + return nil +} + +func (s *SMT) saveBatchedNodeValues( + logPrefix string, + nodeValues []*utils.NodeValue8, + nodeValuesHashes []*[4]uint64, +) error { + progressChanFin, stopProgressPrinterFin := getProgressPrinterPre(logPrefix, "finalize", uint64(len(nodeValues)), false) + defer stopProgressPrinterFin() + + for i, nodeValue := range nodeValues { + select { + case *progressChanFin <- uint64(1): + default: + } + if !nodeValue.IsZero() { + if err := s.hashSaveByPointers(nodeValue.ToUintArrayByPointer(), &utils.BranchCapacity, nodeValuesHashes[i]); err != nil { + return err + } + } + } + stopProgressPrinterFin() + return nil +} + +func validateDataLengths( + nodeKeys []*utils.NodeKey, + nodeValues []*utils.NodeValue8, + nodeValuesHashes *[]*[4]uint64, +) error { var size int = len(nodeKeys) if len(nodeValues) != size { @@ -291,12 +322,17 @@ func validateDataLengths(nodeKeys []*utils.NodeKey, nodeValues []*utils.NodeValu return nil } -func removeDuplicateEntriesByKeys(size *int, nodeKeys *[]*utils.NodeKey, nodeValues *[]*utils.NodeValue8, nodeValuesHashes *[]*[4]uint64) error { +func removeDuplicateEntriesByKeys( + nodeKeys *[]*utils.NodeKey, + nodeValues *[]*utils.NodeValue8, + nodeValuesHashes *[]*[4]uint64, +) error { + size := len(*nodeKeys) storage := make(map[uint64]map[uint64]map[uint64]map[uint64]int) - resultNodeKeys := make([]*utils.NodeKey, 0, *size) - resultNodeValues := make([]*utils.NodeValue8, 0, *size) - resultNodeValuesHashes := make([]*[4]uint64, 0, *size) + resultNodeKeys := make([]*utils.NodeKey, 0, size) + resultNodeValues := make([]*utils.NodeValue8, 0, size) + 
resultNodeValuesHashes := make([]*[4]uint64, 0, size) for i, nodeKey := range *nodeKeys { setNodeKeyMapValue(storage, nodeKey, i) @@ -319,12 +355,13 @@ func removeDuplicateEntriesByKeys(size *int, nodeKeys *[]*utils.NodeKey, nodeVal *nodeValues = resultNodeValues *nodeValuesHashes = resultNodeValuesHashes - *size = len(*nodeKeys) - return nil } -func calculateNodeValueHashesIfMissing(nodeValues []*utils.NodeValue8, nodeValuesHashes *[]*[4]uint64) error { +func calculateNodeValueHashesIfMissing( + nodeValues []*utils.NodeValue8, + nodeValuesHashes *[]*[4]uint64, +) error { var globalError error size := len(nodeValues) cpuNum := parallel.DefaultNumGoroutines() @@ -358,7 +395,12 @@ func calculateNodeValueHashesIfMissing(nodeValues []*utils.NodeValue8, nodeValue return globalError } -func calculateNodeValueHashesIfMissingInInterval(nodeValues []*utils.NodeValue8, nodeValuesHashes *[]*[4]uint64, startIndex, endIndex int) error { +func calculateNodeValueHashesIfMissingInInterval( + nodeValues []*utils.NodeValue8, + nodeValuesHashes *[]*[4]uint64, + startIndex, + endIndex int, +) error { for i := startIndex; i < endIndex; i++ { if (*nodeValuesHashes)[i] != nil { continue @@ -382,36 +424,43 @@ func calculateRootNodeHashIfNil(s *SMT, root **utils.NodeKey) error { return nil } -func findInsertingPoint(s *SMT, insertingNodeKey, insertingPointerNodeHash *utils.NodeKey, insertingPointerToSmtBatchNode **smtBatchNode, fetchDirectSiblings bool) (int, []int, **smtBatchNode, []*utils.NodeKey, error) { - var err error - var insertingNodePathLevel int = -1 - var insertingPointerToSmtBatchNodeParent *smtBatchNode - - var visitedNodeHashes = make([]*utils.NodeKey, 0, 256) - - var nextInsertingPointerNodeHash *utils.NodeKey - var nextInsertingPointerToSmtBatchNode **smtBatchNode - - insertingNodePath := insertingNodeKey.GetPath() +func (s *SMT) findInsertingPoint( + insertingNodePath []int, + insertingPointerNodeHash *utils.NodeKey, + insertingPointerToSmtBatchNode **smtBatchNode, + 
fetchDirectSiblings bool, +) ( + insertingNodePathLevel int, + nextInsertingPointerToSmtBatchNode **smtBatchNode, + visitedNodeHashes []*utils.NodeKey, + err error, +) { + insertingNodePathLevel = -1 + visitedNodeHashes = make([]*utils.NodeKey, 0, 256) + + var ( + insertingPointerToSmtBatchNodeParent *smtBatchNode + nextInsertingPointerNodeHash *utils.NodeKey + ) for { if (*insertingPointerToSmtBatchNode) == nil { // update in-memory structure from db if !insertingPointerNodeHash.IsZero() { - *insertingPointerToSmtBatchNode, err = fetchNodeDataFromDb(s, insertingPointerNodeHash, insertingPointerToSmtBatchNodeParent) + *insertingPointerToSmtBatchNode, err = s.fetchNodeDataFromDb(insertingPointerNodeHash, insertingPointerToSmtBatchNodeParent) if err != nil { - return -2, []int{}, insertingPointerToSmtBatchNode, visitedNodeHashes, err + return -2, insertingPointerToSmtBatchNode, visitedNodeHashes, err } visitedNodeHashes = append(visitedNodeHashes, insertingPointerNodeHash) } else { if insertingNodePathLevel != -1 { - return -2, []int{}, insertingPointerToSmtBatchNode, visitedNodeHashes, fmt.Errorf("nodekey is zero at non-root level") + return -2, insertingPointerToSmtBatchNode, visitedNodeHashes, fmt.Errorf("nodekey is zero at non-root level") } } } if (*insertingPointerToSmtBatchNode) == nil { if insertingNodePathLevel != -1 { - return -2, []int{}, insertingPointerToSmtBatchNode, visitedNodeHashes, fmt.Errorf("working smt pointer is nil at non-root level") + return -2, insertingPointerToSmtBatchNode, visitedNodeHashes, fmt.Errorf("working smt pointer is nil at non-root level") } break } @@ -425,16 +474,16 @@ func findInsertingPoint(s *SMT, insertingNodeKey, insertingPointerNodeHash *util if fetchDirectSiblings { // load direct siblings of a non-leaf from the DB if (*insertingPointerToSmtBatchNode).leftNode == nil { - (*insertingPointerToSmtBatchNode).leftNode, err = fetchNodeDataFromDb(s, (*insertingPointerToSmtBatchNode).nodeLeftHashOrRemainingKey, 
(*insertingPointerToSmtBatchNode)) + (*insertingPointerToSmtBatchNode).leftNode, err = s.fetchNodeDataFromDb((*insertingPointerToSmtBatchNode).nodeLeftHashOrRemainingKey, (*insertingPointerToSmtBatchNode)) if err != nil { - return -2, []int{}, insertingPointerToSmtBatchNode, visitedNodeHashes, err + return -2, insertingPointerToSmtBatchNode, visitedNodeHashes, err } visitedNodeHashes = append(visitedNodeHashes, (*insertingPointerToSmtBatchNode).nodeLeftHashOrRemainingKey) } if (*insertingPointerToSmtBatchNode).rightNode == nil { - (*insertingPointerToSmtBatchNode).rightNode, err = fetchNodeDataFromDb(s, (*insertingPointerToSmtBatchNode).nodeRightHashOrValueHash, (*insertingPointerToSmtBatchNode)) + (*insertingPointerToSmtBatchNode).rightNode, err = s.fetchNodeDataFromDb((*insertingPointerToSmtBatchNode).nodeRightHashOrValueHash, (*insertingPointerToSmtBatchNode)) if err != nil { - return -2, []int{}, insertingPointerToSmtBatchNode, visitedNodeHashes, err + return -2, insertingPointerToSmtBatchNode, visitedNodeHashes, err } visitedNodeHashes = append(visitedNodeHashes, (*insertingPointerToSmtBatchNode).nodeRightHashOrValueHash) } @@ -452,10 +501,13 @@ func findInsertingPoint(s *SMT, insertingNodeKey, insertingPointerNodeHash *util insertingPointerToSmtBatchNode = nextInsertingPointerToSmtBatchNode } - return insertingNodePathLevel, insertingNodePath, insertingPointerToSmtBatchNode, visitedNodeHashes, nil + return insertingNodePathLevel, insertingPointerToSmtBatchNode, visitedNodeHashes, nil } -func updateNodeHashesForDelete(nodeHashesForDelete map[uint64]map[uint64]map[uint64]map[uint64]*utils.NodeKey, visitedNodeHashes []*utils.NodeKey) { +func updateNodeHashesForDelete( + nodeHashesForDelete map[uint64]map[uint64]map[uint64]map[uint64]*utils.NodeKey, + visitedNodeHashes []*utils.NodeKey, +) { for _, visitedNodeHash := range visitedNodeHashes { if visitedNodeHash == nil { continue @@ -466,7 +518,12 @@ func updateNodeHashesForDelete(nodeHashesForDelete 
map[uint64]map[uint64]map[uin } // no point to parallelize this function because db consumer is slower than this producer -func calculateAndSaveHashesDfs(sdh *smtDfsHelper, smtBatchNode *smtBatchNode, path []int, level int) { +func calculateAndSaveHashesDfs( + sdh *smtDfsHelper, + smtBatchNode *smtBatchNode, + path []int, + level int, +) { if smtBatchNode.isLeaf() { hashObj, hashValue := utils.HashKeyAndValueByPointers(utils.ConcatArrays4ByPointers(smtBatchNode.nodeLeftHashOrRemainingKey.AsUint64Pointer(), smtBatchNode.nodeRightHashOrValueHash.AsUint64Pointer()), &utils.LeafCapacity) smtBatchNode.hash = hashObj @@ -515,7 +572,11 @@ type smtBatchNode struct { hash *[4]uint64 } -func newSmtBatchNodeLeaf(nodeLeftHashOrRemainingKey, nodeRightHashOrValueHash *utils.NodeKey, parentNode *smtBatchNode) *smtBatchNode { +func newSmtBatchNodeLeaf( + nodeLeftHashOrRemainingKey, + nodeRightHashOrValueHash *utils.NodeKey, + parentNode *smtBatchNode, +) *smtBatchNode { return &smtBatchNode{ nodeLeftHashOrRemainingKey: nodeLeftHashOrRemainingKey, nodeRightHashOrValueHash: nodeRightHashOrValueHash, @@ -527,7 +588,7 @@ func newSmtBatchNodeLeaf(nodeLeftHashOrRemainingKey, nodeRightHashOrValueHash *u } } -func fetchNodeDataFromDb(s *SMT, nodeHash *utils.NodeKey, parentNode *smtBatchNode) (*smtBatchNode, error) { +func (s *SMT) fetchNodeDataFromDb(nodeHash *utils.NodeKey, parentNode *smtBatchNode) (*smtBatchNode, error) { if nodeHash.IsZero() { return nil, nil } @@ -586,7 +647,11 @@ func (sbn *smtBatchNode) updateHashesAfterDelete() { } } -func (sbn *smtBatchNode) createALeafInEmptyDirection(insertingNodePath []int, insertingNodePathLevel int, insertingNodeKey *utils.NodeKey) (**smtBatchNode, error) { +func (sbn *smtBatchNode) createALeafInEmptyDirection( + insertingNodePath []int, + insertingNodePathLevel int, + insertingNodeKey *utils.NodeKey, +) (**smtBatchNode, error) { direction := insertingNodePath[insertingNodePathLevel] childPointer := sbn.getChildInDirection(direction) if 
(*childPointer) != nil { @@ -597,7 +662,10 @@ func (sbn *smtBatchNode) createALeafInEmptyDirection(insertingNodePath []int, in return childPointer, nil } -func (sbn *smtBatchNode) expandLeafByAddingALeafInDirection(insertingNodeKey []int, insertingNodeKeyLevel int) **smtBatchNode { +func (sbn *smtBatchNode) expandLeafByAddingALeafInDirection( + insertingNodeKey []int, + insertingNodeKeyLevel int, +) **smtBatchNode { direction := insertingNodeKey[insertingNodeKeyLevel] insertingNodeKeyUpToLevel := insertingNodeKey[:insertingNodeKeyLevel] @@ -614,7 +682,12 @@ func (sbn *smtBatchNode) expandLeafByAddingALeafInDirection(insertingNodeKey []i return childPointer } -func (sbn *smtBatchNode) collapseLeafByRemovingTheSingleLeaf(insertingNodeKey []int, insertingNodeKeyLevel int, theSingleLeaf *smtBatchNode, theSingleNodeLeafDirection int) **smtBatchNode { +func (sbn *smtBatchNode) collapseLeafByRemovingTheSingleLeaf( + insertingNodeKey []int, + insertingNodeKeyLevel int, + theSingleLeaf *smtBatchNode, + theSingleNodeLeafDirection int, +) **smtBatchNode { insertingNodeKeyUpToLevel := insertingNodeKey[:insertingNodeKeyLevel+1] insertingNodeKeyUpToLevel[insertingNodeKeyLevel] = theSingleNodeLeafDirection nodeKey := utils.JoinKey(insertingNodeKeyUpToLevel, *theSingleLeaf.nodeLeftHashOrRemainingKey) @@ -688,7 +761,11 @@ func (sdh *smtDfsHelper) startConsumersLoop(s *SMT) error { } } -func setNodeKeyMapValue[T int | *utils.NodeKey](nodeKeyMap map[uint64]map[uint64]map[uint64]map[uint64]T, nodeKey *utils.NodeKey, value T) { +func setNodeKeyMapValue[T int | *utils.NodeKey]( + nodeKeyMap map[uint64]map[uint64]map[uint64]map[uint64]T, + nodeKey *utils.NodeKey, + value T, +) { mapLevel0, found := nodeKeyMap[nodeKey[0]] if !found { mapLevel0 = make(map[uint64]map[uint64]map[uint64]T) @@ -710,7 +787,10 @@ func setNodeKeyMapValue[T int | *utils.NodeKey](nodeKeyMap map[uint64]map[uint64 mapLevel2[nodeKey[3]] = value } -func getNodeKeyMapValue[T int | *utils.NodeKey](nodeKeyMap 
map[uint64]map[uint64]map[uint64]map[uint64]T, nodeKey *utils.NodeKey) (T, bool) { +func getNodeKeyMapValue[T int | *utils.NodeKey]( + nodeKeyMap map[uint64]map[uint64]map[uint64]map[uint64]T, + nodeKey *utils.NodeKey, +) (T, bool) { var notExistingValue T mapLevel0, found := nodeKeyMap[nodeKey[0]] diff --git a/smt/pkg/smt/smt_utils.go b/smt/pkg/smt/smt_utils.go new file mode 100644 index 00000000000..aed504d1643 --- /dev/null +++ b/smt/pkg/smt/smt_utils.go @@ -0,0 +1,49 @@ +package smt + +import ( + "fmt" + + "github.com/ledgerwatch/erigon/smt/pkg/utils" +) + +var ( + ErrEmptySearchPath = fmt.Errorf("search path is empty") +) + +func (s *SMT) GetNodeAtPath(path []int) (nodeV *utils.NodeValue12, err error) { + pathLen := len(path) + if pathLen == 0 { + return nil, ErrEmptySearchPath + } + + var sl utils.NodeValue12 + + oldRoot, err := s.getLastRoot() + if err != nil { + return nil, fmt.Errorf("getLastRoot: %w", err) + } + + for level, pathByte := range path { + sl, err = s.Db.Get(oldRoot) + if err != nil { + return nil, err + } + + if sl.IsFinalNode() { + foundRKey := utils.NodeKeyFromBigIntArray(sl[0:4]) + if level < pathLen-1 || + foundRKey.GetPath()[0] != pathByte { + return nil, nil + } + + break + } else { + oldRoot = utils.NodeKeyFromBigIntArray(sl[pathByte*4 : pathByte*4+4]) + if oldRoot.IsZero() { + return nil, nil + } + } + } + + return &sl, nil +} diff --git a/smt/pkg/smt/smt_utils_test.go b/smt/pkg/smt/smt_utils_test.go new file mode 100644 index 00000000000..f30d1646bd5 --- /dev/null +++ b/smt/pkg/smt/smt_utils_test.go @@ -0,0 +1,92 @@ +package smt + +import ( + "math/big" + "testing" + + "github.com/ledgerwatch/erigon/smt/pkg/utils" + "github.com/stretchr/testify/assert" +) + +func Test_DoesNodeExist(t *testing.T) { + tests := []struct { + name string + insertPaths [][]int + searchPath []int + expectedResult bool + expectedError error + }{ + { + name: "empty tree", + insertPaths: [][]int{}, + searchPath: []int{1}, + expectedResult: false, + 
expectedError: nil, + }, + { + name: "Search for empty path", + insertPaths: [][]int{{1}}, + searchPath: []int{}, + expectedResult: false, + expectedError: ErrEmptySearchPath, + }, + { + name: "Insert 1 node and search for it", + insertPaths: [][]int{{1}}, + searchPath: []int{1}, + expectedResult: true, + expectedError: nil, + }, + { + name: "Insert 1 node and search for the one next to it", + insertPaths: [][]int{{1}}, + searchPath: []int{0}, + expectedResult: false, + expectedError: nil, + }, + { + name: "Insert 2 nodes and search for the first one", + insertPaths: [][]int{{1}, {1, 1}}, + searchPath: []int{1}, + expectedResult: true, + expectedError: nil, + }, + { + name: "Insert 2 nodes and search for the second one", + insertPaths: [][]int{{1}, {1, 1}}, + searchPath: []int{1, 1}, + expectedResult: true, + expectedError: nil, + }, + { + name: "Search for node with longer path than the depth", + insertPaths: [][]int{{1}}, + searchPath: []int{1, 1}, + expectedResult: false, + expectedError: nil, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + s := NewSMT(nil, false) + for _, insertPath := range tt.insertPaths { + fullPath := make([]int, 256) + copy(fullPath, insertPath) + nodeKey, err := utils.NodeKeyFromPath(fullPath) + assert.NoError(t, err, tt.name+": Failed to create node key from path ") + _, err = s.InsertKA(nodeKey, new(big.Int).SetUint64(1) /*arbitrary, not used in test*/) + assert.NoError(t, err, tt.name+": Failed to insert node") + } + + result, err := s.GetNodeAtPath(tt.searchPath) + if tt.expectedError != nil { + assert.Error(t, err, tt.name) + assert.Equal(t, tt.expectedError, err, tt.name) + } else { + assert.NoError(t, err, tt.name) + } + assert.Equal(t, tt.expectedResult, result != nil, tt.name) + }) + } +} diff --git a/smt/pkg/smt/witness.go b/smt/pkg/smt/witness.go index 5fc7d64e336..ef80f6ab3ed 100644 --- a/smt/pkg/smt/witness.go +++ b/smt/pkg/smt/witness.go @@ -5,17 +5,18 @@ import ( "fmt" "math/big" - libcommon 
"github.com/ledgerwatch/erigon-lib/common" + "github.com/ledgerwatch/erigon-lib/common" + "github.com/ledgerwatch/erigon/smt/pkg/db" "github.com/ledgerwatch/erigon/smt/pkg/utils" "github.com/ledgerwatch/erigon/turbo/trie" "github.com/status-im/keycard-go/hexutils" ) // BuildWitness creates a witness from the SMT -func BuildWitness(s *SMT, rd trie.RetainDecider, ctx context.Context) (*trie.Witness, error) { +func (s *RoSMT) BuildWitness(rd trie.RetainDecider, ctx context.Context) (*trie.Witness, error) { operands := make([]trie.WitnessOperator, 0) - root, err := s.Db.GetLastRoot() + root, err := s.DbRo.GetLastRoot() if err != nil { return nil, err } @@ -47,7 +48,7 @@ func BuildWitness(s *SMT, rd trie.RetainDecider, ctx context.Context) (*trie.Wit } if !retain { - h := libcommon.BigToHash(k.ToBigInt()) + h := common.BigToHash(k.ToBigInt()) hNode := trie.OperatorHash{Hash: h} operands = append(operands, &hNode) return false, nil @@ -55,12 +56,17 @@ func BuildWitness(s *SMT, rd trie.RetainDecider, ctx context.Context) (*trie.Wit } if v.IsFinalNode() { - actualK, err := s.Db.GetHashKey(k) - if err != nil { + actualK, err := s.DbRo.GetHashKey(k) + if err == db.ErrNotFound { + h := common.BigToHash(k.ToBigInt()) + hNode := trie.OperatorHash{Hash: h} + operands = append(operands, &hNode) + return false, nil + } else if err != nil { return false, err } - keySource, err := s.Db.GetKeySource(actualK) + keySource, err := s.DbRo.GetKeySource(actualK) if err != nil { return false, err } @@ -71,14 +77,14 @@ func BuildWitness(s *SMT, rd trie.RetainDecider, ctx context.Context) (*trie.Wit } valHash := v.Get4to8() - v, err := s.Db.Get(*valHash) + v, err := s.DbRo.Get(*valHash) if err != nil { return false, err } vInBytes := utils.ArrayBigToScalar(utils.BigIntArrayFromNodeValue8(v.GetNodeValue8())).Bytes() if t == utils.SC_CODE { - code, err := s.Db.GetCode(vInBytes) + code, err := s.DbRo.GetCode(vInBytes) if err != nil { return false, err } @@ -86,11 +92,15 @@ func BuildWitness(s 
*SMT, rd trie.RetainDecider, ctx context.Context) (*trie.Wit operands = append(operands, &trie.OperatorCode{Code: code}) } + storageKeyBytes := storage.Bytes() + if t != utils.SC_STORAGE { + storageKeyBytes = []byte{} + } // fmt.Printf("Node hash: %s, Node type: %d, address %x, storage %x, value %x\n", utils.ConvertBigIntToHex(k.ToBigInt()), t, addr, storage, utils.ArrayBigToScalar(value8).Bytes()) operands = append(operands, &trie.OperatorSMTLeafValue{ NodeType: uint8(t), Address: addr.Bytes(), - StorageKey: storage.Bytes(), + StorageKey: storageKeyBytes, Value: vInBytes, }) return false, nil @@ -118,10 +128,18 @@ func BuildWitness(s *SMT, rd trie.RetainDecider, ctx context.Context) (*trie.Wit } // BuildSMTfromWitness builds SMT from witness -func BuildSMTfromWitness(w *trie.Witness) (*SMT, error) { +func BuildSMTFromWitness(w *trie.Witness) (*SMT, error) { // using memdb s := NewSMT(nil, false) + if err := AddWitnessToSMT(s, w); err != nil { + return nil, fmt.Errorf("AddWitnessToSMT: %w", err) + } + + return s, nil +} + +func AddWitnessToSMT(s *SMT, w *trie.Witness) error { balanceMap := make(map[string]*big.Int) nonceMap := make(map[string]*big.Int) contractMap := make(map[string]string) @@ -135,7 +153,7 @@ func BuildSMTfromWitness(w *trie.Witness) (*SMT, error) { type nodeHash struct { path []int - hash libcommon.Hash + hash common.Hash } nodeHashes := make([]nodeHash, 0) @@ -144,8 +162,7 @@ func BuildSMTfromWitness(w *trie.Witness) (*SMT, error) { switch op := operator.(type) { case *trie.OperatorSMTLeafValue: valScaler := big.NewInt(0).SetBytes(op.Value) - addr := libcommon.BytesToAddress(op.Address) - + addr := common.BytesToAddress(op.Address) switch op.NodeType { case utils.KEY_BALANCE: balanceMap[addr.String()] = valScaler @@ -165,7 +182,6 @@ func BuildSMTfromWitness(w *trie.Witness) (*SMT, error) { storageMap[addr.String()][stKey] = valScaler.String() } - path = path[:len(path)-1] NodeChildCountMap[intArrayToString(path)] += 1 @@ -177,12 +193,12 @@ func 
BuildSMTfromWitness(w *trie.Witness) (*SMT, error) { } case *trie.OperatorCode: - addr := libcommon.BytesToAddress(w.Operators[i+1].(*trie.OperatorSMTLeafValue).Address) + addr := common.BytesToAddress(w.Operators[i+1].(*trie.OperatorSMTLeafValue).Address) code := hexutils.BytesToHex(op.Code) if len(code) > 0 { if err := s.Db.AddCode(hexutils.HexToBytes(code)); err != nil { - return nil, err + return err } code = fmt.Sprintf("0x%s", code) } @@ -212,7 +228,6 @@ func BuildSMTfromWitness(w *trie.Witness) (*SMT, error) { pathCopy := make([]int, len(path)) copy(pathCopy, path) nodeHashes = append(nodeHashes, nodeHash{path: pathCopy, hash: op.Hash}) - path = path[:len(path)-1] NodeChildCountMap[intArrayToString(path)] += 1 @@ -225,57 +240,52 @@ func BuildSMTfromWitness(w *trie.Witness) (*SMT, error) { default: // Unsupported operator type - return nil, fmt.Errorf("unsupported operator type: %T", op) + return fmt.Errorf("unsupported operator type: %T", op) } } for _, nodeHash := range nodeHashes { - _, err := s.InsertHashNode(nodeHash.path, nodeHash.hash.Big()) + // should not replace with hash node if there are nodes under it on the current smt + // we would lose needed data i we replace it with a hash node + node, err := s.GetNodeAtPath(nodeHash.path) if err != nil { - return nil, err + return fmt.Errorf("GetNodeAtPath: %w", err) + } + if node != nil { + continue + } + if _, err := s.InsertHashNode(nodeHash.path, nodeHash.hash.Big()); err != nil { + return fmt.Errorf("InsertHashNode: %w", err) } - _, err = s.Db.GetLastRoot() - if err != nil { - return nil, err + if _, err = s.Db.GetLastRoot(); err != nil { + return fmt.Errorf("GetLastRoot: %w", err) } } for addr, balance := range balanceMap { - _, err := s.SetAccountBalance(addr, balance) - if err != nil { - return nil, err + if _, err := s.SetAccountBalance(addr, balance); err != nil { + return fmt.Errorf("SetAccountBalance: %w", err) } } for addr, nonce := range nonceMap { - _, err := s.SetAccountNonce(addr, nonce) - 
if err != nil { - return nil, err + if _, err := s.SetAccountNonce(addr, nonce); err != nil { + return fmt.Errorf("SetAccountNonce: %w", err) } } for addr, code := range contractMap { - err := s.SetContractBytecode(addr, code) - if err != nil { - return nil, err + if err := s.SetContractBytecode(addr, code); err != nil { + return fmt.Errorf("SetContractBytecode: %w", err) } } for addr, storage := range storageMap { - _, err := s.SetContractStorage(addr, storage, nil) - if err != nil { - fmt.Println("error : unable to set contract storage", err) + if _, err := s.SetContractStorage(addr, storage, nil); err != nil { + return fmt.Errorf("SetContractStorage: %w", err) } } - return s, nil -} - -func intArrayToString(a []int) string { - s := "" - for _, v := range a { - s += fmt.Sprintf("%d", v) - } - return s + return nil } diff --git a/smt/pkg/smt/witness_test.go b/smt/pkg/smt/witness_test.go index 87dae548915..6d3415214f5 100644 --- a/smt/pkg/smt/witness_test.go +++ b/smt/pkg/smt/witness_test.go @@ -17,6 +17,7 @@ import ( "github.com/ledgerwatch/erigon/smt/pkg/utils" "github.com/ledgerwatch/erigon/turbo/trie" "github.com/stretchr/testify/require" + "gotest.tools/v3/assert" ) func prepareSMT(t *testing.T) (*smt.SMT, *trie.RetainList) { @@ -31,7 +32,7 @@ func prepareSMT(t *testing.T) (*smt.SMT, *trie.RetainList) { tds := state.NewTrieDbState(libcommon.Hash{}, tx, 0, state.NewPlainStateReader(tx)) - w := tds.TrieStateWriter() + w := tds.NewTrieStateWriter() intraBlockState := state.New(tds) @@ -46,7 +47,7 @@ func prepareSMT(t *testing.T) (*smt.SMT, *trie.RetainList) { intraBlockState.AddBalance(contract, balance) intraBlockState.SetState(contract, &sKey, *sVal) - err := intraBlockState.FinalizeTx(&chain.Rules{}, tds.TrieStateWriter()) + err := intraBlockState.FinalizeTx(&chain.Rules{}, tds.NewTrieStateWriter()) require.NoError(t, err, "error finalising 1st tx") err = intraBlockState.CommitBlock(&chain.Rules{}, w) @@ -112,7 +113,7 @@ func TestSMTWitnessRetainList(t 
*testing.T) { sKey := libcommon.HexToHash("0x5") sVal := uint256.NewInt(0xdeadbeef) - witness, err := smt.BuildWitness(smtTrie, rl, context.Background()) + witness, err := smtTrie.BuildWitness(rl, context.Background()) require.NoError(t, err, "error building witness") foundCode := findNode(t, witness, contract, libcommon.Hash{}, utils.SC_CODE) @@ -139,7 +140,7 @@ func TestSMTWitnessRetainListEmptyVal(t *testing.T) { _, err := smtTrie.SetAccountState(contract.String(), balance.ToBig(), uint256.NewInt(0).ToBig()) require.NoError(t, err) - witness, err := smt.BuildWitness(smtTrie, rl, context.Background()) + witness, err := smtTrie.BuildWitness(rl, context.Background()) require.NoError(t, err, "error building witness") foundCode := findNode(t, witness, contract, libcommon.Hash{}, utils.SC_CODE) @@ -160,10 +161,10 @@ func TestSMTWitnessRetainListEmptyVal(t *testing.T) { func TestWitnessToSMT(t *testing.T) { smtTrie, rl := prepareSMT(t) - witness, err := smt.BuildWitness(smtTrie, rl, context.Background()) + witness, err := smtTrie.BuildWitness(rl, context.Background()) require.NoError(t, err, "error building witness") - newSMT, err := smt.BuildSMTfromWitness(witness) + newSMT, err := smt.BuildSMTFromWitness(witness) require.NoError(t, err, "error building SMT from witness") root, err := newSMT.Db.GetLastRoot() @@ -190,12 +191,15 @@ func TestWitnessToSMTStateReader(t *testing.T) { expectedRoot, err := smtTrie.Db.GetLastRoot() require.NoError(t, err, "error getting last root") - witness, err := smt.BuildWitness(smtTrie, rl, context.Background()) + witness, err := smtTrie.BuildWitness(rl, context.Background()) require.NoError(t, err, "error building witness") - newSMT, err := smt.BuildSMTfromWitness(witness) + newSMT, err := smt.BuildSMTFromWitness(witness) require.NoError(t, err, "error building SMT from witness") + _, err = newSMT.BuildWitness(rl, context.Background()) + require.NoError(t, err, "error rebuilding witness") + root, err := newSMT.Db.GetLastRoot() 
require.NoError(t, err, "error getting the last root from db") @@ -239,3 +243,29 @@ func TestWitnessToSMTStateReader(t *testing.T) { // assert that the storage value is the same require.Equal(t, expectedStorageValue, newStorageValue) } + +func TestBlockWitnessLarge(t *testing.T) { + witnessBytes, err := hex.DecodeString(smt.Witness1) + require.NoError(t, err, "error decoding witness") + + w, err := trie.NewWitnessFromReader(bytes.NewReader(witnessBytes), false /* trace */) + if err != nil { + t.Error(err) + } + + smt1, err := smt.BuildSMTFromWitness(w) + require.NoError(t, err, "Could not restore trie from the block witness: %v", err) + + rl := &trie.AlwaysTrueRetainDecider{} + w2, err := smt1.BuildWitness(rl, context.Background()) + require.NoError(t, err, "error building witness") + + //create writer + var buff bytes.Buffer + w.WriteDiff(w2, &buff) + diff := buff.String() + if len(diff) > 0 { + fmt.Println(diff) + } + assert.Equal(t, 0, len(diff), "witnesses should be equal") +} diff --git a/smt/pkg/smt/witness_test_data.go b/smt/pkg/smt/witness_test_data.go new file mode 100644 index 00000000000..fab6aff4732 --- /dev/null +++ b/smt/pkg/smt/witness_test_data.go @@ -0,0 +1,6 @@ +package smt + +var ( + Witness1 = 
"0102030203020302030203020303ddd15247a8b234236d91271277b1059a674eaed56c29a6d8905b27ea9460c7e40344f7576ca6198b0bb6daa81b4eb6f594b46608e0f4d8d509361f0aac88eed2b50203020302030203020302030203037477c5b7ac361fa5a28f01782fc1b9577dfe27c9d91e5193c426916c166503f3033e6831fb92c6944c4869e9ff429fd40b9191f5a5a9fd8e4e26f67be29feb3d00020302030310c0064663f729ce8c12a4db054317ae8a3d309ee54378eba25ca39a4670758d03fa715595952a40ebcc9c06b02f6b1960a1f74a722c3a9fecba1aa66f32f1850e0203020303b010b79cdf4c9bd8f8164ad282defed968658e80fa57c26c19f5cadcfd9c890e0318f8d37b605fba62e9bd02f5554b8bd4784578021c737c4cb957c4ed5e8ad3b5020302030203020303c4ac3ac799860160a30a3304b765c2c90bc414edc3739a5d098bb7e18009548a0344ed07cf7b7b49fc2e7fc9c6c19d1b60e64990110188e15b445320a35660f91d02030203020303949f805ade2be05694c8011fa17fab3646a43f38f96d868386f0ba9558ba5f960302aabd9fbeceb9711f46d634513830181412c8405aea579f470a19b477d090140203020303db978a462b93b2efa3aa3da09e03370b570db692c6d361d52ae1051bdb26a3a903916d67432c505e1dc33f3617e0743d761aba44785726309191e79cb18b666e7402030203033edca13bcadc1db9305f3b15322cc6d774682fffdfe2b509f81d00b16ce2dcd003dc94780e238944094e7856154e6d3e54fec28293a9a70eaf1cc2a81e874e22170203020302010203070354000000000000000000000000000000005ca1ab1e5820e72de8a1b9696dd30f7886b15c4cc9234d52c6b41b9c33e2baaf8d88fc5b7c9f5820f8fb80310ac041e7a5e79c138d7261cda5d8a988dc9268b5a8dc5318fb610a90070354000000000000000000000000000000005ca1ab1e5820000000000000000000000000000000000000000000000000000000000000000358207d7b0aec16983b640324af57c161ae800ab5b0b61937d153540fd64ba724a431020303c6cbb686c9d7a94f49dfbee076ae1b87f1c9bb5f33b7c98a71816c33b4731a3b037514c2021b2bb805e2a6060b265dd53c069a4587e19cd7d1af99d0e9c3d0e550020303784570292bffbc3ee00153e5806a317459fed4c1d84de0515dcefc177404865003f08c92f6786e67148e8fb2bcd4eb813a665e16a270b475605d1d84b587450ff102030344c5e2a1775873020ab4e5a588e95d6702878cd46012682196dc39738fd8780703a6d5ee17fe3be7e20050e4e66c54b5188febbdd3615f832c35b073078258b214020303e5f90b59ef9f5ceee0e0a54551e41a62431
ea06aa09be94779c474ca4d18683403e794dec8b1cbcd53bbecf14b61869699ed3f92ecbb4ac3d9a8bc744c09a3e69a020303476c478891a8f8d905ebf9e5c031ba1020ca1436538bf9b97c6eaa1b9512da97030e077b0c8215c8c43753d044e318788eb8e39de692fe0ccd46396d3b06ca6e0c020303ddaa569a922c9c28d9a225a4c212c076ad5e353bb7cceaab630a3884af855d2403d5ff4c122142c026a0b24b79b4a667d25ea916ef64d8a8215aa29a738c5588a50203034b1d465e96a44ba0d7983a6f4ce10a26bce7816b6d51ba8ac594c71892cc2af60381a6db28188e1b651603d41fbc2030bb2b7706e02b1eb3423d7f38ff6ef514e6020303f30f3c3ad2db979a1c81690619a35a801e3bcd77413f37e285b0011f2b6e2a4003239d1f94c6460af24c7228a2af86326ea1199e97365bf7dc5832ad029107445f0203038518fa303494de83c9ae1f80c877b5c0e6dba41880f6df1dbaaff30fa9b9c37a03653c1b2e876da5bd8b6535ce431ae69feb7be788cc67b2fa3dbff11c792c1f13020303d5efbfce398f4205569b3fc872e7405712796b7189d6846e61c7ff33a12ab0c5037aeb2da8a9e504490ac07aee509079823397fc6e9cd25257e658f6e0021ae771020302030203033bfe86ca5a55d4d2d42f5af48205ca0ab08df68e551e61b9a1bd5d575ff9cac3037462982abd4a0437ab5e12ab2af263ab382e0ceba69ff5de751519512149c70a0203020303980043fe396689718e09b0990d71b800219da2873a8e0c3c45d25ffe12bd9e6003f2f9aba950a1023ef8e02568c683c86ef2e77e16dfad909642ddc5cc57ac8c120203020303738b4a16af664d0e0c6b7ff278d1e3b602e6277085730d77844f1430c2f71bcd032c505136023a2005bd6b8abfc49eb783514ea36233d5439525478dc102ad67e402030203020303f30cfa6f63115cc17d752bd07a3848c463334bdf554ffeb5a57f2ac2535c4650037d85b4ea9025d3512a6fafe55d8e3570fc8c968eb67042e0ded283dcadc12ae8020302030351a20a2e192372b9383e5b8ef255adf58a3633e5aa4161424f7b52912e8053f603edc4f75f70c3608079c9f0b4584da6270879e9983bb3513d7e620024f15e659f02030203037e1734c6c90368548b9b6a882b4560d78e0630f3616dc7d4b4b8d77b96a42dbf03c4ed6f8e6cdc9797199a463a51287700852a10099a1386109a37561b538d228502030203020303d3858acf0781afe0adae49a25d1724b36c9989179cc884b9a9c6481f89e57706031da6fb50879ede58d816d1624292f0c60a8133bcbc671dd92d0f9cb8d50fc8030203020303521bd9da187efcbab07451097baf98589a33e32cd33501c5a912f48cf2552bef0352124f3ffee53f7e0f9a0
068d5c0b0abfca5aaa148371c91e2e81df5fba6f8bf0203020303e00ce2232f3e208dcf050f887d02c7b91170c4b98e1d098ec5238bb3d387a41e038a0379dab30ce84865bb4f0834a9c3fd7bb2da17994abf03e89fd8d754bf7aab0203020302030333f5853903cb36caedffa12d0aa0a01c39e91309629a97dddafa8da4f738fb3e038e1dc6012aecd5998053b5878c6e3a398c8a286c7ad4dc0b55f043e4b4210950020302030317e041d91830fe8051fc73631cfd56519fc5bb88b5369298da9807b40d93733703dc7745bf8dfa058bcf1328f79efc9441cf5ad5fb5763c75bdebf9492f88a6c8302030203020303bf9a2780e63ac26ffca6df07f182db72e3e65802b277ea6d40057c383d5a37e3036c1548eb1c956ece0a876fff4463cc3f2e9c3b6eef88379f07e6d71013eb4aad020302030202020103c2eebac9e260dad1f051fa75846558dcc0ed120d1a7699fd1d6b739a3419015b020103b9d64db96b69b035a74a90069691afa2b34a705f3ad3aa82f3757c7a6f2a9f17070354000000000000000000000000000000005ca1ab1e58207af492bfc857210d76ff8398a35a942af892e866b1f4c241746b3ee89ca002595820f889596e5b3d4dbe5bcab5cde2af26d3ad8d88bc086e8b4f929885f33f6eec77020303cd3edcf79c7e0e0d6b87ae523729611faeda92e77bbb59f739e9a6127d890cdf03009a5c01c3cb27d2f1ffbd3fd77ff38f66991f64174f5df1411ea21ae2e22f250203035334694d8b56a2f5e0d0ded81283e7f36b3da6fbf2e2d1b469c008d7414296be03953388b0cacc587c5ca452ba8e96a9071958ef94439690bc14866f556a35ebc1020303227561f72db4ee550290a7b85931038224b1fa9c351395f5f5777f397016d7ae03dde5f312229c20faf5b1b27273112bc022bd0d1dad4195ffeeceb49c05001a07020303d4eebbde54471ef4008ea3e23e4bd31119b1d4fa51a2bce7771c95b70efba064038c6a2b8e1f68d72b2a95ef69cd8eb0ab32781e7687049eaf3b7381596c0bb8af0203036ae82b7b420a58afe9871a632d69be8475f745405df2183722c599f94a5cf15f038a575afe8d81ea9f181bee15a971affeffcb1964ed35ec291304be393899d80f02030203020302030203020303d634ac486eb2f4e325a096c1aac56ae5a0a3bba406dcbede2e9bd4837d1759f203ce1b43774de78b19d67b133fb575ead398fae6a712ebd63e26671f199c8e674302030203020302030203036068b215f89f68246518e7d8c967f8ae78b47c69bcb9e97deca5849a813b2e400384b630ffc67a1dd7b502c1b42165171a704d68ed15ced3b7cbb98bd150cd884b020302030203020303a3c7cf45ebdd7e21dade3434624c9fd521b0ab24a6956e3b8
a777d700149806703b3637d0b1cf58c5272f28f8354a764d3cd48ff7c04f807237da8f4a1e2ef5db5020302030203020302030203020303c018dfe606efd5d17f3d45e91d33d3d7ef57d92a1d509291b1556bbb7e78dd0803b08ff5bb304aa8741af608415c541440edcd98bbc0fc849fe2a89c2c783341d502030203031e0eb0ac5664b1d0267d3c51dd66d1828c0d01a0903d599a317e4578926d4e3503330e2ccc546a3db52e796943aa8960d6d483a3b941ae0caa21cc7b9f7a3c2bbc070354a40d5f56745a118d0906a34e69aec8c0db1cb8fa5820000000000000000000000000000000000000000000000000000000000000000158206191319cb3bf48d9701195789dbbf6db5d3b99006317f5e7da37709f3d259374020303ac874a6acbf6de628134cd74ad9f336206e7aadb1ef09456b6267a770612485703ef323528b761720ce04927a54b81025a935f070420d655217e40eb2e084bd170020303a48199a63a429cf44fb39fdbb73098a49dc171e03d32800f483780adb9aa06580388796e8ab2076fc77f00a5096317ceff8a54da310b014a0310504bcd76f8b8da02030350179cb850b147782f26ff9a17895259e569b740cd6424a7a1479602bd8c822b0371b277886f0d14b6f82cfd063ecddab10fb5da5e0666e040992469d09a6bc8b0020303dee2b54587eeb7db2fe9ef0dc262b6ae679a5bfff89c8f403d181a1d79107b1d032aff27f522ef5fd88213c3865e01c7b4c1720d56778d1bd0e48e6a86fb3b07970203037d0d29240ad72800831a91d8e54019da745c6c6a630a625167723ace857bbb81031ede6b255a1c6ffd6fa2afc16d61aea6555a5cb85dc4669070b69b55a16ac58d020303335f1f02ebdb1926380c362d23b2d90d791f5ec8531287a47d3a1929d6304f1b037b80208ab1e9bc0411f128ccc859ac552945a650ebd0f9161a63fc9944e8d43f0203030d600bfcd6581d405aaed26aa7cee976fbb2bb9c1c1390bd3eb14cf5f6610223030bc731957e48cd1b0f92521fa971ca65f76bc8608eaddfa5243baf39838099810203034b176bbe991dbc4ae5738f23e31433597f9b892730ad4fdc86784eb928cbc642035fa76b91882dff00646e99735fb6accf89476e59c9dd28e0bc7b617870cc15e702030309cc884c89c3b546aecc35f4a95760a100cc3fb5457a82fc413ee2cd795345d2037ac7f4c6f9dc0e8f652f47fbda5c4b53428948acc95270be462ef8c909e5b742020303bb4f79f1339f6fc4ba3b4c61ff1940c27b29459942791fd7b160a04bc8aa411803628f665215c8c44bda1a6243487e35e5d9d922bf36f1976fd5f6c39264d16e8b0203036bdcb7f00d848df36ea8ffe2d419775be23396cb7344a2dd1ab44292769c922303710d1
c2ccfa13ceec5ffc97b3469592c4f2495141e46bbaaae6f099c47b9737502030203035f97c209aeacbb5fc78e69d247a800528f4bcc5e649fdec626ae5ef510ee7a71036eeb37a43ca943f0bb09cb54bfcc7325ed27e97af16f59cab0822f88d3143888020302030373d9994e2d75a6f80adb914f45d533caf2ade8d952a0d8b73a40299199892f5f0347947690bda8388fbc8744af22af00157531bd0f37353b2407b573cff34e23c20203020303cd4c5dc3e51e3a3379cf73004c787ee7cb312c06c70800d0f08e65a0ee2313c40350adbcaba1f1a5b06ae4510704194cefdb5053ffacdca11f354a80cc04d0a2f402030203037ec1e64855ec72f6c39f1832616a45075eda4889495c393ffb673aa05f25e67d0361c764032c6e6f093f7e4e2db6e3324b29e59ee4df2f6df3536539ea135264cc02030203020302030325abb132a4c897744752a4707644448c653f00743c37cd560218074dfe1e4d2803fe62ee54fd13cf254cb8c3b2bf728d8c26703054588e529bb8b2a68a950ea4e0020302030203020303846e32cbe73ce37fdc6fb93afeed4425035df35d637127b54b8fc4c053d405ff0398c008e116cd33ceac2a28f29c392e533b755c24316cf6e847e4ef72b070dcc602030203037cc278f9b41fd17fb5eb3c839a725fcd1ef6000189fcebcb4214303f45dcd2d60386c3bc64da300f1a87efa2eb2724553e41348057fc99d5c23b5b20e216fde46d020302030376bf2ddfddca9910df80bb0785add76937d1e90e029c02b04c0cf421622a232803ba2219bc37e93a89b0effdfc3601f58645c1cb7e818f2254c8fd16fea4ba84440203020303fdf1c4799edef5fe2960f9148627fff521e591247a224eb8d05eae3f51675b560372adafa8e298a14d0da0a71e645a12a23def78db8e81f7a68ef92aac7d5700b40203020303f5a794d38718b283b47993f3bbcd67c76f84c47fcf2d35373fcb7f8a0f43a06b03a14b12d1f03790ac75797e463a8a7edcfb2bc80b65a7dc8d1b15d00cefb315d5020302010312f8462573dc83d436d2498e68019babdcc911f482e1e00c1b3fda70e1a166e40203020303adcfa2154e38e2cdbafd5d56bdaa5dca90a5bfb9c36bfbe140bb31ec0e66716503b5aaf1a6fa2e80ad8f4e49c51808d2898fd74f539ec5de974b57c27466f5e7490203070354000000000000000000000000000000005ca1ab1e58205956a0b12f607189a063054545ab26ce76ea5eb4c9bc1e8d8161646c93ac66515820da6aba51eaf87e14a7585e52e23cc0b789c61b3e808d2aef704ae932bb2ab49d070354ee5a4826068c5326a7f06fd6c7cbf816f096846c5820701c251f0448beefca8b47dce2e42f136d224b8e89e4900d24681e46a70e7448510237426c0
3237429400000000067445fb80203035b2ba27d2c4b5ccd82a303efb2a86cf208d08dd952ed0494acc5aff009a9809303e498dca26358e5fd56e5464288da82073a17cbbd112e322488c12bff1661b49b02030308e7f519768ebddac099b4d79b3da7e528e3e1e7f43fb5e815cc2e7e9bdb82ca03afa06c5681e457eed414f2a781c5cf2a752257a696aa0d741799d3e5c6ac65b6020303645cd7c283714f070784b2e6a24c049ba20cc01422792ec7507818b757b4d02103c6b1fb9b858f69c598cf4359d1a43ec9faa45a7f308dfcf087b529ecf6cba0d702030364631f25391237453ea4bdf6dcd59ec33334c8bf13b3f4ebc24512193a52368203d4a0ec404056d0dd6b14481bda731e46954f9d29e4d43aba64bb8ca52ca87bd902030329cd1de4c7edfcc761f57c5f2466970add88bd8705390cb23184091c99cbdde603eca27d7686e41e3d24558d66cbc83d2a5d35469522d922ab216914a84d977e36020303aa3f3aaee4ea8cc05d8b5a9f3c4528c8de0d5b4bd9bedd4456b8816a9d7195da036dee15633cb92bdefc8f632e08b85dcb8bf1d317f82dfcbb7b76e38f7421361502030203020303f1a4bc7768286c3e023725e4f781a6b00fb11d83f1dda2647000f13ca3c58544035e062fcd2f3f81c8d4d424e25bf7e77e301465425a25afa5d0bdbeee2c6284b202030203038482b5d9958175078c790a8a1effda8f156db2faa9ff2d6473d742b4f737143903bb264f8b66371fe289f7741ae353b137695ca94cbc9ed3ececd3ef601d54181d02030203020303b9a21b649304cec7a5d6d0464d6bd8ddffb475c672c0c9799b3457e4b9fc2a12038da99cc78f04ba4eaf3df326eeb15cb038c013a9e5b76698f493170bd356b13a020302030203039c69fd3c2b5b5200c89358d29432ddc4cdadbf9d1b05f2265bf4af27d968898503389f85ccddd9ba507ac3bae9f0a830a56eaf35ebde5aeb6c374dadfd0ab39aa9020302030318b62235f3bd9e0e268b30ff1a987af5548f00006ebcf51db0448e220c17e862034465f83c3781a2e121eca23c852e6b742e52e0fd76e2eaf886471d3f5c4a3e8502030203038b2faefda31a8d8e3e5590221ea164997bdaaba30fed699932fa0b65c6ab2fda0396915914ec53b6ea1fea28b0ede76ab410d1dafbf996f2fa7cd37f1b4ddeb59e020302030203020303455b9202298fcd235ea441cc50f29ce15a2a1a9504564159a849211c899dc08003f3df85b9d03df952c76c1f9853ce686f21949c732fc9b161b5759faa36b2cd55020302030203020303508d990b34daf5a3f925c435daac3d293f6d861094cc2d343a92c62428fa66da032f8b40a9211667e9c44328d6440091ecb3a46bc15832f7d7cdfa8ec130b527fc0203020303f
993f7eae6e45a6f8557c6c5d0e912cb41b71d2bf37f38affc0b2d8e054193220315eeb3ab754628ce727cd0b7028ff8ed3291de7566b99066e127185d043f595702030203032f2c132f32f21e267ab64271e8f2c0c39fedbcc509c4589616cffec21d7332eb03839857347599c19c43a0acfe53e1bb5bbe0d68ddb49cee05f1b24c5acac24a150203020303dcd0869ad1107856680f6bf164623fc709d26d1a0842bb9c60a383f255c0ec2403c92cb1692742c4e2b6a91d13c3b371a9dccd29f898d8f6457ad052b1da9efcf6020302030203031408a1feb1c4cefd2e71c1d7ce58e6b4c2d139d48c67037d40dc0d60390af539039c51675ab13cc260ab6875b12824ed60903c1755add14024e27508ac0a3b9d81020102030376fdbe16ba7e2f048d9c311cb1f99291b4f624717ddd7e9f2aa653099d19314f032ebe85ea3fef7c7033338d1ed98e187eddf75dff4772a23e19392ce61690f77f020303f901f2ba5a7a95db9ea7106268f17f341206944377d1f006921211069cf8a0a103f43daf24401f9ed2d0691570a8ccdcd016c90b722786ff590276f7cd5933ff3d0203033cb8f613c530196a2ab151996cc3eb343199c5c0c0adc212268f74f6a092666c0355b4984426bd4db31ca5d70798a18280a4d319786bd897a29365d2db7489b32d020303d7f9465d351f2c4200659307b3cd7cf34d3fea84b9b23bffe5bec395f4a2d88a03ef3e9f16053b7f799f207451eb3403eb95301e9c9e721dfde0c41ebd8362485c0203036152db0543c6381b557c70b284c75fe77b405b54d279a37db0ea5a382a61abd603b70663772bf3728213f272a0d02b2ded9cd31441fbb82b9a44d266c56e7fdf58020303f967722c10537809246d339e984382cc197deea70a2c433df88fd7797701dc76036e06014c6d6c4d1358aefacae43b83631ffbbb39c93874faa4d589c1f60ca07302030341a2496071d2a84dec9f60bfd3288fdcf01683618900806b1a61a740fcb95d4b0338cf0dcf2e49a0359d0d543a3ac97474876f7605800e270d1c8671dc375720250203034a347b6bdf9e875c714c0790a2ad84b01edf7b15c4d23cacab0598c704417ea7039676ef3f389061effccb4e08a0afc2971c35bf69edbda2e91d9e88486113990e02030203020303927b20cc65cbc0d70e9880b16dfc67b8379ff4a96b95309302803a1819d95ea003eb0ebe2fcfd0a9002bd0985e47dac1c4a01561de0da69bea0bc25ff1b519d5b602030203020303a4c7f2025180b6de7674fc2c91392a565d9a28a77eb193f29d9ba706c6fdb42f03d2bca54ba531de1142b06bb35aed010d55ab6e0d862cdd7807e4136c1b9d0c490203020303ec1282aa791a0b578de360336d5cf95a7f3bf1ffda9cf697b3aacf941
7aa38ad03cece3331be90852d59eb04e3bc87b03657c0993626d3e36ebeef97baedd928f00203020303afb305376ba08f5bfaced38f127295f994096684417f9de1a8f496fdccbb3547036bf14c6051f3bdb18d621bed206c3ceb8daf8ec24843921de9af2dc2ba70d5ba0203020303122291009057e848a0e15edd72e47061463ab3aee368289eddc782303e9299cd03678ace78eb9da91eb3fa9105c9969a0aa9abd66ac41ab138aa70346daadd327002030203020302030203020302030203037a46bc17ebfbc47f6d99661de00074c9958e0f7fd66df7c77c236b89b165472e034b58bfe7c7506d2891367c270ca350269dfc0a08b7466ec2496c6330dd602bb302030203039b58b0df7fae59a4cef25184d849214bc145cda115b2c0dfd85fd470ecdea70f0330923d4d299efbc138d4442519d30cd33a7827557231388b81a6ea9c65eabe6f0203020303af3fee608c2e8e5a30ffc6345d86ec1b2d55f10e518b4da5e8eb59abde07b59803c2016682405d3a953eba254601d5fc0b8966a33efaa51918a4a41b8e0acbeb4602030203034b1387aa6d0ab944e2ec65ce38c8643a0ddfca5c3059718f398dee501291569603528cbab25216c4397a402fcb572f0b512a773dfeafa59e401989a4da13406bfe02030203070354000000000000000000000000000000005ca1ab1e582054b6c4d9862a1658dedebe99a0f61d94c5d1515fd031d0dfe9ebce6a1454f5c658203f14693500ccd0260659fd9eaf69570edc0504867134ac88f871d91d388b63690203070354914e7547b9051ea6226c30495190a2efa15930c95820ffffffffffffffffffffffffffffffffffffffffffffffffffffffff74873927548382be7cc5c2cd8b14f44108444ced6745c5fecb02030311ab6695ec969171698c1f56d4c05373d8505a2c7299fb05cda1d4351e22bfa403478b94ae515fbd01728835b532c7c45ccc78d200d3d004da6917337e139eb729020303ffd14369e7c7f7aec3a890a20234885f2c9fb9802ec318d8434ebcd58a696153030ddae742090ea458c3f232dc894bd8cd3378b4b4590a0523e09a44e0439fe0db020303b1688d8c7806365d931579ccac8dbf7a8d7705ac393159dfd9c0395ab7b5ca5b036a6c978a565b15267de4330de7b6166014082043c5cc80370953767ac501ccf2020303fd878c58bb70337606fc9f519700dcabaee2f175ffd956a6d246c56e38de3c5a034ece3162b251497a52be7f417b99722c20de63b35a0387e0cb1d8a1ef6bd34190203032db40fdeb2c5256d5a237b6134f844646b325bfc12c687916327e21a65b1ae6a03381ca5f3231b0698c6d69bd685fd1930924395002ee0c9f1f3dc9324570c4f52020303eaf50a55e8bc433b8c594aeb6ce
2dff8d6dc8a6fa7d72076a07d6b771d13d78b0311a2827108d1c853cd8a63db81104ad8493e188969ca0339d0a01ed043b47cdd020303f7993cfe3bc67991b923b2f3470e42e23e78ad30096bf8278f293053de07b46703a0c8d263334a785d55f5b7be433841bca1d7ef1b8743e6dacb4e4fdffc52a77a020303b616a1ceb3607803c41329eee93ec3541b2ebbe690a4f29e3234441d7fe22710033646798b76e3f8d1cdcef03b5802388ad826a45b0ba508443fa26d5cd6fca96602030320f9766d80286663ec273eaab27d516a59305f6fdb96957af2602f4c0eef4b8a031c930c476ddc908dc2d5ec253fdd2c6687f32616ae7698ee6f0f6baee9871f780203032d1c40f0360f2f347afb931f1caff15b122c02dd058d53cd31e10da5eb3a5005038da2ec93073400637eda663a2b3095ba8bcf473b6bc0ddba6732c0d88ea26f0402030203020302030349147352cccb7f2119bbfbbbb0a4306ee33992973d1777a3f176a7420854218003a44f6acf78a34c96774821d091ce968f756f12c95ad543c97e52f1c041e5c1900203020303a8a8350630628c9ac16ce93f256b9d92a9cab6a1144cd60fee0f228e02d0d04403fd17945ef7c2a783662ce43c34c9e7ff044bcfb5a3fb24299f994e4317c620010203020303cec89e1b5d9d20c59a319c536ef1ae8c0a67e0ded8f6ce3a7eb1979ef183d3870348dbae09afb5d5232bc158cd3f9c2728348ae93f0742e0d71971d3b26c301c0c020302030376e9a3f309a69b0c2c7ca3184457ba9f0ce19145bc96d4bd846742585cf4e9a903b07dbe4dab435161a33e90e991fdd8ac5c0670d77cf2b72ae5bc530519e6fbaf020302030203020303c423e16fb3487a4d9126ad5c533cf130444a4f099a85777493cbd2d231f27b71033c4bbd0160fa760c6c008ce70d7342f2cd5524690247577a0ca36e15528565cd02030203031e5c59c8eb7467fe1b1b59f78351143028717a9679b5956d1a42ab64efbbdff403bc2db4433eb1e4eb520035e06ee46cdd233cd6f74e4ce438a0743af21cf67ba10203020303c1da641e5501813afe9c4653f2179710154bfe94ebce827d0bf64d70bd3baf7a03e2bf953702f6287b134eee978e1b18a36f65b41c2c673d75876215604661dd50020302030203020303a35901b035cd24570a277362d9ece906ef4d6e00821b55212d69b6fd6775472d037568928f5eecc9599b391e6cb75468d91ac18de51d7e984eb678105c39fc8a4a0203020303791a9ee8b5057a6ca65118869d354dba135fd5c518d63144d3860987f084bbcb033c8c390d481f51cf6b43c22677a971beae0e62e8b2ecfdaaed05b48ac0f60294020302030203020302030203070054000000000000000000000000000000005ca1ab1
e45e8d4a5100002010341305ecddd1b56329ac9f09a1235eec6ce6be69492a9788db13e9187dc21e9dc020303fb1c6d1aa6d3f3bef7a0bf4130218b3b168f9447e69ebcd3b68c2b2f41d9b2ef03652ba6f9b69aee3d28404079416c2f8fba4078d66b558c7a8d9615cfe7d3bd30020303d9f042d0d2f152e24d8cde02d3a7d7a1fa234efc5dc259572d412a2e607215ba03c5b76ff595e1d74a22eb44a5aed94f3225b6126c2c28ef04bb75e1d3804925ad02030314a2b125da4db5ba673cd5c0aaae8c5bf0857fd45728b868cff3f40eaf9f82790393e93c4f4b58f6f9d397d136319a29aa6b691b652651513bfc2297107379ce62020303f00359907dd68b2ae8e2d252d3313f3ba2bba16d21995333b2162b24c9bbeac4036435af585f0f75e60d362629108f6768756f7b39f1c70ab7f79e6b4e1bd9f08f020303929e2f8eb833089a3773b497247338865ef336de61e7da4a362eb3e5d5601a7203323197b010e3205d910c230463758f39cd6c01258db0a11b9b47f4c278db049402030328314d2f79ba26dc4f34afce51e50e0e05d61b253861e5ab47cc47dab500310e038502bfdf255197b6c7929c445580eddc7013470aa85f531e89cd595628576ef6020303295e1973d07a067f281e3337e756bacf10dcc295f7074564874ea4401eb2a4e503de12047d931a054019fb113a4c5d531be2d56ec3d20f99b1628f4d617b15da1c02030361cb373dd54af082c98abe4331b16f360ae70b82b3f111dfe54eab9bb47a85f0031bf72cc92e51f3f38f18d4d0a77c173ee78ae62dce6027288dd37d7f1024df600203032d8279aaf065d93b0e811dfa25bb7c19325ad2e7f99cad95d0737c5390500982036bdc41d82cbe612f8caa639dda471df1d8efe19aba0f39e884b0569c597f68ea020302030320c7fa871d9cbf1112255d49920d07bf151532323d32ceb6ad4da291fad9327403fccd5f970aaf4f45086402c560eeb209d84b4da278dc69f17e3426ba0b273f890203020303c3d3043a6c5a67ae707239a66070748c2efc09d25efbcca01ed86206919ee23d03407bd9bd8d77985f52cc5d8781fc24a200ae2f8bdbaa77b753f7f245f6814c87020302030203020302030365f66ec8e09bf15d73a83402fdc462cbcc40578fdf5d4ef85bbfbf9b5ea5e002039ca41cc26f222ece8fb37316d9436cb914d7041ed51f1d5d3831b735ae2f0721020302030203020302030203020303ce8a414b8283b20263f621799a194ddf5d753bef21ab8253b41de0ba8adf661003a8e38716cdd8a09095a7036c686009bd8236b1c7eb9507540fb981baa9a8bc4b020302030203037ca01b97c87ac12c8995d3c80f7aab3313747ace5a829f08eb68381a5a9fc54003e5554fbb47341d48f82f6
4a26d175a7d3559378657e77cf2de2eff917b95be300203020303512c2cf0ab4340a1623bdddc301aa586932f9413ea9bf8c0f1849d2d70d5d0ff0375d0cc499c7f76c70939fd8d633c658747eebf0eb138c15d902c34f0de9098030203020303b78dcbd59a3668396357cbda038d7e5bc77aac4acdb3cd3a75e96eb05079a6bf03ceb3ed2850bca5df0bd69ce2e85e9daff43cdb4c79f58685340f521521a0943f0203020302030201034b33ab5a3b8d3b01c374c1d7fcfc714398b7f0704ba2f1ee757670269fd5a7f7020302020203070354000000000000000000000000000000005ca1ab1e582000000000000000000000000000000000000000000000000000000000000000004383b69e070354000000000000000000000000000000005ca1ab1e582010c18923d58801103b7e76ccd81e81a281713d174575a74b2ef0341f6b9a42fd5820b8b76bb549992d9bfc44e3b36d087a175b2e78b9584fc752eaa3013e0bdd31e8070354000000000000000000000000000000005ca1ab1e58209bd14ac8c1cf553e0ad3a2c109b9871eb74f3c116bf0bf492ef04d2983722555582090629dad0a40430445b7d2b25a8d19c5d7a929608ed7890877a499aaca01ca5002030315edee06840e36ef17d13817ab5475d12f7bd50113984febf31e2cd80c08952c03d360a7d78676862429feb7c95d052c1e63379b8ad3becf085a21baa353ab93d30203037a2fb952c2cf8e85d9706bcbcb5a69b83b13403b58f06b0767f4204acc5917930310de142eb3b2790cf1e3694b72eecc7e8ab3860f543c15cc24274ff69570f009020303879875563fe8a079ef71e84b6840b187c681499095de9d02d8b101c9dfcd111e0395e9fc3b000e49b65678f256d247786f72c91494c960d117b7668045c35502720203034c4d4621925c12b3878ebf267a1a013cc3d5675903cb0e22bc6d1df0bace3f8d03c092e19f1fd097b76813fc2412338735dab2f62302645b0e72d195b68a1e4d4702030350620914ec3787f2d03c118a874edb853c9678a3949ce426fc19489744df65e2033a2bcd06528de10b0bf7c316956d5af798ce85d8618011a8db4df56202c17f27020303c27ba5c9e177fdba8afc9cd524bb5616116bb12aac5aa30d1918e36228883fda03003a4d0233fc2ff4bfd5cb02b70ae195150d4d18b59449829e612204e831187b0203038cec528699f0b6819a574be7bea0d083f5999e462c6a464a37c582392140762a0393afd21f19e4329c0ef6b1b06baf963080c2980a73c5937cd6322ef7dc631dc00203038cf4931c97d6aa8c453db3175ebdf27d40e4e34b2b3ac67e8888dc34556a99b603cd716cb8821688b0df7e56b2c31036c17c53a5f6d50b50cfd4e68d30d2420120020303b81
ba13ab693dd6dffd70ba32f7bd51fbd5ecd3f58bd8ee96d7b081dbe45efa803dff9ee8db1218deb4733e71215a2e1629d8c9f5e36bc0b8184d70f2ea6e8e01d0203031aafd4025804cbeabfe796224eda42a75577ec804c615abc88953b7c966766a4034baee3dbeedfb1b839869d087bbadb64bd8d4007cef5bfcd038c7f8436c4b7e5020303f659d8fb79866e5a2f9479b24ca74b34dae4e211e6a758e376a1407294fd840e032e9950f2c2283fc366c78f61d806a412a244bebf4dca45f250dceff31fd3a2a802030203020303375268372cd898f2295ec6c9a9838412658bf8a9ba5c309854a92dd747e4eb3c03bf0048ab25caf15956b958175c59038226d0331be1767f2c00ae19bc9f70f9ff020302030203030290f4c412920a6ea22d4ec8091a90d63fc62609d3e55e44da20097cdd8204430338962fdeb56eeda46eb38c254e32bd4fa863167913801664a58d773fa3a4882f02030203020303b83955a533913a8e816c0a9e001379dcbb9a89e48410b365841c552e93987a4a03e42aa480068387d975b85b52ab67acc0d5de816085765f419fec172afc69df34020302030203030e1f9af6f9a3833c51c53d2ee2b598421c0227dc651646350725e51762077ea3039ad12ef2e43458f28d5267d58f355ca92e3f625a595042518e0ccf8b0d4e96e002030203020302030203020303e8bb2dae757d043417195292bac773cda990500845f91a94d00179fe89525c3e039f7fc724f0bd8bd083a725fa7d2c8169bd8ca33d31c9146805940f0ee480c3dd02030203037d8dcb012bdde19a0dd178c1de91d21cc866a76b9b6315554fec4bc4f5daa7920383f4a4890e8cd73e6e32096e4b11c5c0c50991dff65297720ea9ab7b8ccf3ef302030203020303c998c4c602a03cfa0c92a1542165567f11d23f2ae5fb91d04e02292f8e297548039447848097d9500f21ebe819789f98461e01aff7cfcd442c8afe8e07b87a95690203020303f6441de6ba2bc5cc9ead4300519a94a14a80b85f2fb0fa41e2211d7c02af0e6703703a99e4c2133e89a6e892863f14f143cf5f2ad94bd527081c8be6143f14f3db020302030203031da626649ee092857f195eb0059639309d744972389a4e748c471f16b0fc3c2e03f4072cd036d5bbb777ad24fa0b1a235806ef25737404d7ce4f83babb76bf090802030203020303c7f6a9615846a2d627db4c940098479bce3c61c8fc1170d0b7f8c9ac2bec5ea4033664667108158e9377b49cf3632952090b6eab3ba6eaed4f48ca9d5beb273fd002010203070354000000000000000000000000000000005ca1ab1e5820eff9a5f21a1dc5ce907981aedce8e1f0d94116f871970a4c9488b2a6813ffd41582021bb230cc7b5a15416d28b65e85327b
729b384a46e7d1208f17d1d74e498f445020102030203070354000000000000000000000000000000005ca1ab1e5820cfe58c94626d82e54c34444223348c504ae148f1e79868731da9b44fc91ddfd4582040fc722808ecb16a4f1cb2e145abfb2b8eb9d731283dbb46fe013c0e3441dfbc070354000000000000000000000000000000005ca1ab1e58200000000000000000000000000000000000000000000000000000000000000002446745baae070354000000000000000000000000000000005ca1ab1e5820bc32590bee71a54e9565024aca9df30307a01cf8f23561baed0ef54b30f5be68582007b7aa19b2ab0ca977cf557ea4cec4c0b84b00a9430cfe0323877011fa03269c020203e752c67cd3ffa4dacba7194a973b10998b056b5b936d17da2a8eae591a8e175e020303abdf2e1db4950317aadeff604ae51ac75e8281b1ea22e7f17349757c71dca21d03fd88dafa9a26e9c8f617b5de3e1a6021092dbff5c1fdb5d8efaeecb1f333565c020303b8a363b96519cb0eed132d807f6e42ea35f115584c443a74b14a19bbeac463d7038247bf369e033fcc19a5797960f1387f04f80cf396babac560060887288632db0203033497b9767463d12616a5b29b2d66156e49b3cccfe6598e2e73d90190e04a15120384203647fe683ea28ce78395875f0bc39f1fe1ce6c9670b8393161514dab4701020303be9a5bc3511c4f8466f2316f7d83370249d71f17357eda7cd7135b86b136090703f4ab8e9e9441ad40280807e7462de0147c3471983825b8f70b6066331bc7fdac020303fedefede445ae76cbb5f94df4e543537d9404b97450cef183b95f2fade25aa250384deb1e0e5121e700221d4f1ed13d488aaa4275c46416c2003d8d1eddfd78c180203035ed3f2ee894a97e0bd924d79ee091e8430799ba07601928b7912280a2260717b0365ca974c72e3e2db5912a9c87c5d5f84c2825a1f0196fa4793cee99d4d173351020303678452244e14f477cf17190203e3d2741cde4dada731914ad7987d94a95fe953030018accfc6cce1d6b0b884be4fdf54fc21d7f2b0f320711abea4aaf2dfe49d52020303a72a40bab31ce553d4d303b80f98837eb346dc83decd766ed763739ccaeb1d0f0334cd6263be1472af1b73f09f382ab57941ac5043ccd4feb8c4abeb0f2b0a869702030320b1e0149d703853b7ff9219e776215178aeed36a0c047635656c8a804e0f73f031e01e1d5be3de353277a132daf82f484854a9f96ba3555c42f03f63dac8a72db02030203020302030376f04ec9d36ba7e80f947ada6e0259810101c9e7d45de9422ca4db530e69bced032f63219f0d7ee4e45f66963dffc99e8c00abdc81eba9881462b2586539a99a280203020303608497d825d7197
3307cda5edcd6d1f94aaf6ffebc7430ce2f8a9d7d58116881030188ddfeeb33494d9da289334e6a850d240512bc570857f475ef749bf48e83a502030203020303053f6a4c851f9d91a49431b1d1a85baeaf2538b0f94b8cbb089e9c440263e6de03bd1e02a14a5262aa6cbec4a59e8040bf5765c7b59d08656f0e7be78b166a80690203020303856ac3a92e4d8741331982bc77755fc5831ec683050767f2b9d5aff1fa786cc60386b7ce083e8e52be86e0780b6ed5bb8680acbc96259692245ec1f521c01e26d702030203031bedfc4c9c092921f7e7093b07a76dfcc37b9d1c851d1f62a66f209c22913b4c03956a3522834623943651a19db780c3e98710a341789ae868713e46e1b9c1b98202030203020303b9828062d71ab57eb548db7b526b960e119dbc14ae39cac02d7d00f8a195db8803c44a3b5fecff2afe00f98bf696d082a6a5175c5df8dfe72521b2c5659911b8480203020303b59aba5f6d921a646bbebb06a5e66188f856d56c93703805fe05e98a54ba2cee034576a02a1bd3f5e7b0a9f8d07e5a1fc02840fb40cae2f9f6400700f945b53a3102030203032cd57ed327e45cf463bdcaee1f1a638c75288b1ed961644cb925f5fdc451a63903bd691a351ce40e663ca27d33cbc4a814e08f6f9ca16661964166d7efcc7e711f0203020302030381d90d25cc12dca6684d3ffebcf1e5408d0a365c9242224f678548c5963ea95003b33603045dce8c8ff12133b56e2a2d3ed094cf962edfb62711e929ea2ec4f37f02030203020302030203020303beae4badab0dddd8f2223233bd5c4be6b07ced210a07e1372dd0f271fede38a903c41fa2d194a23c8b2c481a227801390c6671150c1d39cda7c9c0e883a05d629f0203070354000000000000000000000000000000005ca1ab1e5820966c9e067cc4e8a48ff6ae64ca4d8bf749f01266043ce38d3671782ac86c738058209971e52f05a7ac040ea951a9f045f6b0ca4f28805a30e62273f331e4980b3a37070354000000000000000000000000000000005ca1ab1e5820693c07ff6229368d9eeeeacfb0e582d270d5bbb89324a80731633d2bbd5c77535820e893eedb484a91d508d6a0fa224172039b6c2fb9415b8e17729aa55b42ba046c02030389e3fca41e0d8f09375c382623317ca86dbc11e768d3092378ee9200e7d24b29037cf6827003ef81b19f0742ffe1d5ae37c11f93b8d3d4043889bf098c271f2e720203038a710f76a0386b80e437c9bb7e2b0795891cc8dea00d59b49f1e62c95c385b7303aab1cd0e8ec8a91b65dcd0e5b347817bb11adde535c72ada7effe7988d7fd7ec020303eba38e93550f3ac5a10e031758339464c3e9bb984e93c5eed408d709b33e437203374d10d55d31d4afc6583fc4c
c50332529ed4926608f647c2137442ca096f1ef02030328c04fd39c18b7b519c893153c0291b3cebd6f815290312e4200f9fc82c92db30322339478fdcca672932963533acfa941a5a526cd3c8b8639135df8f9914afe0b02030327a8ae74de2be2de1caf4666e833e43d52e360ca878143e3897046b3c3690e56030915b77a3bc6ba544017c15f038d9d5aae50e7db83f6d6feec452467c41ff98f0203037511f8af75f75f986a1c8714ba6c45599dcaa9f61e250f7099d27915b46a8ee403a79923041d4666433c3bbc2b46c8c137c489a36d2fa5c84872f3e254780666af020303dc8a20e01f59403e932ea67c29b74d1615fed5768abcf1df2432e56a0bb0ee2c03462f1a21b3bf910d6ca08e90a47a11d25ad48c6e4d6bff7369f2f28dad30ac7802030379e75e6dec2ffd37919ce25da5a5a67adcff93790a7cef7ba0c1534aab2208c0034a95a94a94ae7a317e153903a888ab404b75042483abfc1b53994584fed445ba0203020303674f36f2a847c25e092483b595d6d69338bbf807516d5b424e3ab05fc95719cd031b713460225852cb9a5005429fdfdc44c8d71153e1aa04364570343a434c6388020302030325b7b7ced4578ad2120d4703c775f82d4fcf6ff0578d60d76b1b3d5bf982812e03ee4f0964b55a782cc4128ae4291d947dfd63300081d89319ddc357b6e9a7d365020302030360d8ba62d7544d9d354731fc286a41c33869212a45b7322d2c4e67504738e65103b0ae0178f65708516a57beaa38a2cd4d344fe8f3a217f1fe9c4cb2d41b2975b502030203020303b5419117efdf04c24efdb20fe6e1786d53529a5630a98108d2118c7dca7c136e03aaa09bc73d06dc35034f97fec917652f664c4d768d0c036b2539e661f2d8fc380203020302030203020303d51b1bcd3eab3b6a6c384a831ec8080cab63c1c44d898bd626248194376fe1de037ecc252f46692f6e8959f226a5e94a4ac314d38000dabd3c66c06489789651bc0203020303248599a8b4c29a8dfd17c29080088b4573842ac4a1fc2fb628f1f65350cbc2bb034f7ae2704632668f91a5605aa808807c7c83f999d35def3d40f72a825eb561ec020302030203020302030203020302030392af697495712b4013883b3f5ad2d370bdb93f0ed60416692b0267f10d9a3caa0386fa8ccd91ab622b232223f9a347f1785ca9c4b7323a2e0d19a7971c3afd63ff0203020303b4f12607fb8df583b854d7b235f4a64ccb2f4bc9819dc50f3a03ed0d4906910e038f64a125d14bb92752d65593faae8e41bb5e80e4f147b20f0c247078f6e7ca77070354000000000000000000000000000000005ca1ab1e58202d11035f2912c26c30c4f8957d3910a20622ea8709c8cd3e0ad87fa0f4460bbb5820c0b
f0b2ab68768eaabe5fda7814227beaeaf4c4ee7e67f5d07aefaf5f0410ab80203034d5eb602925f13a2147a2c1439d43faa74e2561bb3d27811f02042466fb2804f035d9458bc537a1957fddbf6c5f13c6bfc9349abf1251df9d6dd48b5b574f6f48f020303bbf6401ad2a6b95a3e749f5b31224fc7fcdd083e7aeac9671ec3bebda312fe5c03393a914dd0b171b4cca2f5cef52cb4ed4b564278c0fb678e5e8f3d911b4addb302030356cdb16849ae7aff540b8724f73974149f71cd3f984360537159a273a5bfcc1d03791ad7bed137c9501bcee55dc6c9b030157a30c37fca14d39c25d9d5137ae88b02030392940198d6b1df58f0a6c3cc1da02efd9547043d626487166ec858a5aae7b61903efbf993292364275b60efda91d4c18f5a87549ebd407ba16763b1f0c6113a6cb0203039691d481d60a086435d9f914e8e2b5e5a68abfafb82dcc9d6de2176920c35ded03347f67f0fbbc63fa8a3b826c6491f42b13869a2abd2b6326d75d51cb30ea9cf1020303d822fd2ee43799eb3f714589ce7bea36550c8fe4a90b5a2aa68a95486490962c03718887bb3381bda095c52048955b7ce2e5b35e5bec11515e9fa86187fa9e0fa70203032d3bd6d97e29b994d2d73867b8e19f6b3b2a43d5992a6b9b3f350f8834c0da9a039f1aca3992b3c769a6735830359c9153c2daee33d937c9b903a05ed9ada8f5d0020303bddef4e591830295d3c1f6e27decf301748434c3df51aa8b451558ee7962deea03926ffe488854b96b623872e9a54c6e5bb737fa12f402bd8a2a79f34000ba21520203037b0ddd3f35449b7e435243005ad2a536fa28167cf7da21ecd2d4c3a55e6421a6030fd4916c5757cb6e137fac5d203ba3d5d50a5077a06e3804faa80e9783e2367d0203036fff8395e24c14d9e40936920b141c84e2127ed823a1625699eaebd1f26b69c703069db41eccbdb4aa227cb482a97d6b342d0413855bef3c9b432d74ef0be43e0b" + witness2 = 
"0102030203020302030203020303ddd15247a8b234236d91271277b1059a674eaed56c29a6d8905b27ea9460c7e40344f7576ca6198b0bb6daa81b4eb6f594b46608e0f4d8d509361f0aac88eed2b50203020302030203020302030203037477c5b7ac361fa5a28f01782fc1b9577dfe27c9d91e5193c426916c166503f3033e6831fb92c6944c4869e9ff429fd40b9191f5a5a9fd8e4e26f67be29feb3d00020302030310c0064663f729ce8c12a4db054317ae8a3d309ee54378eba25ca39a4670758d03fa715595952a40ebcc9c06b02f6b1960a1f74a722c3a9fecba1aa66f32f1850e0203020303b010b79cdf4c9bd8f8164ad282defed968658e80fa57c26c19f5cadcfd9c890e0318f8d37b605fba62e9bd02f5554b8bd4784578021c737c4cb957c4ed5e8ad3b5020302030203020303c4ac3ac799860160a30a3304b765c2c90bc414edc3739a5d098bb7e18009548a0344ed07cf7b7b49fc2e7fc9c6c19d1b60e64990110188e15b445320a35660f91d02030203020303949f805ade2be05694c8011fa17fab3646a43f38f96d868386f0ba9558ba5f960302aabd9fbeceb9711f46d634513830181412c8405aea579f470a19b477d090140203020303db978a462b93b2efa3aa3da09e03370b570db692c6d361d52ae1051bdb26a3a903916d67432c505e1dc33f3617e0743d761aba44785726309191e79cb18b666e7402030203033edca13bcadc1db9305f3b15322cc6d774682fffdfe2b509f81d00b16ce2dcd003dc94780e238944094e7856154e6d3e54fec28293a9a70eaf1cc2a81e874e22170203020302010203070354000000000000000000000000000000005ca1ab1e5820e72de8a1b9696dd30f7886b15c4cc9234d52c6b41b9c33e2baaf8d88fc5b7c9f5820f8fb80310ac041e7a5e79c138d7261cda5d8a988dc9268b5a8dc5318fb610a90070354000000000000000000000000000000005ca1ab1e5820000000000000000000000000000000000000000000000000000000000000000358207d7b0aec16983b640324af57c161ae800ab5b0b61937d153540fd64ba724a431020303c6cbb686c9d7a94f49dfbee076ae1b87f1c9bb5f33b7c98a71816c33b4731a3b037514c2021b2bb805e2a6060b265dd53c069a4587e19cd7d1af99d0e9c3d0e550020303784570292bffbc3ee00153e5806a317459fed4c1d84de0515dcefc177404865003f08c92f6786e67148e8fb2bcd4eb813a665e16a270b475605d1d84b587450ff102030344c5e2a1775873020ab4e5a588e95d6702878cd46012682196dc39738fd8780703a6d5ee17fe3be7e20050e4e66c54b5188febbdd3615f832c35b073078258b214020303e5f90b59ef9f5ceee0e0a54551e41a62431
ea06aa09be94779c474ca4d18683403e794dec8b1cbcd53bbecf14b61869699ed3f92ecbb4ac3d9a8bc744c09a3e69a020303476c478891a8f8d905ebf9e5c031ba1020ca1436538bf9b97c6eaa1b9512da97030e077b0c8215c8c43753d044e318788eb8e39de692fe0ccd46396d3b06ca6e0c020303ddaa569a922c9c28d9a225a4c212c076ad5e353bb7cceaab630a3884af855d2403d5ff4c122142c026a0b24b79b4a667d25ea916ef64d8a8215aa29a738c5588a50203034b1d465e96a44ba0d7983a6f4ce10a26bce7816b6d51ba8ac594c71892cc2af60381a6db28188e1b651603d41fbc2030bb2b7706e02b1eb3423d7f38ff6ef514e6020303f30f3c3ad2db979a1c81690619a35a801e3bcd77413f37e285b0011f2b6e2a4003239d1f94c6460af24c7228a2af86326ea1199e97365bf7dc5832ad029107445f0203038518fa303494de83c9ae1f80c877b5c0e6dba41880f6df1dbaaff30fa9b9c37a03653c1b2e876da5bd8b6535ce431ae69feb7be788cc67b2fa3dbff11c792c1f13020303d5efbfce398f4205569b3fc872e7405712796b7189d6846e61c7ff33a12ab0c5037aeb2da8a9e504490ac07aee509079823397fc6e9cd25257e658f6e0021ae771020302030203033bfe86ca5a55d4d2d42f5af48205ca0ab08df68e551e61b9a1bd5d575ff9cac3037462982abd4a0437ab5e12ab2af263ab382e0ceba69ff5de751519512149c70a0203020303980043fe396689718e09b0990d71b800219da2873a8e0c3c45d25ffe12bd9e6003f2f9aba950a1023ef8e02568c683c86ef2e77e16dfad909642ddc5cc57ac8c120203020303738b4a16af664d0e0c6b7ff278d1e3b602e6277085730d77844f1430c2f71bcd032c505136023a2005bd6b8abfc49eb783514ea36233d5439525478dc102ad67e402030203020303f30cfa6f63115cc17d752bd07a3848c463334bdf554ffeb5a57f2ac2535c4650037d85b4ea9025d3512a6fafe55d8e3570fc8c968eb67042e0ded283dcadc12ae8020302030351a20a2e192372b9383e5b8ef255adf58a3633e5aa4161424f7b52912e8053f603edc4f75f70c3608079c9f0b4584da6270879e9983bb3513d7e620024f15e659f02030203037e1734c6c90368548b9b6a882b4560d78e0630f3616dc7d4b4b8d77b96a42dbf03c4ed6f8e6cdc9797199a463a51287700852a10099a1386109a37561b538d228502030203020303d3858acf0781afe0adae49a25d1724b36c9989179cc884b9a9c6481f89e57706031da6fb50879ede58d816d1624292f0c60a8133bcbc671dd92d0f9cb8d50fc8030203020303521bd9da187efcbab07451097baf98589a33e32cd33501c5a912f48cf2552bef0352124f3ffee53f7e0f9a0
068d5c0b0abfca5aaa148371c91e2e81df5fba6f8bf0203020303e00ce2232f3e208dcf050f887d02c7b91170c4b98e1d098ec5238bb3d387a41e038a0379dab30ce84865bb4f0834a9c3fd7bb2da17994abf03e89fd8d754bf7aab0203020302030333f5853903cb36caedffa12d0aa0a01c39e91309629a97dddafa8da4f738fb3e038e1dc6012aecd5998053b5878c6e3a398c8a286c7ad4dc0b55f043e4b4210950020302030317e041d91830fe8051fc73631cfd56519fc5bb88b5369298da9807b40d93733703dc7745bf8dfa058bcf1328f79efc9441cf5ad5fb5763c75bdebf9492f88a6c8302030203020303bf9a2780e63ac26ffca6df07f182db72e3e65802b277ea6d40057c383d5a37e3036c1548eb1c956ece0a876fff4463cc3f2e9c3b6eef88379f07e6d71013eb4aad020302030202020103c2eebac9e260dad1f051fa75846558dcc0ed120d1a7699fd1d6b739a3419015b020103b9d64db96b69b035a74a90069691afa2b34a705f3ad3aa82f3757c7a6f2a9f17070354000000000000000000000000000000005ca1ab1e58207af492bfc857210d76ff8398a35a942af892e866b1f4c241746b3ee89ca002595820f889596e5b3d4dbe5bcab5cde2af26d3ad8d88bc086e8b4f929885f33f6eec77020303cd3edcf79c7e0e0d6b87ae523729611faeda92e77bbb59f739e9a6127d890cdf03009a5c01c3cb27d2f1ffbd3fd77ff38f66991f64174f5df1411ea21ae2e22f250203035334694d8b56a2f5e0d0ded81283e7f36b3da6fbf2e2d1b469c008d7414296be03953388b0cacc587c5ca452ba8e96a9071958ef94439690bc14866f556a35ebc1020303227561f72db4ee550290a7b85931038224b1fa9c351395f5f5777f397016d7ae03dde5f312229c20faf5b1b27273112bc022bd0d1dad4195ffeeceb49c05001a07020303d4eebbde54471ef4008ea3e23e4bd31119b1d4fa51a2bce7771c95b70efba064038c6a2b8e1f68d72b2a95ef69cd8eb0ab32781e7687049eaf3b7381596c0bb8af0203036ae82b7b420a58afe9871a632d69be8475f745405df2183722c599f94a5cf15f038a575afe8d81ea9f181bee15a971affeffcb1964ed35ec291304be393899d80f02030203020302030203020303d634ac486eb2f4e325a096c1aac56ae5a0a3bba406dcbede2e9bd4837d1759f203ce1b43774de78b19d67b133fb575ead398fae6a712ebd63e26671f199c8e674302030203020302030203036068b215f89f68246518e7d8c967f8ae78b47c69bcb9e97deca5849a813b2e400384b630ffc67a1dd7b502c1b42165171a704d68ed15ced3b7cbb98bd150cd884b020302030203020303a3c7cf45ebdd7e21dade3434624c9fd521b0ab24a6956e3b8
a777d700149806703b3637d0b1cf58c5272f28f8354a764d3cd48ff7c04f807237da8f4a1e2ef5db5020302030203020302030203020303c018dfe606efd5d17f3d45e91d33d3d7ef57d92a1d509291b1556bbb7e78dd0803b08ff5bb304aa8741af608415c541440edcd98bbc0fc849fe2a89c2c783341d502030203031e0eb0ac5664b1d0267d3c51dd66d1828c0d01a0903d599a317e4578926d4e3503330e2ccc546a3db52e796943aa8960d6d483a3b941ae0caa21cc7b9f7a3c2bbc070354a40d5f56745a118d0906a34e69aec8c0db1cb8fa5820000000000000000000000000000000000000000000000000000000000000000158206191319cb3bf48d9701195789dbbf6db5d3b99006317f5e7da37709f3d259374020303ac874a6acbf6de628134cd74ad9f336206e7aadb1ef09456b6267a770612485703ef323528b761720ce04927a54b81025a935f070420d655217e40eb2e084bd170020303a48199a63a429cf44fb39fdbb73098a49dc171e03d32800f483780adb9aa06580388796e8ab2076fc77f00a5096317ceff8a54da310b014a0310504bcd76f8b8da02030350179cb850b147782f26ff9a17895259e569b740cd6424a7a1479602bd8c822b0371b277886f0d14b6f82cfd063ecddab10fb5da5e0666e040992469d09a6bc8b0020303dee2b54587eeb7db2fe9ef0dc262b6ae679a5bfff89c8f403d181a1d79107b1d032aff27f522ef5fd88213c3865e01c7b4c1720d56778d1bd0e48e6a86fb3b07970203037d0d29240ad72800831a91d8e54019da745c6c6a630a625167723ace857bbb81031ede6b255a1c6ffd6fa2afc16d61aea6555a5cb85dc4669070b69b55a16ac58d020303335f1f02ebdb1926380c362d23b2d90d791f5ec8531287a47d3a1929d6304f1b037b80208ab1e9bc0411f128ccc859ac552945a650ebd0f9161a63fc9944e8d43f0203030d600bfcd6581d405aaed26aa7cee976fbb2bb9c1c1390bd3eb14cf5f6610223030bc731957e48cd1b0f92521fa971ca65f76bc8608eaddfa5243baf39838099810203034b176bbe991dbc4ae5738f23e31433597f9b892730ad4fdc86784eb928cbc642035fa76b91882dff00646e99735fb6accf89476e59c9dd28e0bc7b617870cc15e702030309cc884c89c3b546aecc35f4a95760a100cc3fb5457a82fc413ee2cd795345d2037ac7f4c6f9dc0e8f652f47fbda5c4b53428948acc95270be462ef8c909e5b742020303bb4f79f1339f6fc4ba3b4c61ff1940c27b29459942791fd7b160a04bc8aa411803628f665215c8c44bda1a6243487e35e5d9d922bf36f1976fd5f6c39264d16e8b0203036bdcb7f00d848df36ea8ffe2d419775be23396cb7344a2dd1ab44292769c922303710d1
c2ccfa13ceec5ffc97b3469592c4f2495141e46bbaaae6f099c47b9737502030203035f97c209aeacbb5fc78e69d247a800528f4bcc5e649fdec626ae5ef510ee7a71036eeb37a43ca943f0bb09cb54bfcc7325ed27e97af16f59cab0822f88d3143888020302030373d9994e2d75a6f80adb914f45d533caf2ade8d952a0d8b73a40299199892f5f0347947690bda8388fbc8744af22af00157531bd0f37353b2407b573cff34e23c20203020303cd4c5dc3e51e3a3379cf73004c787ee7cb312c06c70800d0f08e65a0ee2313c40350adbcaba1f1a5b06ae4510704194cefdb5053ffacdca11f354a80cc04d0a2f402030203037ec1e64855ec72f6c39f1832616a45075eda4889495c393ffb673aa05f25e67d0361c764032c6e6f093f7e4e2db6e3324b29e59ee4df2f6df3536539ea135264cc02030203020302030325abb132a4c897744752a4707644448c653f00743c37cd560218074dfe1e4d2803fe62ee54fd13cf254cb8c3b2bf728d8c26703054588e529bb8b2a68a950ea4e0020302030203020303846e32cbe73ce37fdc6fb93afeed4425035df35d637127b54b8fc4c053d405ff0398c008e116cd33ceac2a28f29c392e533b755c24316cf6e847e4ef72b070dcc602030203037cc278f9b41fd17fb5eb3c839a725fcd1ef6000189fcebcb4214303f45dcd2d60386c3bc64da300f1a87efa2eb2724553e41348057fc99d5c23b5b20e216fde46d020302030376bf2ddfddca9910df80bb0785add76937d1e90e029c02b04c0cf421622a232803ba2219bc37e93a89b0effdfc3601f58645c1cb7e818f2254c8fd16fea4ba84440203020303fdf1c4799edef5fe2960f9148627fff521e591247a224eb8d05eae3f51675b560372adafa8e298a14d0da0a71e645a12a23def78db8e81f7a68ef92aac7d5700b40203020303f5a794d38718b283b47993f3bbcd67c76f84c47fcf2d35373fcb7f8a0f43a06b03a14b12d1f03790ac75797e463a8a7edcfb2bc80b65a7dc8d1b15d00cefb315d5020302010312f8462573dc83d436d2498e68019babdcc911f482e1e00c1b3fda70e1a166e40203020303adcfa2154e38e2cdbafd5d56bdaa5dca90a5bfb9c36bfbe140bb31ec0e66716503b5aaf1a6fa2e80ad8f4e49c51808d2898fd74f539ec5de974b57c27466f5e7490203070354000000000000000000000000000000005ca1ab1e58205956a0b12f607189a063054545ab26ce76ea5eb4c9bc1e8d8161646c93ac66515820da6aba51eaf87e14a7585e52e23cc0b789c61b3e808d2aef704ae932bb2ab49d070354ee5a4826068c5326a7f06fd6c7cbf816f096846c5820701c251f0448beefca8b47dce2e42f136d224b8e89e4900d24681e46a70e7448510237426c0
3237429400000000067445fb80203035b2ba27d2c4b5ccd82a303efb2a86cf208d08dd952ed0494acc5aff009a9809303e498dca26358e5fd56e5464288da82073a17cbbd112e322488c12bff1661b49b02030308e7f519768ebddac099b4d79b3da7e528e3e1e7f43fb5e815cc2e7e9bdb82ca03afa06c5681e457eed414f2a781c5cf2a752257a696aa0d741799d3e5c6ac65b6020303645cd7c283714f070784b2e6a24c049ba20cc01422792ec7507818b757b4d02103c6b1fb9b858f69c598cf4359d1a43ec9faa45a7f308dfcf087b529ecf6cba0d702030364631f25391237453ea4bdf6dcd59ec33334c8bf13b3f4ebc24512193a52368203d4a0ec404056d0dd6b14481bda731e46954f9d29e4d43aba64bb8ca52ca87bd902030329cd1de4c7edfcc761f57c5f2466970add88bd8705390cb23184091c99cbdde603eca27d7686e41e3d24558d66cbc83d2a5d35469522d922ab216914a84d977e36020303aa3f3aaee4ea8cc05d8b5a9f3c4528c8de0d5b4bd9bedd4456b8816a9d7195da036dee15633cb92bdefc8f632e08b85dcb8bf1d317f82dfcbb7b76e38f7421361502030203020303f1a4bc7768286c3e023725e4f781a6b00fb11d83f1dda2647000f13ca3c58544035e062fcd2f3f81c8d4d424e25bf7e77e301465425a25afa5d0bdbeee2c6284b202030203038482b5d9958175078c790a8a1effda8f156db2faa9ff2d6473d742b4f737143903bb264f8b66371fe289f7741ae353b137695ca94cbc9ed3ececd3ef601d54181d02030203020303b9a21b649304cec7a5d6d0464d6bd8ddffb475c672c0c9799b3457e4b9fc2a12038da99cc78f04ba4eaf3df326eeb15cb038c013a9e5b76698f493170bd356b13a020302030203039c69fd3c2b5b5200c89358d29432ddc4cdadbf9d1b05f2265bf4af27d968898503389f85ccddd9ba507ac3bae9f0a830a56eaf35ebde5aeb6c374dadfd0ab39aa9020302030318b62235f3bd9e0e268b30ff1a987af5548f00006ebcf51db0448e220c17e862034465f83c3781a2e121eca23c852e6b742e52e0fd76e2eaf886471d3f5c4a3e8502030203038b2faefda31a8d8e3e5590221ea164997bdaaba30fed699932fa0b65c6ab2fda0396915914ec53b6ea1fea28b0ede76ab410d1dafbf996f2fa7cd37f1b4ddeb59e020302030203020303455b9202298fcd235ea441cc50f29ce15a2a1a9504564159a849211c899dc08003f3df85b9d03df952c76c1f9853ce686f21949c732fc9b161b5759faa36b2cd55020302030203020303508d990b34daf5a3f925c435daac3d293f6d861094cc2d343a92c62428fa66da032f8b40a9211667e9c44328d6440091ecb3a46bc15832f7d7cdfa8ec130b527fc0203020303f
993f7eae6e45a6f8557c6c5d0e912cb41b71d2bf37f38affc0b2d8e054193220315eeb3ab754628ce727cd0b7028ff8ed3291de7566b99066e127185d043f595702030203032f2c132f32f21e267ab64271e8f2c0c39fedbcc509c4589616cffec21d7332eb03839857347599c19c43a0acfe53e1bb5bbe0d68ddb49cee05f1b24c5acac24a150203020303dcd0869ad1107856680f6bf164623fc709d26d1a0842bb9c60a383f255c0ec2403c92cb1692742c4e2b6a91d13c3b371a9dccd29f898d8f6457ad052b1da9efcf6020302030203031408a1feb1c4cefd2e71c1d7ce58e6b4c2d139d48c67037d40dc0d60390af539039c51675ab13cc260ab6875b12824ed60903c1755add14024e27508ac0a3b9d81020102030376fdbe16ba7e2f048d9c311cb1f99291b4f624717ddd7e9f2aa653099d19314f032ebe85ea3fef7c7033338d1ed98e187eddf75dff4772a23e19392ce61690f77f020303f901f2ba5a7a95db9ea7106268f17f341206944377d1f006921211069cf8a0a103f43daf24401f9ed2d0691570a8ccdcd016c90b722786ff590276f7cd5933ff3d0203033cb8f613c530196a2ab151996cc3eb343199c5c0c0adc212268f74f6a092666c0355b4984426bd4db31ca5d70798a18280a4d319786bd897a29365d2db7489b32d020303d7f9465d351f2c4200659307b3cd7cf34d3fea84b9b23bffe5bec395f4a2d88a03ef3e9f16053b7f799f207451eb3403eb95301e9c9e721dfde0c41ebd8362485c0203036152db0543c6381b557c70b284c75fe77b405b54d279a37db0ea5a382a61abd603b70663772bf3728213f272a0d02b2ded9cd31441fbb82b9a44d266c56e7fdf58020303f967722c10537809246d339e984382cc197deea70a2c433df88fd7797701dc76036e06014c6d6c4d1358aefacae43b83631ffbbb39c93874faa4d589c1f60ca07302030341a2496071d2a84dec9f60bfd3288fdcf01683618900806b1a61a740fcb95d4b0338cf0dcf2e49a0359d0d543a3ac97474876f7605800e270d1c8671dc375720250203034a347b6bdf9e875c714c0790a2ad84b01edf7b15c4d23cacab0598c704417ea7039676ef3f389061effccb4e08a0afc2971c35bf69edbda2e91d9e88486113990e02030203020303927b20cc65cbc0d70e9880b16dfc67b8379ff4a96b95309302803a1819d95ea003eb0ebe2fcfd0a9002bd0985e47dac1c4a01561de0da69bea0bc25ff1b519d5b602030203020303a4c7f2025180b6de7674fc2c91392a565d9a28a77eb193f29d9ba706c6fdb42f03d2bca54ba531de1142b06bb35aed010d55ab6e0d862cdd7807e4136c1b9d0c490203020303ec1282aa791a0b578de360336d5cf95a7f3bf1ffda9cf697b3aacf941
7aa38ad03cece3331be90852d59eb04e3bc87b03657c0993626d3e36ebeef97baedd928f00203020303afb305376ba08f5bfaced38f127295f994096684417f9de1a8f496fdccbb3547036bf14c6051f3bdb18d621bed206c3ceb8daf8ec24843921de9af2dc2ba70d5ba0203020303122291009057e848a0e15edd72e47061463ab3aee368289eddc782303e9299cd03678ace78eb9da91eb3fa9105c9969a0aa9abd66ac41ab138aa70346daadd327002030203020302030203020302030203037a46bc17ebfbc47f6d99661de00074c9958e0f7fd66df7c77c236b89b165472e034b58bfe7c7506d2891367c270ca350269dfc0a08b7466ec2496c6330dd602bb302030203039b58b0df7fae59a4cef25184d849214bc145cda115b2c0dfd85fd470ecdea70f0330923d4d299efbc138d4442519d30cd33a7827557231388b81a6ea9c65eabe6f0203020303af3fee608c2e8e5a30ffc6345d86ec1b2d55f10e518b4da5e8eb59abde07b59803c2016682405d3a953eba254601d5fc0b8966a33efaa51918a4a41b8e0acbeb4602030203034b1387aa6d0ab944e2ec65ce38c8643a0ddfca5c3059718f398dee501291569603528cbab25216c4397a402fcb572f0b512a773dfeafa59e401989a4da13406bfe02030203070354000000000000000000000000000000005ca1ab1e582054b6c4d9862a1658dedebe99a0f61d94c5d1515fd031d0dfe9ebce6a1454f5c658203f14693500ccd0260659fd9eaf69570edc0504867134ac88f871d91d388b63690203070354914e7547b9051ea6226c30495190a2efa15930c95820ffffffffffffffffffffffffffffffffffffffffffffffffffffffff74873927548382be7cc5c2cd8b14f44108444ced6745c5fecb02030311ab6695ec969171698c1f56d4c05373d8505a2c7299fb05cda1d4351e22bfa403478b94ae515fbd01728835b532c7c45ccc78d200d3d004da6917337e139eb729020303ffd14369e7c7f7aec3a890a20234885f2c9fb9802ec318d8434ebcd58a696153030ddae742090ea458c3f232dc894bd8cd3378b4b4590a0523e09a44e0439fe0db020303b1688d8c7806365d931579ccac8dbf7a8d7705ac393159dfd9c0395ab7b5ca5b036a6c978a565b15267de4330de7b6166014082043c5cc80370953767ac501ccf2020303fd878c58bb70337606fc9f519700dcabaee2f175ffd956a6d246c56e38de3c5a034ece3162b251497a52be7f417b99722c20de63b35a0387e0cb1d8a1ef6bd34190203032db40fdeb2c5256d5a237b6134f844646b325bfc12c687916327e21a65b1ae6a03381ca5f3231b0698c6d69bd685fd1930924395002ee0c9f1f3dc9324570c4f52020303eaf50a55e8bc433b8c594aeb6ce
2dff8d6dc8a6fa7d72076a07d6b771d13d78b0311a2827108d1c853cd8a63db81104ad8493e188969ca0339d0a01ed043b47cdd020303f7993cfe3bc67991b923b2f3470e42e23e78ad30096bf8278f293053de07b46703a0c8d263334a785d55f5b7be433841bca1d7ef1b8743e6dacb4e4fdffc52a77a020303b616a1ceb3607803c41329eee93ec3541b2ebbe690a4f29e3234441d7fe22710033646798b76e3f8d1cdcef03b5802388ad826a45b0ba508443fa26d5cd6fca96602030320f9766d80286663ec273eaab27d516a59305f6fdb96957af2602f4c0eef4b8a031c930c476ddc908dc2d5ec253fdd2c6687f32616ae7698ee6f0f6baee9871f780203032d1c40f0360f2f347afb931f1caff15b122c02dd058d53cd31e10da5eb3a5005038da2ec93073400637eda663a2b3095ba8bcf473b6bc0ddba6732c0d88ea26f0402030203020302030349147352cccb7f2119bbfbbbb0a4306ee33992973d1777a3f176a7420854218003a44f6acf78a34c96774821d091ce968f756f12c95ad543c97e52f1c041e5c19002030203020302030203020303b990df7130026def95a6bd8c75f955e81827b81134286380b827ccc8d59020bb03acc511b7a7e46ccf36ec7d94e25df35ebd2b1bdb6754891973d081dbb84b74c3020302030203037bcfbafd729a64f6a285d2cb27f169b7f38544bcd685a9a551029d47527b7dc70315cfee27b492bf2f3b5144c78e9ffaadbcb1ab045fa6e58965731a597f48e9eb02030203020303d9b9c9b6e08cb6200a2ead0a7b44aa81f526cec46dd91a23e67370c74e198a1703502ff4229c44844597f1659b073f9ea36ead050cc08aa533e40f9a3a38d1407f02030203039708356582bcd2add889a2bafd2ad4c93eb63fa742f601c045b9e98e1149112903727c6112bad2314490ec2e9b95bdf87c45c93b0ee92fb4a0707bc806c0723c280203020302030309c10778ca4fd1e78c03c22cf95624e6d9b1845103efbf1dd6e56c4d47beab4a0357fd6003666e25f92b0e831fd0f7a0574664f4355a1cf4073937a1664bccea64020302030382b763d46efd8db57bde8120303a4dfd77ee10456b1b2a46916e75b6f0a29b140350037449c92721dfd8d234901d464a1cd6403666af10a822691082f192df864502030203020302030203070354ba42ee5864884c77a683e1dda390c6f6ae144167582089780709b74f02d53045c2451635aa24be6675c290344ab3fd48e15b49e3ea685713fc1f351312af1faacc6d6ecf4013834144b9d9b99c83070354000000000000000000000000000000005ca1ab1e5820565d8b0ba637731af59e8597dc7f3c3f039a169ba1e83c6daef2d43a8117db505820f4cbb10315f5e55053e70d60059ccf403d4e9
e90cf36a791de2607fa7a89f1ed0203037d9fcfae52b9e7cb960a57b588a5e913fc59bcd8e1e3a72545680ae787cd9a080340f594bc38ee796cbfa7f62e7375b6e27cf3b90da8baef7cb8225b98a6dc06f9020303fe38e4536c064741e62c787ceaff5b8252f552b1e7c0dc2bd09b0ad991b62b0803d4a9342427b3fb884bbbbfdbef9db4edcfc599bcf4918022291891ff47a4473c020303133b5e6a3d0e759a8df9838b021551845f1e112062741d51919df0ba3110621c030b0cb1f937b231765bdfb0ada592cb4ec4e37b7ce68ad21315051f0bca23e93602030360b50daa81d1eebf433a86cdc63d639bb88efd0837df61c20ef4a07b86d4df9103c38b3536ac0104b6bbdc2fb47f57e4eef152aa4c727f7040d362604644adeb43020303685524b040ca7b4e87a1e2e05e3c0c0e289d68a623eb6b014a9b08d3525c072a03e2f0a7769adbb870f5ba21929643c23ce8a0e8149c6003bf635a14218eff4307020303c3d7afa90b5337e37f369666a5fe1e26675836c3adbf5b685277769112a2445e038d8f4c4e40c45232c7da072bf7ec8aa1feee967030d0ac4beb626cb50f2dbc8b0203038590c5066fd108dc4907febbdfb860f25f821349acf99d458bf4df063c0941d303d1c8f444e3b9f496780241daf46ee0ca3dfed98ccd58102a13bee062db56089802030381362d398fbe328b72f0cee739e72b7a5ace40a66aeaf298ef98f620c2b3b3da039ba3165764fca29bf2a7c01813fe58996e8e705882dfd43f6c5e17c54b307a4702030326b74aee4a5123aae28720835cc1a727db62addf31878a96a703fc43875a400203f2e248472501c5a6bb2e13c3ae7a208a35d01dbc41e44affa9e4c0300e2c2912020303ac5ba43d3be112366057444e9a2db12b96222bb7bb88139738320cc53924cacd03462978b4844a138be32fc8d45c274d7d1e77539a2839950eca4dbb779ade3db00203020303cec89e1b5d9d20c59a319c536ef1ae8c0a67e0ded8f6ce3a7eb1979ef183d3870348dbae09afb5d5232bc158cd3f9c2728348ae93f0742e0d71971d3b26c301c0c020302030376e9a3f309a69b0c2c7ca3184457ba9f0ce19145bc96d4bd846742585cf4e9a903b07dbe4dab435161a33e90e991fdd8ac5c0670d77cf2b72ae5bc530519e6fbaf020302030203020303c423e16fb3487a4d9126ad5c533cf130444a4f099a85777493cbd2d231f27b71033c4bbd0160fa760c6c008ce70d7342f2cd5524690247577a0ca36e15528565cd02030203031e5c59c8eb7467fe1b1b59f78351143028717a9679b5956d1a42ab64efbbdff403bc2db4433eb1e4eb520035e06ee46cdd233cd6f74e4ce438a0743af21cf67ba10203020303c1da641e5501813afe9c4653f2179710154
bfe94ebce827d0bf64d70bd3baf7a03e2bf953702f6287b134eee978e1b18a36f65b41c2c673d75876215604661dd50020302030203020303a35901b035cd24570a277362d9ece906ef4d6e00821b55212d69b6fd6775472d037568928f5eecc9599b391e6cb75468d91ac18de51d7e984eb678105c39fc8a4a0203020303791a9ee8b5057a6ca65118869d354dba135fd5c518d63144d3860987f084bbcb033c8c390d481f51cf6b43c22677a971beae0e62e8b2ecfdaaed05b48ac0f60294020302030203020302030203070054000000000000000000000000000000005ca1ab1e45e8d4a5100002010341305ecddd1b56329ac9f09a1235eec6ce6be69492a9788db13e9187dc21e9dc020303fb1c6d1aa6d3f3bef7a0bf4130218b3b168f9447e69ebcd3b68c2b2f41d9b2ef03652ba6f9b69aee3d28404079416c2f8fba4078d66b558c7a8d9615cfe7d3bd30020303d9f042d0d2f152e24d8cde02d3a7d7a1fa234efc5dc259572d412a2e607215ba03c5b76ff595e1d74a22eb44a5aed94f3225b6126c2c28ef04bb75e1d3804925ad02030314a2b125da4db5ba673cd5c0aaae8c5bf0857fd45728b868cff3f40eaf9f82790393e93c4f4b58f6f9d397d136319a29aa6b691b652651513bfc2297107379ce62020303f00359907dd68b2ae8e2d252d3313f3ba2bba16d21995333b2162b24c9bbeac4036435af585f0f75e60d362629108f6768756f7b39f1c70ab7f79e6b4e1bd9f08f020303929e2f8eb833089a3773b497247338865ef336de61e7da4a362eb3e5d5601a7203323197b010e3205d910c230463758f39cd6c01258db0a11b9b47f4c278db049402030328314d2f79ba26dc4f34afce51e50e0e05d61b253861e5ab47cc47dab500310e038502bfdf255197b6c7929c445580eddc7013470aa85f531e89cd595628576ef6020303295e1973d07a067f281e3337e756bacf10dcc295f7074564874ea4401eb2a4e503de12047d931a054019fb113a4c5d531be2d56ec3d20f99b1628f4d617b15da1c02030361cb373dd54af082c98abe4331b16f360ae70b82b3f111dfe54eab9bb47a85f0031bf72cc92e51f3f38f18d4d0a77c173ee78ae62dce6027288dd37d7f1024df600203032d8279aaf065d93b0e811dfa25bb7c19325ad2e7f99cad95d0737c5390500982036bdc41d82cbe612f8caa639dda471df1d8efe19aba0f39e884b0569c597f68ea020302030320c7fa871d9cbf1112255d49920d07bf151532323d32ceb6ad4da291fad9327403fccd5f970aaf4f45086402c560eeb209d84b4da278dc69f17e3426ba0b273f890203020303c3d3043a6c5a67ae707239a66070748c2efc09d25efbcca01ed86206919ee23d03407bd9bd8d77985f52cc5d878
1fc24a200ae2f8bdbaa77b753f7f245f6814c87020302030203020302030365f66ec8e09bf15d73a83402fdc462cbcc40578fdf5d4ef85bbfbf9b5ea5e002039ca41cc26f222ece8fb37316d9436cb914d7041ed51f1d5d3831b735ae2f0721020302030203020302030203020303ce8a414b8283b20263f621799a194ddf5d753bef21ab8253b41de0ba8adf661003a8e38716cdd8a09095a7036c686009bd8236b1c7eb9507540fb981baa9a8bc4b020302030203037ca01b97c87ac12c8995d3c80f7aab3313747ace5a829f08eb68381a5a9fc54003e5554fbb47341d48f82f64a26d175a7d3559378657e77cf2de2eff917b95be300203020303512c2cf0ab4340a1623bdddc301aa586932f9413ea9bf8c0f1849d2d70d5d0ff0375d0cc499c7f76c70939fd8d633c658747eebf0eb138c15d902c34f0de9098030203020303b78dcbd59a3668396357cbda038d7e5bc77aac4acdb3cd3a75e96eb05079a6bf03ceb3ed2850bca5df0bd69ce2e85e9daff43cdb4c79f58685340f521521a0943f0203020302030201034b33ab5a3b8d3b01c374c1d7fcfc714398b7f0704ba2f1ee757670269fd5a7f7020302020203070354000000000000000000000000000000005ca1ab1e582000000000000000000000000000000000000000000000000000000000000000004383b69e070354000000000000000000000000000000005ca1ab1e582010c18923d58801103b7e76ccd81e81a281713d174575a74b2ef0341f6b9a42fd5820b8b76bb549992d9bfc44e3b36d087a175b2e78b9584fc752eaa3013e0bdd31e8070354000000000000000000000000000000005ca1ab1e58209bd14ac8c1cf553e0ad3a2c109b9871eb74f3c116bf0bf492ef04d2983722555582090629dad0a40430445b7d2b25a8d19c5d7a929608ed7890877a499aaca01ca5002030315edee06840e36ef17d13817ab5475d12f7bd50113984febf31e2cd80c08952c03d360a7d78676862429feb7c95d052c1e63379b8ad3becf085a21baa353ab93d30203037a2fb952c2cf8e85d9706bcbcb5a69b83b13403b58f06b0767f4204acc5917930310de142eb3b2790cf1e3694b72eecc7e8ab3860f543c15cc24274ff69570f009020303879875563fe8a079ef71e84b6840b187c681499095de9d02d8b101c9dfcd111e0395e9fc3b000e49b65678f256d247786f72c91494c960d117b7668045c35502720203034c4d4621925c12b3878ebf267a1a013cc3d5675903cb0e22bc6d1df0bace3f8d03c092e19f1fd097b76813fc2412338735dab2f62302645b0e72d195b68a1e4d4702030350620914ec3787f2d03c118a874edb853c9678a3949ce426fc19489744df65e2033a2bcd06528de10b0bf7c316956d5
af798ce85d8618011a8db4df56202c17f27020303c27ba5c9e177fdba8afc9cd524bb5616116bb12aac5aa30d1918e36228883fda03003a4d0233fc2ff4bfd5cb02b70ae195150d4d18b59449829e612204e831187b0203038cec528699f0b6819a574be7bea0d083f5999e462c6a464a37c582392140762a0393afd21f19e4329c0ef6b1b06baf963080c2980a73c5937cd6322ef7dc631dc00203038cf4931c97d6aa8c453db3175ebdf27d40e4e34b2b3ac67e8888dc34556a99b603cd716cb8821688b0df7e56b2c31036c17c53a5f6d50b50cfd4e68d30d2420120020303b81ba13ab693dd6dffd70ba32f7bd51fbd5ecd3f58bd8ee96d7b081dbe45efa803dff9ee8db1218deb4733e71215a2e1629d8c9f5e36bc0b8184d70f2ea6e8e01d0203031aafd4025804cbeabfe796224eda42a75577ec804c615abc88953b7c966766a4034baee3dbeedfb1b839869d087bbadb64bd8d4007cef5bfcd038c7f8436c4b7e5020303f659d8fb79866e5a2f9479b24ca74b34dae4e211e6a758e376a1407294fd840e032e9950f2c2283fc366c78f61d806a412a244bebf4dca45f250dceff31fd3a2a802030203020303375268372cd898f2295ec6c9a9838412658bf8a9ba5c309854a92dd747e4eb3c03bf0048ab25caf15956b958175c59038226d0331be1767f2c00ae19bc9f70f9ff020302030203030290f4c412920a6ea22d4ec8091a90d63fc62609d3e55e44da20097cdd8204430338962fdeb56eeda46eb38c254e32bd4fa863167913801664a58d773fa3a4882f02030203020303b83955a533913a8e816c0a9e001379dcbb9a89e48410b365841c552e93987a4a03e42aa480068387d975b85b52ab67acc0d5de816085765f419fec172afc69df34020302030203030e1f9af6f9a3833c51c53d2ee2b598421c0227dc651646350725e51762077ea3039ad12ef2e43458f28d5267d58f355ca92e3f625a595042518e0ccf8b0d4e96e002030203020302030203020303e8bb2dae757d043417195292bac773cda990500845f91a94d00179fe89525c3e039f7fc724f0bd8bd083a725fa7d2c8169bd8ca33d31c9146805940f0ee480c3dd02030203037d8dcb012bdde19a0dd178c1de91d21cc866a76b9b6315554fec4bc4f5daa7920383f4a4890e8cd73e6e32096e4b11c5c0c50991dff65297720ea9ab7b8ccf3ef302030203020303c998c4c602a03cfa0c92a1542165567f11d23f2ae5fb91d04e02292f8e297548039447848097d9500f21ebe819789f98461e01aff7cfcd442c8afe8e07b87a95690203020303f6441de6ba2bc5cc9ead4300519a94a14a80b85f2fb0fa41e2211d7c02af0e6703703a99e4c2133e89a6e892863f14f143cf5f2ad94bd527081c8be6143
f14f3db020302030203031da626649ee092857f195eb0059639309d744972389a4e748c471f16b0fc3c2e03f4072cd036d5bbb777ad24fa0b1a235806ef25737404d7ce4f83babb76bf090802030203020303c7f6a9615846a2d627db4c940098479bce3c61c8fc1170d0b7f8c9ac2bec5ea4033664667108158e9377b49cf3632952090b6eab3ba6eaed4f48ca9d5beb273fd002010203070354000000000000000000000000000000005ca1ab1e5820eff9a5f21a1dc5ce907981aedce8e1f0d94116f871970a4c9488b2a6813ffd41582021bb230cc7b5a15416d28b65e85327b729b384a46e7d1208f17d1d74e498f445020102030203070354000000000000000000000000000000005ca1ab1e5820cfe58c94626d82e54c34444223348c504ae148f1e79868731da9b44fc91ddfd4582040fc722808ecb16a4f1cb2e145abfb2b8eb9d731283dbb46fe013c0e3441dfbc070354000000000000000000000000000000005ca1ab1e58200000000000000000000000000000000000000000000000000000000000000002446745baae070354000000000000000000000000000000005ca1ab1e5820bc32590bee71a54e9565024aca9df30307a01cf8f23561baed0ef54b30f5be68582007b7aa19b2ab0ca977cf557ea4cec4c0b84b00a9430cfe0323877011fa03269c020203e752c67cd3ffa4dacba7194a973b10998b056b5b936d17da2a8eae591a8e175e020303abdf2e1db4950317aadeff604ae51ac75e8281b1ea22e7f17349757c71dca21d03fd88dafa9a26e9c8f617b5de3e1a6021092dbff5c1fdb5d8efaeecb1f333565c020303b8a363b96519cb0eed132d807f6e42ea35f115584c443a74b14a19bbeac463d7038247bf369e033fcc19a5797960f1387f04f80cf396babac560060887288632db0203033497b9767463d12616a5b29b2d66156e49b3cccfe6598e2e73d90190e04a15120384203647fe683ea28ce78395875f0bc39f1fe1ce6c9670b8393161514dab4701020303be9a5bc3511c4f8466f2316f7d83370249d71f17357eda7cd7135b86b136090703f4ab8e9e9441ad40280807e7462de0147c3471983825b8f70b6066331bc7fdac020303fedefede445ae76cbb5f94df4e543537d9404b97450cef183b95f2fade25aa250384deb1e0e5121e700221d4f1ed13d488aaa4275c46416c2003d8d1eddfd78c180203035ed3f2ee894a97e0bd924d79ee091e8430799ba07601928b7912280a2260717b0365ca974c72e3e2db5912a9c87c5d5f84c2825a1f0196fa4793cee99d4d173351020303678452244e14f477cf17190203e3d2741cde4dada731914ad7987d94a95fe953030018accfc6cce1d6b0b884be4fdf54fc21d7f2b0f320711abea4aaf2d
fe49d52020303a72a40bab31ce553d4d303b80f98837eb346dc83decd766ed763739ccaeb1d0f0334cd6263be1472af1b73f09f382ab57941ac5043ccd4feb8c4abeb0f2b0a869702030320b1e0149d703853b7ff9219e776215178aeed36a0c047635656c8a804e0f73f031e01e1d5be3de353277a132daf82f484854a9f96ba3555c42f03f63dac8a72db020302030341ad95a71f5d9ac2a4472b72437b529d9683cd3110874426bf5a3cf9fcb979a703a054c300828ecfa19bda2ca0f4d770134b6812dadef254990956f75f418010920203020303674f36f2a847c25e092483b595d6d69338bbf807516d5b424e3ab05fc95719cd031b713460225852cb9a5005429fdfdc44c8d71153e1aa04364570343a434c6388020302030325b7b7ced4578ad2120d4703c775f82d4fcf6ff0578d60d76b1b3d5bf982812e03ee4f0964b55a782cc4128ae4291d947dfd63300081d89319ddc357b6e9a7d365020302030360d8ba62d7544d9d354731fc286a41c33869212a45b7322d2c4e67504738e65103b0ae0178f65708516a57beaa38a2cd4d344fe8f3a217f1fe9c4cb2d41b2975b502030203020303b5419117efdf04c24efdb20fe6e1786d53529a5630a98108d2118c7dca7c136e03aaa09bc73d06dc35034f97fec917652f664c4d768d0c036b2539e661f2d8fc380203020302030203020303d51b1bcd3eab3b6a6c384a831ec8080cab63c1c44d898bd626248194376fe1de037ecc252f46692f6e8959f226a5e94a4ac314d38000dabd3c66c06489789651bc0203020303248599a8b4c29a8dfd17c29080088b4573842ac4a1fc2fb628f1f65350cbc2bb034f7ae2704632668f91a5605aa808807c7c83f999d35def3d40f72a825eb561ec020302030203020302030203020302030392af697495712b4013883b3f5ad2d370bdb93f0ed60416692b0267f10d9a3caa0386fa8ccd91ab622b232223f9a347f1785ca9c4b7323a2e0d19a7971c3afd63ff0203020303b4f12607fb8df583b854d7b235f4a64ccb2f4bc9819dc50f3a03ed0d4906910e038f64a125d14bb92752d65593faae8e41bb5e80e4f147b20f0c247078f6e7ca77070354000000000000000000000000000000005ca1ab1e58202d11035f2912c26c30c4f8957d3910a20622ea8709c8cd3e0ad87fa0f4460bbb5820c0bf0b2ab68768eaabe5fda7814227beaeaf4c4ee7e67f5d07aefaf5f0410ab80203034d5eb602925f13a2147a2c1439d43faa74e2561bb3d27811f02042466fb2804f035d9458bc537a1957fddbf6c5f13c6bfc9349abf1251df9d6dd48b5b574f6f48f020303bbf6401ad2a6b95a3e749f5b31224fc7fcdd083e7aeac9671ec3bebda312fe5c03393a914dd0b171b4cca2f5cef52cb4e
d4b564278c0fb678e5e8f3d911b4addb302030356cdb16849ae7aff540b8724f73974149f71cd3f984360537159a273a5bfcc1d03791ad7bed137c9501bcee55dc6c9b030157a30c37fca14d39c25d9d5137ae88b02030392940198d6b1df58f0a6c3cc1da02efd9547043d626487166ec858a5aae7b61903efbf993292364275b60efda91d4c18f5a87549ebd407ba16763b1f0c6113a6cb0203039691d481d60a086435d9f914e8e2b5e5a68abfafb82dcc9d6de2176920c35ded03347f67f0fbbc63fa8a3b826c6491f42b13869a2abd2b6326d75d51cb30ea9cf1020303d822fd2ee43799eb3f714589ce7bea36550c8fe4a90b5a2aa68a95486490962c03718887bb3381bda095c52048955b7ce2e5b35e5bec11515e9fa86187fa9e0fa70203032d3bd6d97e29b994d2d73867b8e19f6b3b2a43d5992a6b9b3f350f8834c0da9a039f1aca3992b3c769a6735830359c9153c2daee33d937c9b903a05ed9ada8f5d0020303bddef4e591830295d3c1f6e27decf301748434c3df51aa8b451558ee7962deea03926ffe488854b96b623872e9a54c6e5bb737fa12f402bd8a2a79f34000ba21520203037b0ddd3f35449b7e435243005ad2a536fa28167cf7da21ecd2d4c3a55e6421a6030fd4916c5757cb6e137fac5d203ba3d5d50a5077a06e3804faa80e9783e2367d0203036fff8395e24c14d9e40936920b141c84e2127ed823a1625699eaebd1f26b69c703069db41eccbdb4aa227cb482a97d6b342d0413855bef3c9b432d74ef0be43e0b" +) diff --git a/smt/pkg/smt/witness_utils.go b/smt/pkg/smt/witness_utils.go new file mode 100644 index 00000000000..5aadf4d6cdf --- /dev/null +++ b/smt/pkg/smt/witness_utils.go @@ -0,0 +1,11 @@ +package smt + +import "fmt" + +func intArrayToString(a []int) string { + s := "" + for _, v := range a { + s += fmt.Sprintf("%d", v) + } + return s +} diff --git a/test/Makefile b/test/Makefile index 888957f94fc..276f8ef648c 100644 --- a/test/Makefile +++ b/test/Makefile @@ -43,7 +43,7 @@ RUN_DOCKER_POOL_MANAGER := $(DOCKER_COMPOSE) up -d $(DOCKER_POOL_MANAGER) RUN_DOCKER_SIGNER := $(DOCKER_COMPOSE) up -d $(DOCKER_SIGNER) RUN_DOCKER_DS := $(DOCKER_COMPOSE) up -d $(DOCKER_DS) -STOP := $(DOCKER_COMPOSE) down --remove-orphans; rm -rf sqlite +STOP := $(DOCKER_COMPOSE) down --remove-orphans; sleep 3; rm -rf data .PHONY: run run: ## Runs a full node diff --git 
a/test/config/cdk.config.toml b/test/config/cdk.config.toml index 2f26c9b80d8..6491b14a0a8 100644 --- a/test/config/cdk.config.toml +++ b/test/config/cdk.config.toml @@ -56,7 +56,7 @@ GetBatchWaitInterval = "10s" ForcedGas = 0 GasPriceMarginFactor = 1 MaxGasPriceLimit = 0 - StoragePath = "tmp/cdk/ethtxmanager.sqlite" + StoragePath = "/tmp/cdk/ethtxmanager.sqlite" ReadPendingL1Txs = false SafeStatusL1NumberOfBlocks = 0 FinalizedStatusL1NumberOfBlocks = 0 @@ -116,7 +116,7 @@ SyncModeOnlyEnabled = false ForcedGas = 0 GasPriceMarginFactor = 1 MaxGasPriceLimit = 0 - StoragePath = "" + StoragePath = "/tmp/cdk/ethtxmanager.sqlite" ReadPendingL1Txs = false SafeStatusL1NumberOfBlocks = 0 FinalizedStatusL1NumberOfBlocks = 0 @@ -172,7 +172,6 @@ GlobalExitRootAddr="0xB8cedD4B9eF683f0887C44a6E4312dC7A6e2fcdB" RollupManagerAddr = "0x2d42E2899662EFf08b13eeb65b154b904C7a1c8a" SyncBlockChunkSize=10 BlockFinality="LatestBlock" -URLRPCL1="http://xlayer-rpc:8545" WaitForNewBlocksPeriod="100ms" InitialBlock= 353 diff --git a/test/config/test.erigon.rpc.config.yaml b/test/config/test.erigon.rpc.config.yaml index 147e8a6680d..8d8463d2b4d 100644 --- a/test/config/test.erigon.rpc.config.yaml +++ b/test/config/test.erigon.rpc.config.yaml @@ -27,7 +27,7 @@ log.console.verbosity: info #zkevm.executor-urls: xlayer-executor:50071 zkevm.executor-urls: "" zkevm.executor-strict: false -zkevm.witness-full: true +zkevm.witness-full: false zkevm.sequencer-block-seal-time: "6s" zkevm.sequencer-batch-seal-time: "12s" diff --git a/test/config/test.erigon.seq.config.yaml b/test/config/test.erigon.seq.config.yaml index 700ea817847..d71184afc15 100644 --- a/test/config/test.erigon.seq.config.yaml +++ b/test/config/test.erigon.seq.config.yaml @@ -26,7 +26,7 @@ log.console.verbosity: info zkevm.executor-urls: xlayer-executor:50071 zkevm.executor-strict: true -zkevm.witness-full: true +zkevm.witness-full: false zkevm.sequencer-block-seal-time: "3s" zkevm.sequencer-batch-seal-time: "10s" diff --git 
a/test/docker-compose.yml b/test/docker-compose.yml index 983b54ac5e0..1c2d72f2149 100644 --- a/test/docker-compose.yml +++ b/test/docker-compose.yml @@ -21,7 +21,7 @@ services: container_name: xlayer-seqs image: zjg555543/cdk:v0.4.0-beta10 volumes: - - ./sqlite/seqs:/tmp/cdk + - ./data/seqs:/tmp/cdk - ./keystore/da.permit.keystore:/pk/da.permit.keystore - ./keystore/sequencer.keystore:/pk/sequencer.keystore - ./config/cdk.config.toml:/app/config.toml @@ -37,7 +37,7 @@ services: ports: - 50081:50081 volumes: - - ./sqlite/agg:/tmp/cdk + - ./data/agg:/tmp/cdk - ./keystore/aggregator.keystore:/pk/aggregator.keystore - ./config/cdk.config.toml:/app/config.toml - ./config/test.genesis.config.json:/app/genesis.json @@ -105,6 +105,7 @@ services: - 6900:6900 - 9092:9095 volumes: + - ./data/seq/:/home/erigon/data/ - ./config/test.erigon.seq.config.yaml:/usr/src/app/config.yaml - ./config/dynamic-mynetwork-allocs.json:/usr/src/app/dynamic-mynetwork-allocs.json - ./config/dynamic-mynetwork-chainspec.json:/usr/src/app/dynamic-mynetwork-chainspec.json @@ -122,6 +123,7 @@ services: - 6901:6900 - 9091:9095 volumes: + - ./data/rpc/:/home/erigon/data/ - ./config/test.erigon.rpc.config.yaml:/usr/src/app/config.yaml - ./config/dynamic-mynetwork-allocs.json:/usr/src/app/dynamic-mynetwork-allocs.json - ./config/dynamic-mynetwork-chainspec.json:/usr/src/app/dynamic-mynetwork-chainspec.json @@ -346,6 +348,7 @@ services: ports: - 7900:7900 volumes: +# - ./data/ds/:/home/dsrelay/ - ./config/ds-config.toml:/app/config.toml command: - "/bin/sh" diff --git a/turbo/cli/default_flags.go b/turbo/cli/default_flags.go index bac9fb35279..c40d5b7de9d 100644 --- a/turbo/cli/default_flags.go +++ b/turbo/cli/default_flags.go @@ -29,6 +29,8 @@ var DefaultFlags = []cli.Flag{ &utils.TxPoolLifetimeFlag, &utils.TxPoolTraceSendersFlag, &utils.TxPoolCommitEveryFlag, + &utils.TxpoolPurgeEveryFlag, + &utils.TxpoolPurgeDistanceFlag, &PruneFlag, &PruneHistoryFlag, &PruneReceiptFlag, @@ -329,5 +331,7 @@ var 
DefaultFlags = []cli.Flag{ &utils.InfoTreeUpdateInterval, &utils.SealBatchImmediatelyOnOverflow, &utils.MockWitnessGeneration, + &utils.WitnessCacheEnable, + &utils.WitnessCacheLimit, &utils.WitnessContractInclusion, } diff --git a/turbo/cli/flags_zkevm.go b/turbo/cli/flags_zkevm.go index 06977ddeb34..20c09205fac 100644 --- a/turbo/cli/flags_zkevm.go +++ b/turbo/cli/flags_zkevm.go @@ -131,6 +131,13 @@ func ApplyFlagsForZkConfig(ctx *cli.Context, cfg *ethconfig.Config) { badBatches = append(badBatches, val) } + // witness cache flags + // if dicabled, set limit to 0 and only check for it to be 0 or not + witnessCacheEnabled := ctx.Bool(utils.WitnessCacheEnable.Name) + witnessCacheLimit := ctx.Uint64(utils.WitnessCacheLimit.Name) + if !witnessCacheEnabled { + witnessCacheLimit = 0 + } var witnessInclusion []libcommon.Address for _, s := range strings.Split(ctx.String(utils.WitnessContractInclusion.Name), ",") { if s == "" { @@ -220,6 +227,7 @@ func ApplyFlagsForZkConfig(ctx *cli.Context, cfg *ethconfig.Config) { InfoTreeUpdateInterval: ctx.Duration(utils.InfoTreeUpdateInterval.Name), SealBatchImmediatelyOnOverflow: ctx.Bool(utils.SealBatchImmediatelyOnOverflow.Name), MockWitnessGeneration: ctx.Bool(utils.MockWitnessGeneration.Name), + WitnessCacheLimit: witnessCacheLimit, WitnessContractInclusion: witnessInclusion, } diff --git a/turbo/jsonrpc/debug_api.go b/turbo/jsonrpc/debug_api.go index 1f65b7d47c4..3f5eed08c7e 100644 --- a/turbo/jsonrpc/debug_api.go +++ b/turbo/jsonrpc/debug_api.go @@ -44,6 +44,7 @@ type PrivateDebugAPI interface { GetRawHeader(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash) (hexutility.Bytes, error) GetRawBlock(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash) (hexutility.Bytes, error) TraceTransactionCounters(ctx context.Context, hash common.Hash, config *tracers.TraceConfig_ZkEvm, stream *jsoniter.Stream) error + TraceBatchByNumber(ctx context.Context, number rpc.BlockNumber, config *tracers.TraceConfig_ZkEvm, stream 
*jsoniter.Stream) error } // PrivateDebugAPIImpl is implementation of the PrivateDebugAPI interface based on remote Db access diff --git a/turbo/jsonrpc/eth_block_zkevm.go b/turbo/jsonrpc/eth_block_zkevm.go index 6f82477f685..3bf04c81dd8 100644 --- a/turbo/jsonrpc/eth_block_zkevm.go +++ b/turbo/jsonrpc/eth_block_zkevm.go @@ -26,6 +26,7 @@ import ( "github.com/ledgerwatch/erigon/turbo/rpchelper" "github.com/ledgerwatch/erigon/turbo/transactions" "github.com/ledgerwatch/erigon/zk/hermez_db" + "github.com/ledgerwatch/erigon/zk/sequencer" ) func (api *APIImpl) CallBundle(ctx context.Context, txHashes []common.Hash, stateBlockNumberOrHash rpc.BlockNumberOrHash, timeoutMilliSecondsPtr *int64) (map[string]interface{}, error) { @@ -217,7 +218,12 @@ func (api *APIImpl) GetBlockByNumber(ctx context.Context, number rpc.BlockNumber defer tx.Rollback() // get latest finished block - finishedBlock, err := stages.GetStageProgress(tx, stages.Finish) + var finishedBlock uint64 + if sequencer.IsSequencer() { + finishedBlock, err = stages.GetStageProgress(tx, stages.Execution) + } else { + finishedBlock, err = stages.GetStageProgress(tx, stages.Finish) + } if err != nil { return nil, err } diff --git a/turbo/jsonrpc/eth_system_zk.go b/turbo/jsonrpc/eth_system_zk.go index d8da8ca0dd3..5882f23a44c 100644 --- a/turbo/jsonrpc/eth_system_zk.go +++ b/turbo/jsonrpc/eth_system_zk.go @@ -6,9 +6,12 @@ import ( "fmt" "math/big" "strconv" + "strings" "time" "github.com/ledgerwatch/erigon-lib/common/hexutil" + "github.com/ledgerwatch/erigon/cmd/utils" + "github.com/ledgerwatch/erigon/ethclient" "github.com/ledgerwatch/erigon/zkevm/encoding" "github.com/ledgerwatch/erigon/zkevm/jsonrpc/client" "github.com/ledgerwatch/log/v3" @@ -44,23 +47,15 @@ func (api *APIImpl) GasPrice(ctx context.Context) (*hexutil.Big, error) { return &price, nil } - res, err := client.JSONRPCCall(api.l2RpcUrl, "eth_gasPrice") + client, err := ethclient.DialContext(ctx, api.l2RpcUrl) if err != nil { return nil, err } + defer 
client.Close() - if res.Error != nil { - return nil, fmt.Errorf("RPC error response: %s", res.Error.Message) - } - - var resultString string - if err := json.Unmarshal(res.Result, &resultString); err != nil { - return nil, fmt.Errorf("failed to unmarshal result: %v", err) - } - - price, ok := big.NewInt(0).SetString(resultString[2:], 16) - if !ok { - return nil, fmt.Errorf("failed to convert result to big.Int") + price, err := client.SuggestGasPrice(ctx) + if err != nil { + return nil, err } return (*hexutil.Big)(price), nil @@ -76,11 +71,13 @@ func (api *APIImpl) GasPrice_nonRedirected(ctx context.Context) (*hexutil.Big, e if time.Since(api.L1GasPrice.timestamp) > 3*time.Second || api.L1GasPrice.gasPrice == nil { l1GasPrice, err := api.l1GasPrice() if err != nil { - return nil, err - } - api.L1GasPrice = L1GasPrice{ - timestamp: time.Now(), - gasPrice: l1GasPrice, + log.Debug("Failed to get L1 gas price: ", err) + + } else { + api.L1GasPrice = L1GasPrice{ + timestamp: time.Now(), + gasPrice: l1GasPrice, + } } } @@ -129,6 +126,10 @@ func (api *APIImpl) l1GasPrice() (*big.Int, error) { } if res.Error != nil { + if strings.Contains(res.Error.Message, api.L1RpcUrl) { + replacement := fmt.Sprintf("<%s>", utils.L1RpcUrlFlag.Name) + res.Error.Message = strings.ReplaceAll(res.Error.Message, api.L1RpcUrl, replacement) + } return nil, fmt.Errorf("RPC error response: %s", res.Error.Message) } diff --git a/turbo/jsonrpc/tracing_block_zkevm.go b/turbo/jsonrpc/tracing_block_zkevm.go new file mode 100644 index 00000000000..c5e4558901b --- /dev/null +++ b/turbo/jsonrpc/tracing_block_zkevm.go @@ -0,0 +1,184 @@ +package jsonrpc + +import ( + "context" + "time" + + jsoniter "github.com/json-iterator/go" + "github.com/ledgerwatch/erigon-lib/chain" + "github.com/ledgerwatch/erigon-lib/kv" + "github.com/ledgerwatch/erigon/consensus" + "github.com/ledgerwatch/erigon/core" + "github.com/ledgerwatch/erigon/core/rawdb" + "github.com/ledgerwatch/erigon/core/state" + 
"github.com/ledgerwatch/erigon/core/types" + "github.com/ledgerwatch/erigon/core/vm" + "github.com/ledgerwatch/erigon/core/vm/evmtypes" + "github.com/ledgerwatch/erigon/eth/tracers" + "github.com/ledgerwatch/erigon/rpc" + "github.com/ledgerwatch/erigon/turbo/services" + "github.com/ledgerwatch/erigon/turbo/transactions" + "github.com/ledgerwatch/erigon/zk/hermez_db" +) + +type blockTracer struct { + ctx context.Context + stream *jsoniter.Stream + engine consensus.EngineReader + tx kv.Tx + config *tracers.TraceConfig_ZkEvm + chainConfig *chain.Config + _blockReader services.FullBlockReader + historyV3 bool + evmCallTimeout time.Duration +} + +func (bt *blockTracer) TraceBlock(block *types.Block) error { + txEnv, err := transactions.ComputeTxEnv_ZkEvm(bt.ctx, bt.engine, block, bt.chainConfig, bt._blockReader, bt.tx, 0, bt.historyV3) + if err != nil { + bt.stream.WriteNil() + return err + } + bt.stream.WriteArrayStart() + + borTx := rawdb.ReadBorTransactionForBlock(bt.tx, block.NumberU64()) + txns := block.Transactions() + if borTx != nil && *bt.config.BorTraceEnabled { + txns = append(txns, borTx) + } + + txTracerEnv := txTracerEnv{ + block: block, + txEnv: txEnv, + cumulativeGas: uint64(0), + hermezReader: hermez_db.NewHermezDbReader(bt.tx), + chainConfig: bt.chainConfig, + engine: bt.engine, + } + + for idx, txn := range txns { + if err := bt.traceLastTxFlushed(txTracerEnv, txn, idx); err != nil { + return err + } + } + bt.stream.WriteArrayEnd() + bt.stream.Flush() + + return nil +} + +func (bt *blockTracer) traceLastTxFlushed(txTracerEnv txTracerEnv, txn types.Transaction, idx int) error { + if err := bt.traceTransactionUnflushed(txTracerEnv, txn, idx); err != nil { + return err + } + + if idx != len(txTracerEnv.block.Transactions())-1 { + bt.stream.WriteMore() + } + bt.stream.Flush() + return nil +} + +func (bt *blockTracer) traceTransactionUnflushed(txTracerEnv txTracerEnv, txn types.Transaction, idx int) error { + txHash := txn.Hash() + 
bt.stream.WriteObjectStart() + bt.stream.WriteObjectField("txHash") + bt.stream.WriteString(txHash.Hex()) + bt.stream.WriteMore() + bt.stream.WriteObjectField("result") + select { + default: + case <-bt.ctx.Done(): + bt.stream.WriteNil() + return bt.ctx.Err() + } + + txCtx, msg, err := txTracerEnv.GetTxExecuteContext(txn, idx) + if err != nil { + bt.stream.WriteNil() + return err + } + + if err = transactions.TraceTx( + bt.ctx, + msg, + txTracerEnv.txEnv.BlockContext, + txCtx, + txTracerEnv.txEnv.Ibs, + bt.config, + bt.chainConfig, + bt.stream, + bt.evmCallTimeout, + ); err == nil { + rules := bt.chainConfig.Rules(txTracerEnv.block.NumberU64(), txTracerEnv.block.Time()) + err = txTracerEnv.txEnv.Ibs.FinalizeTx(rules, state.NewNoopWriter()) + } + bt.stream.WriteObjectEnd() + + // if we have an error we want to output valid json for it before continuing after clearing down potential writes to the stream + if err != nil { + bt.handleError(err) + if err != nil { + return err + } + } + + return nil +} + +func (bt *blockTracer) handleError(err error) { + bt.stream.WriteMore() + bt.stream.WriteObjectStart() + rpc.HandleError(err, bt.stream) + bt.stream.WriteObjectEnd() +} + +type txTracerEnv struct { + hermezReader state.ReadOnlyHermezDb + chainConfig *chain.Config + engine consensus.EngineReader + block *types.Block + cumulativeGas uint64 + txEnv transactions.TxEnv +} + +func (tt *txTracerEnv) GetTxExecuteContext(txn types.Transaction, idx int) (evmtypes.TxContext, types.Message, error) { + txHash := txn.Hash() + evm, effectiveGasPricePercentage, err := core.PrepareForTxExecution( + tt.chainConfig, + &vm.Config{}, + &tt.txEnv.BlockContext, + tt.hermezReader, + tt.txEnv.Ibs, + tt.block, + &txHash, + idx, + ) + if err != nil { + return evmtypes.TxContext{}, types.Message{}, err + } + + msg, _, err := core.GetTxContext( + tt.chainConfig, + tt.engine, + tt.txEnv.Ibs, + tt.block.Header(), + txn, + evm, + effectiveGasPricePercentage, + ) + if err != nil { + return 
evmtypes.TxContext{}, types.Message{}, err + } + + txCtx := evmtypes.TxContext{ + TxHash: txHash, + Origin: msg.From(), + GasPrice: msg.GasPrice(), + Txn: txn, + CumulativeGasUsed: &tt.cumulativeGas, + BlockNum: tt.block.NumberU64(), + } + + return txCtx, msg, nil +} diff --git a/turbo/jsonrpc/tracing_zkevm.go b/turbo/jsonrpc/tracing_zkevm.go index 145e64265de..fada554d42a 100644 --- a/turbo/jsonrpc/tracing_zkevm.go +++ b/turbo/jsonrpc/tracing_zkevm.go @@ -2,6 +2,7 @@ package jsonrpc import ( "context" + "errors" "fmt" "math/big" "time" @@ -81,87 +82,20 @@ func (api *PrivateDebugAPIImpl) traceBlock(ctx context.Context, blockNrOrHash rp stream.WriteNil() return err } - engine := api.engine() - - txEnv, err := transactions.ComputeTxEnv_ZkEvm(ctx, engine, block, chainConfig, api._blockReader, tx, 0, api.historyV3(tx)) - if err != nil { - stream.WriteNil() - return err - } - blockCtx := txEnv.BlockContext - ibs := txEnv.Ibs - rules := chainConfig.Rules(block.NumberU64(), block.Time()) - stream.WriteArrayStart() - - borTx := rawdb.ReadBorTransactionForBlock(tx, block.NumberU64()) - txns := block.Transactions() - if borTx != nil && *config.BorTraceEnabled { - txns = append(txns, borTx) + blockTracer := &blockTracer{ + ctx: ctx, + stream: stream, + engine: api.engine(), + tx: tx, + config: config, + chainConfig: chainConfig, + _blockReader: api._blockReader, + historyV3: api.historyV3(tx), + evmCallTimeout: api.evmCallTimeout, } - cumulativeGas := uint64(0) - hermezReader := hermez_db.NewHermezDbReader(tx) - - for idx, txn := range txns { - stream.WriteObjectStart() - stream.WriteObjectField("txHash") - stream.WriteString(txn.Hash().Hex()) - stream.WriteMore() - stream.WriteObjectField("result") - select { - default: - case <-ctx.Done(): - stream.WriteNil() - return ctx.Err() - } - - txHash := txn.Hash() - evm, effectiveGasPricePercentage, err := core.PrepareForTxExecution(chainConfig, &vm.Config{}, &blockCtx, hermezReader, ibs, block, &txHash, idx) - if err != nil { - 
stream.WriteNil() - return err - } - - msg, _, err := core.GetTxContext(chainConfig, engine, ibs, block.Header(), txn, evm, effectiveGasPricePercentage) - if err != nil { - stream.WriteNil() - return err - } - - txCtx := evmtypes.TxContext{ - TxHash: txn.Hash(), - Origin: msg.From(), - GasPrice: msg.GasPrice(), - Txn: txn, - CumulativeGasUsed: &cumulativeGas, - BlockNum: block.NumberU64(), - } - - err = transactions.TraceTx(ctx, msg, blockCtx, txCtx, ibs, config, chainConfig, stream, api.evmCallTimeout) - if err == nil { - err = ibs.FinalizeTx(rules, state.NewNoopWriter()) - } - stream.WriteObjectEnd() - - // if we have an error we want to output valid json for it before continuing after clearing down potential writes to the stream - if err != nil { - stream.WriteMore() - stream.WriteObjectStart() - rpc.HandleError(err, stream) - stream.WriteObjectEnd() - if err != nil { - return err - } - } - if idx != len(txns)-1 { - stream.WriteMore() - } - stream.Flush() - } - stream.WriteArrayEnd() - stream.Flush() - return nil + return blockTracer.TraceBlock(block) } func (api *PrivateDebugAPIImpl) TraceCallMany(ctx context.Context, bundles []Bundle, simulateContext StateContext, config *tracers.TraceConfig_ZkEvm, stream *jsoniter.Stream) error { @@ -469,3 +403,80 @@ func (api *PrivateDebugAPIImpl) TraceTransactionCounters(ctx context.Context, ha // Trace the transaction and return return transactions.TraceTx(ctx, txEnv.Msg, txEnv.BlockContext, txEnv.TxContext, txEnv.Ibs, config, chainConfig, stream, api.evmCallTimeout) } + +func (api *PrivateDebugAPIImpl) TraceBatchByNumber(ctx context.Context, batchNum rpc.BlockNumber, config *tracers.TraceConfig_ZkEvm, stream *jsoniter.Stream) error { + tx, err := api.db.BeginRo(ctx) + if err != nil { + stream.WriteNil() + return err + } + defer tx.Rollback() + + reader := hermez_db.NewHermezDbReader(tx) + badBatch, err := reader.GetInvalidBatch(batchNum.Uint64()) + if err != nil { + stream.WriteNil() + return err + } + + if badBatch { + 
stream.WriteNil() + return errors.New("batch is invalid") + } + + blockNumbers, err := reader.GetL2BlockNosByBatch(batchNum.Uint64()) + if err != nil { + stream.WriteNil() + return fmt.Errorf("failed to get block numbers for batch %d: %w", batchNum, err) + } + if len(blockNumbers) == 0 { + return fmt.Errorf("no blocks found for batch %d", batchNum) + } + + // if we've pruned this history away for this block then just return early + // to save any red herring errors + if err = api.BaseAPI.checkPruneHistory(tx, blockNumbers[0]); err != nil { + stream.WriteNil() + return err + } + + if config == nil { + config = &tracers.TraceConfig_ZkEvm{} + } + + if config.BorTraceEnabled == nil { + config.BorTraceEnabled = newBoolPtr(false) + } + + chainConfig, err := api.chainConfig(ctx, tx) + if err != nil { + stream.WriteNil() + return err + } + blockTracer := &blockTracer{ + ctx: ctx, + stream: stream, + engine: api.engine(), + tx: tx, + config: config, + chainConfig: chainConfig, + _blockReader: api._blockReader, + historyV3: api.historyV3(tx), + evmCallTimeout: api.evmCallTimeout, + } + + for _, blockNum := range blockNumbers { + block, err := api.blockByNumberWithSenders(ctx, tx, blockNum) + if err != nil { + stream.WriteNil() + return nil + } + + if err := blockTracer.TraceBlock(block); err != nil { + stream.WriteNil() + return err + } + } + + return nil +} diff --git a/turbo/jsonrpc/zkevm_api.go b/turbo/jsonrpc/zkevm_api.go index 6301971290d..6e22410bb39 100644 --- a/turbo/jsonrpc/zkevm_api.go +++ b/turbo/jsonrpc/zkevm_api.go @@ -9,18 +9,20 @@ import ( "github.com/ledgerwatch/erigon-lib/chain" "github.com/ledgerwatch/erigon-lib/common" - libcommon "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/common/hexutility" "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/log/v3" zktypes "github.com/ledgerwatch/erigon/zk/types" + "math" + "github.com/holiman/uint256" "github.com/ledgerwatch/erigon-lib/common/hexutil" 
"github.com/ledgerwatch/erigon-lib/kv/membatchwithdb" "github.com/ledgerwatch/erigon/core" "github.com/ledgerwatch/erigon/core/rawdb" "github.com/ledgerwatch/erigon/core/state" + "github.com/ledgerwatch/erigon/core/systemcontracts" eritypes "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/core/types/accounts" "github.com/ledgerwatch/erigon/eth/ethconfig" @@ -31,6 +33,7 @@ import ( "github.com/ledgerwatch/erigon/smt/pkg/smt" smtUtils "github.com/ledgerwatch/erigon/smt/pkg/utils" "github.com/ledgerwatch/erigon/turbo/rpchelper" + "github.com/ledgerwatch/erigon/turbo/trie" "github.com/ledgerwatch/erigon/zk/datastream/server" "github.com/ledgerwatch/erigon/zk/hermez_db" "github.com/ledgerwatch/erigon/zk/legacy_executor_verifier" @@ -40,12 +43,9 @@ import ( "github.com/ledgerwatch/erigon/zk/syncer" zktx "github.com/ledgerwatch/erigon/zk/tx" "github.com/ledgerwatch/erigon/zk/utils" - zkUtils "github.com/ledgerwatch/erigon/zk/utils" "github.com/ledgerwatch/erigon/zk/witness" "github.com/ledgerwatch/erigon/zkevm/hex" "github.com/ledgerwatch/erigon/zkevm/jsonrpc/client" - "github.com/ledgerwatch/erigon/core/systemcontracts" - "math" ) var sha3UncleHash = common.HexToHash("0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347") @@ -732,6 +732,11 @@ func (api *ZkEvmAPIImpl) getAccInputHash(ctx context.Context, db SequenceReader, return nil, fmt.Errorf("failed to get sequence range data for batch %d: %w", batchNum, err) } + // if we are asking for genesis return 0x0..0 + if batchNum == 0 && prevSequence.BatchNo == 0 { + return &common.Hash{}, nil + } + if prevSequence == nil || batchSequence == nil { var missing string if prevSequence == nil && batchSequence == nil { @@ -744,16 +749,6 @@ func (api *ZkEvmAPIImpl) getAccInputHash(ctx context.Context, db SequenceReader, return nil, fmt.Errorf("failed to get %s for batch %d", missing, batchNum) } - // if we are asking for the injected batch or genesis return 0x0..0 - if (batchNum == 0 || 
batchNum == 1) && prevSequence.BatchNo == 0 { - return &common.Hash{}, nil - } - - // if prev is 0, set to 1 (injected batch) - if prevSequence.BatchNo == 0 { - prevSequence.BatchNo = 1 - } - // get batch range for sequence prevSequenceBatch, currentSequenceBatch := prevSequence.BatchNo, batchSequence.BatchNo // get call data for tx @@ -790,11 +785,8 @@ func (api *ZkEvmAPIImpl) getAccInputHash(ctx context.Context, db SequenceReader, return nil, fmt.Errorf("batch %d is out of range of sequence calldata", batchNum) } - accInputHash = &prevSequenceAccinputHash - if prevSequenceBatch == 0 { - return - } // calculate acc input hash + accInputHash = &prevSequenceAccinputHash for i := 0; i < int(batchNum-prevSequenceBatch); i++ { accInputHash = accInputHashCalcFn(prevSequenceAccinputHash, i) prevSequenceAccinputHash = *accInputHash @@ -839,7 +831,7 @@ func (api *ZkEvmAPIImpl) GetFullBlockByNumber(ctx context.Context, number rpc.Bl // GetFullBlockByHash returns a full block from the current canonical chain. If number is nil, the // latest known block is returned. 
-func (api *ZkEvmAPIImpl) GetFullBlockByHash(ctx context.Context, hash libcommon.Hash, fullTx bool) (types.Block, error) { +func (api *ZkEvmAPIImpl) GetFullBlockByHash(ctx context.Context, hash common.Hash, fullTx bool) (types.Block, error) { tx, err := api.db.BeginRo(ctx) if err != nil { return types.Block{}, err @@ -981,7 +973,6 @@ func (api *ZkEvmAPIImpl) GetBlockRangeWitness(ctx context.Context, startBlockNrO } func (api *ZkEvmAPIImpl) getBatchWitness(ctx context.Context, tx kv.Tx, batchNum uint64, debug bool, mode WitnessMode) (hexutility.Bytes, error) { - // limit in-flight requests by name semaphore := api.semaphores[getBatchWitness] if semaphore != nil { @@ -996,14 +987,44 @@ func (api *ZkEvmAPIImpl) getBatchWitness(ctx context.Context, tx kv.Tx, batchNum if api.ethApi.historyV3(tx) { return nil, fmt.Errorf("not supported by Erigon3") } - - generator, fullWitness, err := api.buildGenerator(ctx, tx, mode) + reader := hermez_db.NewHermezDbReader(tx) + badBatch, err := reader.GetInvalidBatch(batchNum) if err != nil { return nil, err } - return generator.GetWitnessByBatch(tx, ctx, batchNum, debug, fullWitness) + if !badBatch { + blockNumbers, err := reader.GetL2BlockNosByBatch(batchNum) + if err != nil { + return nil, err + } + if len(blockNumbers) == 0 { + return nil, fmt.Errorf("no blocks found for batch %d", batchNum) + } + var startBlock, endBlock uint64 + for _, blockNumber := range blockNumbers { + if startBlock == 0 || blockNumber < startBlock { + startBlock = blockNumber + } + if blockNumber > endBlock { + endBlock = blockNumber + } + } + + startBlockInt := rpc.BlockNumber(startBlock) + endBlockInt := rpc.BlockNumber(endBlock) + + startBlockRpc := rpc.BlockNumberOrHash{BlockNumber: &startBlockInt} + endBlockNrOrHash := rpc.BlockNumberOrHash{BlockNumber: &endBlockInt} + return api.getBlockRangeWitness(ctx, api.db, startBlockRpc, endBlockNrOrHash, debug, mode) + } else { + generator, fullWitness, err := api.buildGenerator(ctx, tx, mode) + if err != nil { 
+ return nil, err + } + return generator.GetWitnessByBadBatch(tx, ctx, batchNum, debug, fullWitness) + } } func (api *ZkEvmAPIImpl) buildGenerator(ctx context.Context, tx kv.Tx, witnessMode WitnessMode) (*witness.Generator, bool, error) { @@ -1050,7 +1071,6 @@ func (api *ZkEvmAPIImpl) getBlockRangeWitness(ctx context.Context, db kv.RoDB, s } endBlockNr, _, _, err := rpchelper.GetCanonicalBlockNumber_zkevm(endBlockNrOrHash, tx, api.ethApi.filters) // DoCall cannot be executed on non-canonical blocks - if err != nil { return nil, err } @@ -1059,6 +1079,41 @@ func (api *ZkEvmAPIImpl) getBlockRangeWitness(ctx context.Context, db kv.RoDB, s return nil, fmt.Errorf("start block number must be less than or equal to end block number, start=%d end=%d", blockNr, endBlockNr) } + hermezDb := hermez_db.NewHermezDbReader(tx) + + // we only keep trimmed witnesses in the db + if witnessMode == WitnessModeTrimmed { + blockWitnesses := make([]*trie.Witness, 0, endBlockNr-blockNr+1) + //try to get them from the db, if all are available - do not unwind and generate + for blockNum := blockNr; blockNum <= endBlockNr; blockNum++ { + witnessBytes, err := hermezDb.GetWitnessCache(blockNum) + if err != nil { + return nil, err + } + + if len(witnessBytes) == 0 { + break + } + + blockWitness, err := witness.ParseWitnessFromBytes(witnessBytes, false) + if err != nil { + return nil, err + } + + blockWitnesses = append(blockWitnesses, blockWitness) + } + + if len(blockWitnesses) == int(endBlockNr-blockNr+1) { + // found all, calculate + baseWitness, err := witness.MergeWitnesses(ctx, blockWitnesses) + if err != nil { + return nil, err + } + + return witness.GetWitnessBytes(baseWitness, debug) + } + } + generator, fullWitness, err := api.buildGenerator(ctx, tx, witnessMode) if err != nil { return nil, err @@ -1303,11 +1358,6 @@ func getLastBlockInBatchNumber(tx kv.Tx, batchNumber uint64) (uint64, error) { return blocks[len(blocks)-1], nil } -func getAllBlocksInBatchNumber(tx kv.Tx, batchNumber 
uint64) ([]uint64, error) { - reader := hermez_db.NewHermezDbReader(tx) - return reader.GetL2BlockNosByBatch(batchNumber) -} - func getLatestBatchNumber(tx kv.Tx) (uint64, error) { c, err := tx.Cursor(hermez_db.BLOCKBATCHES) if err != nil { @@ -1381,68 +1431,6 @@ func getForkIntervals(tx kv.Tx) ([]rpc.ForkInterval, error) { return result, nil } -func convertTransactionsReceipts( - txs []eritypes.Transaction, - receipts eritypes.Receipts, - hermezReader hermez_db.HermezDbReader, - block eritypes.Block) ([]types.Transaction, error) { - if len(txs) != len(receipts) { - return nil, errors.New("transactions and receipts length mismatch") - } - - result := make([]types.Transaction, 0, len(txs)) - - for idx, tx := range txs { - effectiveGasPricePercentage, err := hermezReader.GetEffectiveGasPricePercentage(tx.Hash()) - if err != nil { - return nil, err - } - gasPrice := tx.GetPrice() - v, r, s := tx.RawSignatureValues() - var sender common.Address - - // TODO: senders! - - var receipt *types.Receipt - if len(receipts) > idx { - receipt = convertReceipt(receipts[idx], sender, tx.GetTo(), gasPrice, effectiveGasPricePercentage) - } - - bh := block.Hash() - blockNumber := block.NumberU64() - - tran := types.Transaction{ - Nonce: types.ArgUint64(tx.GetNonce()), - GasPrice: types.ArgBig(*gasPrice.ToBig()), - Gas: types.ArgUint64(tx.GetGas()), - To: tx.GetTo(), - Value: types.ArgBig(*tx.GetValue().ToBig()), - Input: tx.GetData(), - V: types.ArgBig(*v.ToBig()), - R: types.ArgBig(*r.ToBig()), - S: types.ArgBig(*s.ToBig()), - Hash: tx.Hash(), - From: sender, - BlockHash: &bh, - BlockNumber: types.ArgUint64Ptr(types.ArgUint64(blockNumber)), - TxIndex: types.ArgUint64Ptr(types.ArgUint64(idx)), - Type: types.ArgUint64(tx.Type()), - Receipt: receipt, - } - - cid := tx.GetChainID() - var cidAB *types.ArgBig - if cid.Cmp(uint256.NewInt(0)) != 0 { - cidAB = (*types.ArgBig)(cid.ToBig()) - tran.ChainID = cidAB - } - - result = append(result, tran) - } - - return result, nil -} - func 
convertBlockToRpcBlock( orig *eritypes.Block, receipts eritypes.Receipts, @@ -1660,7 +1648,7 @@ func (zkapi *ZkEvmAPIImpl) GetProof(ctx context.Context, address common.Address, batch := membatchwithdb.NewMemoryBatch(tx, api.dirs.Tmp, api.logger) defer batch.Rollback() - if err = zkUtils.PopulateMemoryMutationTables(batch); err != nil { + if err = utils.PopulateMemoryMutationTables(batch); err != nil { return nil, err } @@ -1720,13 +1708,12 @@ func (zkapi *ZkEvmAPIImpl) GetProof(ctx context.Context, address common.Address, plainState := state.NewPlainState(tx, blockNumber, systemcontracts.SystemContractCodeLookup[chainCfg.ChainName]) defer plainState.Close() - inclusion := make(map[libcommon.Address][]libcommon.Hash) + inclusion := make(map[common.Address][]common.Hash) for _, contract := range zkapi.config.WitnessContractInclusion { - err = plainState.ForEachStorage(contract, libcommon.Hash{}, func(key, secKey libcommon.Hash, value uint256.Int) bool { + if err = plainState.ForEachStorage(contract, common.Hash{}, func(key, secKey common.Hash, value uint256.Int) bool { inclusion[contract] = append(inclusion[contract], key) return false - }, math.MaxInt64) - if err != nil { + }, math.MaxInt64); err != nil { return nil, err } } @@ -1786,7 +1773,7 @@ func (zkapi *ZkEvmAPIImpl) GetProof(ctx context.Context, address common.Address, accProof := &accounts.SMTAccProofResult{ Address: address, Balance: (*hexutil.Big)(balance), - CodeHash: libcommon.BytesToHash(codeHash), + CodeHash: common.BytesToHash(codeHash), CodeLength: hexutil.Uint64(codeLength), Nonce: hexutil.Uint64(nonce), BalanceProof: balanceProofs, @@ -1928,6 +1915,38 @@ func (api *ZkEvmAPIImpl) GetRollupManagerAddress(ctx context.Context) (res json. 
return rollupManagerAddressJson, err } +func (api *ZkEvmAPIImpl) getInjectedBatchAccInputHashFromSequencer(rpcUrl string) (*common.Hash, error) { + res, err := client.JSONRPCCall(rpcUrl, "zkevm_getBatchByNumber", 1) + if err != nil { + return nil, err + } + + if res.Error != nil { + return nil, fmt.Errorf("RPC error response: %s", res.Error.Message) + } + + var resultMap map[string]interface{} + + err = json.Unmarshal(res.Result, &resultMap) + if err != nil { + return nil, err + } + + hashValue, ok := resultMap["accInputHash"] + if !ok { + return nil, fmt.Errorf("accInputHash not found in response") + } + + hash, ok := hashValue.(string) + if !ok { + return nil, fmt.Errorf("accInputHash is not a string") + } + + decoded := common.HexToHash(hash) + + return &decoded, nil +} + func (api *ZkEvmAPIImpl) GetLatestDataStreamBlock(ctx context.Context) (hexutil.Uint64, error) { tx, err := api.db.BeginRo(ctx) if err != nil { diff --git a/turbo/jsonrpc/zkevm_api_test.go b/turbo/jsonrpc/zkevm_api_test.go index c9cda1e73f8..8746a0cecd5 100644 --- a/turbo/jsonrpc/zkevm_api_test.go +++ b/turbo/jsonrpc/zkevm_api_test.go @@ -17,7 +17,6 @@ import ( "github.com/ledgerwatch/erigon-lib/common/hexutil" "github.com/ledgerwatch/erigon-lib/kv/kvcache" "github.com/ledgerwatch/erigon/accounts/abi/bind/backends" - "github.com/ledgerwatch/erigon/cmd/rpcdaemon/commands/mocks" "github.com/ledgerwatch/erigon/core/rawdb" "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/crypto" @@ -30,6 +29,7 @@ import ( "github.com/ledgerwatch/erigon/zk/hermez_db" rpctypes "github.com/ledgerwatch/erigon/zk/rpcdaemon" "github.com/ledgerwatch/erigon/zk/syncer" + "github.com/ledgerwatch/erigon/zk/syncer/mocks" zktypes "github.com/ledgerwatch/erigon/zk/types" "github.com/ledgerwatch/log/v3" "github.com/stretchr/testify/assert" @@ -480,7 +480,7 @@ func TestGetBatchByNumber(t *testing.T) { assert.Equal(gers[len(gers)-1], batch.GlobalExitRoot) 
assert.Equal(mainnetExitRoots[len(mainnetExitRoots)-1], batch.MainnetExitRoot) assert.Equal(rollupExitRoots[len(rollupExitRoots)-1], batch.RollupExitRoot) - assert.Equal(common.HexToHash(common.Hash{}.String()), batch.AccInputHash) + assert.Equal(common.HexToHash("0x97d1524156ccb46723e5c3c87951da9a390499ba288161d879df1dbc03d49afc"), batch.AccInputHash) assert.Equal(common.HexToHash("0x22ddb9a356815c3fac1026b6dec5df3124afbadb485c9ba5a3e3398a04b7ba97"), *batch.SendSequencesTxHash) assert.Equal(rpctypes.ArgUint64(1714427009), batch.Timestamp) assert.Equal(true, batch.Closed) diff --git a/turbo/jsonrpc/zkevm_api_xlayer.go b/turbo/jsonrpc/zkevm_api_xlayer.go index b517a2b534a..bf2f828e8d0 100644 --- a/turbo/jsonrpc/zkevm_api_xlayer.go +++ b/turbo/jsonrpc/zkevm_api_xlayer.go @@ -24,17 +24,16 @@ func (api *ZkEvmAPIImpl) GetBatchSealTime(ctx context.Context, batchNumber rpc.B return 0, err } defer tx.Rollback() - - blocks, err := getAllBlocksInBatchNumber(tx, uint64(batchNumber.Int64())) + var lastBlockNum = uint64(0) + lastBlockNum, err = getLastBlockInBatchNumber(tx, uint64(batchNumber.Int64())) if err != nil { return 0, err } - if len(blocks) == 0 { - return 0, errors.New("batch is empty") + lastBlock, err := api.GetFullBlockByNumber(ctx, rpc.BlockNumber(lastBlockNum), false) + if err != nil { + return 0, err } - lastBlock, err := api.GetFullBlockByNumber(ctx, rpc.BlockNumber(blocks[len(blocks)-1]), false) - return lastBlock.Timestamp, nil } diff --git a/turbo/rpchelper/rpc_block.go b/turbo/rpchelper/rpc_block.go index b5da8148c29..5835209835c 100644 --- a/turbo/rpchelper/rpc_block.go +++ b/turbo/rpchelper/rpc_block.go @@ -10,6 +10,7 @@ import ( "github.com/ledgerwatch/erigon/eth/stagedsync/stages" "github.com/ledgerwatch/erigon/rpc" "github.com/ledgerwatch/erigon/zk/hermez_db" + "github.com/ledgerwatch/erigon/zk/sequencer" ) var UnknownBlockError = &rpc.CustomError{ @@ -18,15 +19,13 @@ var UnknownBlockError = &rpc.CustomError{ } func GetLatestFinishedBlockNumber(tx 
kv.Tx) (uint64, error) { - forkchoiceHeadHash := rawdb.ReadForkchoiceHead(tx) - if forkchoiceHeadHash != (libcommon.Hash{}) { - forkchoiceHeadNum := rawdb.ReadHeaderNumber(tx, forkchoiceHeadHash) - if forkchoiceHeadNum != nil { - return *forkchoiceHeadNum, nil - } + var blockNum uint64 + var err error + if sequencer.IsSequencer() { + blockNum, err = stages.GetStageProgress(tx, stages.Execution) + } else { + blockNum, err = stages.GetStageProgress(tx, stages.Finish) } - - blockNum, err := stages.GetStageProgress(tx, stages.Finish) if err != nil { return 0, fmt.Errorf("getting latest block number: %w", err) } @@ -48,14 +47,19 @@ func GetFinalizedBlockNumber(tx kv.Tx) (uint64, error) { return 0, err } - finishedBlockNumber, err := stages.GetStageProgress(tx, stages.Finish) + var highestBlockNumber uint64 + if sequencer.IsSequencer() { + highestBlockNumber, err = stages.GetStageProgress(tx, stages.Execution) + } else { + highestBlockNumber, err = stages.GetStageProgress(tx, stages.Finish) + } if err != nil { return 0, fmt.Errorf("getting latest finished block number: %w", err) } blockNumber := highestVerifiedBlock - if finishedBlockNumber < blockNumber { - blockNumber = finishedBlockNumber + if highestBlockNumber < blockNumber { + blockNumber = highestBlockNumber } return blockNumber, nil diff --git a/turbo/stages/zk_stages.go b/turbo/stages/zk_stages.go index a585503c0e0..88d0deb1acb 100644 --- a/turbo/stages/zk_stages.go +++ b/turbo/stages/zk_stages.go @@ -80,6 +80,7 @@ func NewDefaultZkStages(ctx context.Context, ), stagedsync.StageHashStateCfg(db, dirs, cfg.HistoryV3, agg), zkStages.StageZkInterHashesCfg(db, true, true, false, dirs.Tmp, blockReader, controlServer.Hd, cfg.HistoryV3, agg, cfg.Zk), + zkStages.StageWitnessCfg(db, cfg.Zk, controlServer.ChainConfig, engine, blockReader, agg, cfg.HistoryV3, dirs, cfg.WitnessContractInclusion), stagedsync.StageHistoryCfg(db, cfg.Prune, dirs.Tmp), stagedsync.StageLogIndexCfg(db, cfg.Prune, dirs.Tmp, 
cfg.Genesis.Config.NoPruneContracts), stagedsync.StageCallTracesCfg(db, cfg.Prune, 0, dirs.Tmp), diff --git a/turbo/trie/witness.go b/turbo/trie/witness.go index 3f309be40e5..874fe5eb966 100644 --- a/turbo/trie/witness.go +++ b/turbo/trie/witness.go @@ -118,6 +118,8 @@ func NewWitnessFromReader(input io.Reader, trace bool) (*Witness, error) { op = &OperatorCode{} case OpBranch: op = &OperatorBranch{} + case OpSMTLeaf: + op = &OperatorSMTLeafValue{} case OpEmptyRoot: op = &OperatorEmptyRoot{} case OpExtension: @@ -173,81 +175,98 @@ func (w *Witness) WriteDiff(w2 *Witness, output io.Writer) { op = w.Operators[i] } if i >= len(w2.Operators) { - fmt.Fprintf(output, "unexpected o1[%d] = %T %v; o2[%d] = nil\n", i, op, op, i) + fmt.Fprintf(output, "missing in o2: o1[%d] = %T %v;\n", i, op, op) continue } + op2 := w2.Operators[i] switch o1 := op.(type) { case *OperatorBranch: - o2, ok := w2.Operators[i].(*OperatorBranch) + o2, ok := op2.(*OperatorBranch) if !ok { - fmt.Fprintf(output, "o1[%d] = %T %+v; o2[%d] = %T %+v\n", i, o1, o1, i, o2, o2) - } - if o1.Mask != o2.Mask { - fmt.Fprintf(output, "o1[%d].Mask = %v; o2[%d].Mask = %v", i, o1.Mask, i, o2.Mask) + fmt.Fprintf(output, "OperatorBranch: o1[%d] = %T; o2[%d] = %T\n", i, o1, i, op2) + } else if o1.Mask != o2.Mask { + fmt.Fprintf(output, "OperatorBranch: o1[%d].Mask = %v; o2[%d].Mask = %v", i, o1.Mask, i, o2.Mask) } case *OperatorHash: - o2, ok := w2.Operators[i].(*OperatorHash) + o2, ok := op2.(*OperatorHash) if !ok { - fmt.Fprintf(output, "o1[%d] = %T %+v; o2[%d] = %T %+v\n", i, o1, o1, i, o2, o2) - } - if !bytes.Equal(o1.Hash.Bytes(), o2.Hash.Bytes()) { - fmt.Fprintf(output, "o1[%d].Hash = %s; o2[%d].Hash = %s\n", i, o1.Hash.Hex(), i, o2.Hash.Hex()) + fmt.Fprintf(output, "OperatorHash: o1[%d] = %T; o2[%d] = %T\n", i, o1, i, op2) + } else if !bytes.Equal(o1.Hash.Bytes(), o2.Hash.Bytes()) { + fmt.Fprintf(output, "OperatorHash: o1[%d].Hash = %s; o2[%d].Hash = %s\n", i, o1.Hash.Hex(), i, o2.Hash.Hex()) } case 
*OperatorCode: - o2, ok := w2.Operators[i].(*OperatorCode) + o2, ok := op2.(*OperatorCode) if !ok { - fmt.Fprintf(output, "o1[%d] = %T %+v; o2[%d] = %T %+v\n", i, o1, o1, i, o2, o2) - } - if !bytes.Equal(o1.Code, o2.Code) { - fmt.Fprintf(output, "o1[%d].Code = %x; o2[%d].Code = %x\n", i, o1.Code, i, o2.Code) + fmt.Fprintf(output, "OperatorCode: o1[%d] = %T; o2[%d] = %T\n", i, o1, i, op2) + } else if !bytes.Equal(o1.Code, o2.Code) { + fmt.Fprintf(output, "OperatorCode: o1[%d].Code = %x; o2[%d].Code = %x\n", i, o1.Code, i, o2.Code) } case *OperatorEmptyRoot: - o2, ok := w2.Operators[i].(*OperatorEmptyRoot) + _, ok := op2.(*OperatorEmptyRoot) if !ok { - fmt.Fprintf(output, "o1[%d] = %T %+v; o2[%d] = %T %+v\n", i, o1, o1, i, o2, o2) + fmt.Fprintf(output, "OperatorEmptyRoot: o1[%d] = %T; o2[%d] = %T\n", i, o1, i, op2) } case *OperatorExtension: - o2, ok := w2.Operators[i].(*OperatorExtension) + o2, ok := op2.(*OperatorExtension) if !ok { - fmt.Fprintf(output, "o1[%d] = %T %+v; o2[%d] = %T %+v\n", i, o1, o1, i, o2, o2) - } - if !bytes.Equal(o1.Key, o2.Key) { - fmt.Fprintf(output, "extension o1[%d].Key = %x; o2[%d].Key = %x\n", i, o1.Key, i, o2.Key) + fmt.Fprintf(output, "OperatorExtension: o1[%d] = %T; o2[%d] = %T\n", i, o1, i, op2) + } else if !bytes.Equal(o1.Key, o2.Key) { + fmt.Fprintf(output, "OperatorExtension: o1[%d].Key = %x; o2[%d].Key = %x\n", i, o1.Key, i, o2.Key) } case *OperatorLeafAccount: - o2, ok := w2.Operators[i].(*OperatorLeafAccount) + o2, ok := op2.(*OperatorLeafAccount) if !ok { - fmt.Fprintf(output, "o1[%d] = %T %+v; o2[%d] = %T %+v\n", i, o1, o1, i, o2, o2) - } - if !bytes.Equal(o1.Key, o2.Key) { - fmt.Fprintf(output, "leafAcc o1[%d].Key = %x; o2[%d].Key = %x\n", i, o1.Key, i, o2.Key) - } - if o1.Nonce != o2.Nonce { - fmt.Fprintf(output, "leafAcc o1[%d].Nonce = %v; o2[%d].Nonce = %v\n", i, o1.Nonce, i, o2.Nonce) - } - if o1.Balance.String() != o2.Balance.String() { - fmt.Fprintf(output, "leafAcc o1[%d].Balance = %v; o2[%d].Balance = %v\n", i, 
o1.Balance.String(), i, o2.Balance.String()) - } - if o1.HasCode != o2.HasCode { - fmt.Fprintf(output, "leafAcc o1[%d].HasCode = %v; o2[%d].HasCode = %v\n", i, o1.HasCode, i, o2.HasCode) - } - if o1.HasStorage != o2.HasStorage { - fmt.Fprintf(output, "leafAcc o1[%d].HasStorage = %v; o2[%d].HasStorage = %v\n", i, o1.HasStorage, i, o2.HasStorage) + fmt.Fprintf(output, "OperatorLeafAccount: o1[%d] = %T; o2[%d] = %T\n", i, o1, i, op2) + } else { + if !bytes.Equal(o1.Key, o2.Key) { + fmt.Fprintf(output, "OperatorLeafAccount: o1[%d].Key = %x; o2[%d].Key = %x\n", i, o1.Key, i, o2.Key) + } + if o1.Nonce != o2.Nonce { + fmt.Fprintf(output, "OperatorLeafAccount: o1[%d].Nonce = %v; o2[%d].Nonce = %v\n", i, o1.Nonce, i, o2.Nonce) + } + if o1.Balance.String() != o2.Balance.String() { + fmt.Fprintf(output, "OperatorLeafAccount: o1[%d].Balance = %v; o2[%d].Balance = %v\n", i, o1.Balance.String(), i, o2.Balance.String()) + } + if o1.HasCode != o2.HasCode { + fmt.Fprintf(output, "OperatorLeafAccount: o1[%d].HasCode = %v; o2[%d].HasCode = %v\n", i, o1.HasCode, i, o2.HasCode) + } + if o1.HasStorage != o2.HasStorage { + fmt.Fprintf(output, "OperatorLeafAccount: o1[%d].HasStorage = %v; o2[%d].HasStorage = %v\n", i, o1.HasStorage, i, o2.HasStorage) + } } case *OperatorLeafValue: - o2, ok := w2.Operators[i].(*OperatorLeafValue) + o2, ok := op2.(*OperatorLeafValue) if !ok { - fmt.Fprintf(output, "o1[%d] = %T %+v; o2[%d] = %T %+v\n", i, o1, o1, i, o2, o2) + fmt.Fprintf(output, "OperatorLeafValue: o1[%d] = %T; o2[%d] = %T\n", i, o1, i, op2) + } else { + if !bytes.Equal(o1.Key, o2.Key) { + fmt.Fprintf(output, "OperatorLeafValue: o1[%d].Key = %x; o2[%d].Key = %x\n", i, o1.Key, i, o2.Key) + } + if !bytes.Equal(o1.Value, o2.Value) { + fmt.Fprintf(output, "OperatorLeafValue: o1[%d].Value = %x; o2[%d].Value = %x\n", i, o1.Value, i, o2.Value) + } } - if !bytes.Equal(o1.Key, o2.Key) { - fmt.Fprintf(output, "leafVal o1[%d].Key = %x; o2[%d].Key = %x\n", i, o1.Key, i, o2.Key) - } - if 
!bytes.Equal(o1.Value, o2.Value) { - fmt.Fprintf(output, "leafVal o1[%d].Value = %x; o2[%d].Value = %x\n", i, o1.Value, i, o2.Value) + case *OperatorSMTLeafValue: + o2, ok := op2.(*OperatorSMTLeafValue) + if !ok { + fmt.Fprintf(output, "OperatorSMTLeafValue: o1[%d] = %T; o2[%d] = %T\n", i, o1, i, op2) + } else { + if !bytes.Equal(o1.Address, o2.Address) { + fmt.Fprintf(output, "OperatorSMTLeafValue: o1[%d].Address = %x; o2[%d].Address = %x\n", i, o1.Address, i, o2.Address) + } + if !bytes.Equal(o1.StorageKey, o2.StorageKey) { + fmt.Fprintf(output, "OperatorSMTLeafValue: o1[%d].StorageKey = %x; o2[%d].StorageKey = %x\n", i, o1.StorageKey, i, o2.StorageKey) + } + if !bytes.Equal(o1.Value, o2.Value) { + fmt.Fprintf(output, "OperatorSMTLeafValue: o1[%d].Value = %x; o2[%d].Value = %x\n", i, o1.Value, i, o2.Value) + } + if o1.NodeType != o2.NodeType { + fmt.Fprintf(output, "OperatorSMTLeafValue: o1[%d].NodeType = %d; o2[%d].NodeType = %d\n", i, o1.NodeType, i, o2.NodeType) + } } + default: - o2 := w2.Operators[i] - fmt.Fprintf(output, "unexpected o1[%d] = %T %+v; o2[%d] = %T %+v\n", i, o1, o1, i, o2, o2) + fmt.Fprintf(output, "unexpected operator: o1[%d] = %T; o2[%d] = %T\n", i, o1, i, op2) } } } diff --git a/zk/datastream/mock_services/data_stream_server_mock.go b/zk/datastream/mocks/data_stream_server_mock.go similarity index 100% rename from zk/datastream/mock_services/data_stream_server_mock.go rename to zk/datastream/mocks/data_stream_server_mock.go diff --git a/zk/datastream/mock_services/stream_server_mock.go b/zk/datastream/mocks/stream_server_mock.go similarity index 100% rename from zk/datastream/mock_services/stream_server_mock.go rename to zk/datastream/mocks/stream_server_mock.go diff --git a/zk/debug_tools/test-contracts/contracts/GasBurner.sol b/zk/debug_tools/test-contracts/contracts/GasBurner.sol new file mode 100644 index 00000000000..bdc7a8d8db8 --- /dev/null +++ b/zk/debug_tools/test-contracts/contracts/GasBurner.sol @@ -0,0 +1,12 @@ +// 
SPDX-License-Identifier: MIT +pragma solidity ^0.8.15; + +contract GasBurner { + constructor() { + //dynamic array + uint[] memory a = new uint[](12000); + for (uint i = 0; i < 2000; i++) { + a[i%10000] = i; + } + } +} \ No newline at end of file diff --git a/zk/debug_tools/test-contracts/package.json b/zk/debug_tools/test-contracts/package.json index 7022ac7abf5..3164dd36bc4 100644 --- a/zk/debug_tools/test-contracts/package.json +++ b/zk/debug_tools/test-contracts/package.json @@ -21,7 +21,8 @@ "chainCall:local": "npx hardhat compile && npx hardhat run scripts/chain-call.js --network local", "chainCall:sepolia": "npx hardhat compile && npx hardhat run scripts/chain-call.js --network sepolia", "create:local": "npx hardhat compile && npx hardhat run scripts/create.js --network local", - "keccak:local": "npx hardhat compile && npx hardhat run scripts/keccak-loop.js --network local" + "keccak:local": "npx hardhat compile && npx hardhat run scripts/keccak-loop.js --network local", + "gasBurner:local": "npx hardhat compile && npx hardhat run scripts/gas-burner.js --network local" }, "keywords": [], "author": "", diff --git a/zk/debug_tools/test-contracts/scripts/gas-burner.js b/zk/debug_tools/test-contracts/scripts/gas-burner.js new file mode 100644 index 00000000000..e3564d84582 --- /dev/null +++ b/zk/debug_tools/test-contracts/scripts/gas-burner.js @@ -0,0 +1,26 @@ +async function main() { +try { + // Get the ContractFactory of your BigLoopContract + const GasBurnerContract = await hre.ethers.getContractFactory("GasBurner"); + + // Deploy the contract + const contract = await GasBurnerContract.deploy(); + // Wait for the deployment transaction to be mined + await contract.waitForDeployment(); + + console.log(`GasBurner deployed to: ${await contract.getAddress()}`); + + // const result = await contract.bigLoop(10000); + // console.log(result); + } catch (error) { + console.error(error); + process.exit(1); + } +} + +main() + .then(() => process.exit(0)) + .catch(error 
=> { + console.error(error); + process.exit(1); + }); \ No newline at end of file diff --git a/zk/hermez_db/db.go b/zk/hermez_db/db.go index 71b633ecb38..f28a1333e6f 100644 --- a/zk/hermez_db/db.go +++ b/zk/hermez_db/db.go @@ -51,7 +51,8 @@ const FORK_HISTORY = "fork_history" // index const JUST_UNWOUND = "just_unwound" // batch number -> true const PLAIN_STATE_VERSION = "plain_state_version" // batch number -> true const ERIGON_VERSIONS = "erigon_versions" // erigon version -> timestamp of startup -const BATCH_ENDS = "batch_ends" // +const BATCH_ENDS = "batch_ends" // batch number -> true +const WITNESS_CACHE = "witness_cache" // block number -> witness for 1 block var HermezDbTables = []string{ L1VERIFICATIONS, @@ -89,6 +90,7 @@ var HermezDbTables = []string{ ERIGON_VERSIONS, INNER_TX, BATCH_ENDS, + WITNESS_CACHE, } type HermezDb struct { @@ -1888,3 +1890,20 @@ func (db *HermezDbReader) getForkIntervals(forkIdFilter *uint64) ([]types.ForkIn return forkIntervals, nil } + +func (db *HermezDb) WriteWitnessCache(blockNo uint64, witnessBytes []byte) error { + key := Uint64ToBytes(blockNo) + return db.tx.Put(WITNESS_CACHE, key, witnessBytes) +} + +func (db *HermezDbReader) GetWitnessCache(blockNo uint64) ([]byte, error) { + v, err := db.tx.GetOne(WITNESS_CACHE, Uint64ToBytes(blockNo)) + if err != nil { + return nil, err + } + return v, nil +} + +func (db *HermezDb) DeleteWitnessCaches(from, to uint64) error { + return db.deleteFromBucketWithUintKeysRange(WITNESS_CACHE, from, to) +} diff --git a/zk/l1_data/l1_decoder.go b/zk/l1_data/l1_decoder.go index 4427d9760fa..003e9d0ec5d 100644 --- a/zk/l1_data/l1_decoder.go +++ b/zk/l1_data/l1_decoder.go @@ -14,7 +14,6 @@ import ( "github.com/ledgerwatch/erigon/crypto" "github.com/ledgerwatch/erigon/zk/contracts" "github.com/ledgerwatch/erigon/zk/da" - "github.com/ledgerwatch/erigon/zk/hermez_db" zktx "github.com/ledgerwatch/erigon/zk/tx" ) @@ -195,7 +194,12 @@ type DecodedL1Data struct { LimitTimestamp uint64 } -func 
BreakDownL1DataByBatch(batchNo uint64, forkId uint64, reader *hermez_db.HermezDbReader) (*DecodedL1Data, error) { +type l1DecoderHermezReader interface { + GetL1BatchData(batchNo uint64) ([]byte, error) + GetLastL1BatchData() (uint64, error) +} + +func BreakDownL1DataByBatch(batchNo uint64, forkId uint64, reader l1DecoderHermezReader) (*DecodedL1Data, error) { decoded := &DecodedL1Data{} // we expect that the batch we're going to load in next should be in the db already because of the l1 block sync // stage, if it is not there we need to panic as we're in a bad state diff --git a/zk/l1infotree/updater.go b/zk/l1infotree/updater.go index 28a8e8176ff..d1b5ac4362c 100644 --- a/zk/l1infotree/updater.go +++ b/zk/l1infotree/updater.go @@ -140,7 +140,7 @@ LOOP: tree, err := InitialiseL1InfoTree(hermezDb) if err != nil { - return nil, err + return nil, fmt.Errorf("InitialiseL1InfoTree: %w", err) } // process the logs in chunks @@ -153,7 +153,7 @@ LOOP: headersMap, err := u.syncer.L1QueryHeaders(chunk) if err != nil { - return nil, err + return nil, fmt.Errorf("L1QueryHeaders: %w", err) } for _, l := range chunk { @@ -163,13 +163,13 @@ LOOP: if header == nil { header, err = u.syncer.GetHeader(l.BlockNumber) if err != nil { - return nil, err + return nil, fmt.Errorf("GetHeader: %w", err) } } tmpUpdate, err := createL1InfoTreeUpdate(l, header) if err != nil { - return nil, err + return nil, fmt.Errorf("createL1InfoTreeUpdate: %w", err) } leafHash := HashLeafData(tmpUpdate.GER, tmpUpdate.ParentHash, tmpUpdate.Timestamp) @@ -185,7 +185,7 @@ LOOP: newRoot, err := tree.AddLeaf(uint32(u.latestUpdate.Index), leafHash) if err != nil { - return nil, err + return nil, fmt.Errorf("tree.AddLeaf: %w", err) } log.Debug("New L1 Index", "index", u.latestUpdate.Index, @@ -197,13 +197,13 @@ LOOP: ) if err = handleL1InfoTreeUpdate(hermezDb, u.latestUpdate); err != nil { - return nil, err + return nil, fmt.Errorf("handleL1InfoTreeUpdate: %w", err) } if err = 
hermezDb.WriteL1InfoTreeLeaf(u.latestUpdate.Index, leafHash); err != nil { - return nil, err + return nil, fmt.Errorf("WriteL1InfoTreeLeaf: %w", err) } if err = hermezDb.WriteL1InfoTreeRoot(common.BytesToHash(newRoot[:]), u.latestUpdate.Index); err != nil { - return nil, err + return nil, fmt.Errorf("WriteL1InfoTreeRoot: %w", err) } processed++ @@ -218,7 +218,7 @@ LOOP: u.progress = allLogs[len(allLogs)-1].BlockNumber + 1 } if err = stages.SaveStageProgress(tx, stages.L1InfoTree, u.progress); err != nil { - return nil, err + return nil, fmt.Errorf("SaveStageProgress: %w", err) } return allLogs, nil @@ -242,7 +242,7 @@ func chunkLogs(slice []types.Log, chunkSize int) [][]types.Log { func InitialiseL1InfoTree(hermezDb *hermez_db.HermezDb) (*L1InfoTree, error) { leaves, err := hermezDb.GetAllL1InfoTreeLeaves() if err != nil { - return nil, err + return nil, fmt.Errorf("GetAllL1InfoTreeLeaves: %w", err) } allLeaves := make([][32]byte, len(leaves)) @@ -252,7 +252,7 @@ func InitialiseL1InfoTree(hermezDb *hermez_db.HermezDb) (*L1InfoTree, error) { tree, err := NewL1InfoTree(32, allLeaves) if err != nil { - return nil, err + return nil, fmt.Errorf("NewL1InfoTree: %w", err) } return tree, nil @@ -289,10 +289,10 @@ func handleL1InfoTreeUpdate( ) error { var err error if err = hermezDb.WriteL1InfoTreeUpdate(update); err != nil { - return err + return fmt.Errorf("WriteL1InfoTreeUpdate: %w", err) } if err = hermezDb.WriteL1InfoTreeUpdateToGer(update); err != nil { - return err + return fmt.Errorf("WriteL1InfoTreeUpdateToGer: %w", err) } return nil } diff --git a/zk/smt/changes_getter.go b/zk/smt/changes_getter.go new file mode 100644 index 00000000000..0e89700e14a --- /dev/null +++ b/zk/smt/changes_getter.go @@ -0,0 +1,200 @@ +package smt + +import ( + "errors" + "fmt" + + "github.com/ledgerwatch/erigon-lib/common" + "github.com/ledgerwatch/erigon-lib/common/length" + "github.com/ledgerwatch/erigon-lib/kv" + + "github.com/holiman/uint256" + 
"github.com/ledgerwatch/erigon-lib/kv/dbutils" + "github.com/ledgerwatch/erigon/core/types/accounts" + + "github.com/ledgerwatch/erigon/core/state" + "github.com/ledgerwatch/erigon/core/systemcontracts" + "github.com/status-im/keycard-go/hexutils" +) + +var ( + ErrAlreadyOpened = errors.New("already opened") + ErrNotOpened = errors.New("not opened") +) + +type changesGetter struct { + tx kv.Tx + + ac kv.CursorDupSort + sc kv.CursorDupSort + psr *state.PlainState + currentPsr *state.PlainStateReader + + accChanges map[common.Address]*accounts.Account + codeChanges map[common.Address]string + storageChanges map[common.Address]map[string]string + + opened bool +} + +func NewChangesGetter(tx kv.Tx) *changesGetter { + return &changesGetter{ + tx: tx, + accChanges: make(map[common.Address]*accounts.Account), + codeChanges: make(map[common.Address]string), + storageChanges: make(map[common.Address]map[string]string), + } +} +func (cg *changesGetter) addDeletedAcc(addr common.Address) { + deletedAcc := new(accounts.Account) + deletedAcc.Balance = *uint256.NewInt(0) + deletedAcc.Nonce = 0 + cg.accChanges[addr] = deletedAcc +} + +func (cg *changesGetter) openChangesGetter(from uint64) error { + if cg.opened { + return ErrAlreadyOpened + } + + ac, err := cg.tx.CursorDupSort(kv.AccountChangeSet) + if err != nil { + return fmt.Errorf("CursorDupSort: %w", err) + } + + sc, err := cg.tx.CursorDupSort(kv.StorageChangeSet) + if err != nil { + return fmt.Errorf("CursorDupSort: %w", err) + } + + cg.ac = ac + cg.sc = sc + cg.psr = state.NewPlainState(cg.tx, from, systemcontracts.SystemContractCodeLookup["Hermez"]) + cg.currentPsr = state.NewPlainStateReader(cg.tx) + + cg.opened = true + + return nil +} + +func (cg *changesGetter) closeChangesGetter() { + if cg.ac != nil { + cg.ac.Close() + } + + if cg.sc != nil { + cg.sc.Close() + } + + if cg.psr != nil { + cg.psr.Close() + } +} + +func (cg *changesGetter) getChangesForBlock(blockNum uint64) error { + if !cg.opened { + return 
ErrNotOpened + } + + cg.psr.SetBlockNr(blockNum) + dupSortKey := dbutils.EncodeBlockNumber(blockNum) + + // collect changes to accounts and code + for _, v, err2 := cg.ac.SeekExact(dupSortKey); err2 == nil && v != nil; _, v, err2 = cg.ac.NextDup() { + if err := cg.setAccountChangesFromV(v); err != nil { + return fmt.Errorf("failed to get account changes: %w", err) + } + } + + if err := cg.tx.ForPrefix(kv.StorageChangeSet, dupSortKey, cg.setStorageChangesFromKv); err != nil { + return fmt.Errorf("failed to get storage changes: %w", err) + } + + return nil +} + +func (cg *changesGetter) setAccountChangesFromV(v []byte) error { + addr := common.BytesToAddress(v[:length.Addr]) + + // if the account was created in this changeset we should delete it + if len(v[length.Addr:]) == 0 { + cg.codeChanges[addr] = "" + cg.addDeletedAcc(addr) + return nil + } + + oldAcc, err := cg.psr.ReadAccountData(addr) + if err != nil { + return fmt.Errorf("ReadAccountData: %w", err) + } + + // currAcc at block we're unwinding from + currAcc, err := cg.currentPsr.ReadAccountData(addr) + if err != nil { + return fmt.Errorf("ReadAccountData: %w", err) + } + + if oldAcc.Incarnation > 0 { + if len(v) == 0 { // self-destructed + cg.addDeletedAcc(addr) + } else { + if currAcc.Incarnation > oldAcc.Incarnation { + cg.addDeletedAcc(addr) + } + } + } + + // store the account + cg.accChanges[addr] = oldAcc + + if oldAcc.CodeHash != currAcc.CodeHash { + hexcc, err := cg.getCodehashChanges(addr, oldAcc) + if err != nil { + return fmt.Errorf("getCodehashChanges: %w", err) + } + cg.codeChanges[addr] = hexcc + } + + return nil +} + +func (cg *changesGetter) getCodehashChanges(addr common.Address, oldAcc *accounts.Account) (string, error) { + cc, err := cg.currentPsr.ReadAccountCode(addr, oldAcc.Incarnation, oldAcc.CodeHash) + if err != nil { + return "", fmt.Errorf("ReadAccountCode: %w", err) + } + + ach := hexutils.BytesToHex(cc) + hexcc := "" + if len(ach) > 0 { + hexcc = "0x" + ach + } + + return hexcc, 
nil +} + +func (cg *changesGetter) setStorageChangesFromKv(sk, sv []byte) error { + changesetKey := sk[length.BlockNum:] + address, _ := dbutils.PlainParseStoragePrefix(changesetKey) + + sstorageKey := sv[:length.Hash] + stk := common.BytesToHash(sstorageKey) + + value := []byte{0} + if len(sv[length.Hash:]) != 0 { + value = sv[length.Hash:] + } + + stkk := fmt.Sprintf("0x%032x", stk) + v := fmt.Sprintf("0x%032x", common.BytesToHash(value)) + + m := make(map[string]string) + m[stkk] = v + + if cg.storageChanges[address] == nil { + cg.storageChanges[address] = make(map[string]string) + } + cg.storageChanges[address][stkk] = v + + return nil +} diff --git a/zk/smt/unwind_smt.go b/zk/smt/unwind_smt.go new file mode 100644 index 00000000000..e02203ce115 --- /dev/null +++ b/zk/smt/unwind_smt.go @@ -0,0 +1,91 @@ +package smt + +import ( + "context" + "fmt" + "math" + + "github.com/ledgerwatch/erigon-lib/common" + "github.com/ledgerwatch/erigon-lib/kv" + db2 "github.com/ledgerwatch/erigon/smt/pkg/db" + + "github.com/ledgerwatch/erigon-lib/kv/membatchwithdb" + + "github.com/ledgerwatch/erigon/smt/pkg/smt" + "github.com/ledgerwatch/erigon/turbo/trie" + "github.com/ledgerwatch/erigon/zk" + "github.com/ledgerwatch/erigon/zkevm/log" +) + +func UnwindZkSMT(ctx context.Context, logPrefix string, from, to uint64, tx kv.RwTx, checkRoot bool, expectedRootHash *common.Hash, quiet bool) (common.Hash, error) { + if !quiet { + log.Info(fmt.Sprintf("[%s] Unwind trie hashes started", logPrefix)) + defer log.Info(fmt.Sprintf("[%s] Unwind ended", logPrefix)) + } + + eridb := db2.NewEriDb(tx) + eridb.RollbackBatch() + + dbSmt := smt.NewSMT(eridb, false) + + if !quiet { + log.Info(fmt.Sprintf("[%s]", logPrefix), "last root", common.BigToHash(dbSmt.LastRoot())) + } + + // only open the batch if tx is not already one + if _, ok := tx.(*membatchwithdb.MemoryMutation); !ok { + quit := make(chan struct{}) + eridb.OpenBatch(quit) + } + + changesGetter := NewChangesGetter(tx) + if err := 
changesGetter.openChangesGetter(from); err != nil { + return trie.EmptyRoot, fmt.Errorf("OpenChangesGetter: %w", err) + } + defer changesGetter.closeChangesGetter() + + total := uint64(math.Abs(float64(from) - float64(to) + 1)) + progressChan, stopPrinter := zk.ProgressPrinter(fmt.Sprintf("[%s] Progress unwinding", logPrefix), total, quiet) + defer stopPrinter() + + // walk backwards through the blocks, applying state changes, and deletes + // PlainState contains data AT the block + // History tables contain data BEFORE the block - so need a +1 offset + for i := from; i >= to+1; i-- { + select { + case <-ctx.Done(): + return trie.EmptyRoot, fmt.Errorf("context done") + default: + } + + if err := changesGetter.getChangesForBlock(i); err != nil { + return trie.EmptyRoot, fmt.Errorf("getChangesForBlock: %w", err) + } + + progressChan <- 1 + } + + stopPrinter() + + if _, _, err := dbSmt.SetStorage(ctx, logPrefix, changesGetter.accChanges, changesGetter.codeChanges, changesGetter.storageChanges); err != nil { + return trie.EmptyRoot, err + } + + lr := dbSmt.LastRoot() + + hash := common.BigToHash(lr) + if checkRoot && hash != *expectedRootHash { + log.Error("failed to verify hash") + return trie.EmptyRoot, fmt.Errorf("wrong trie root: %x, expected (from header): %x", hash, expectedRootHash) + } + + if !quiet { + log.Info(fmt.Sprintf("[%s] Trie root matches", logPrefix), "hash", hash.Hex()) + } + + if err := eridb.CommitBatch(); err != nil { + return trie.EmptyRoot, err + } + + return hash, nil +} diff --git a/zk/stages/stage_batches.go b/zk/stages/stage_batches.go index 00d99cf48b9..df96aa808b7 100644 --- a/zk/stages/stage_batches.go +++ b/zk/stages/stage_batches.go @@ -5,29 +5,29 @@ import ( "errors" "fmt" "math/big" + "os" "sync/atomic" + "syscall" "time" "github.com/ledgerwatch/erigon-lib/chain" "github.com/ledgerwatch/erigon-lib/common" - "github.com/ledgerwatch/erigon-lib/kv" + "github.com/ledgerwatch/erigon/core/rawdb" + "github.com/ledgerwatch/erigon/core/state" 
ethTypes "github.com/ledgerwatch/erigon/core/types" + "github.com/ledgerwatch/erigon/eth/ethconfig" "github.com/ledgerwatch/erigon/eth/stagedsync" "github.com/ledgerwatch/erigon/eth/stagedsync/stages" "github.com/ledgerwatch/erigon/params" "github.com/ledgerwatch/erigon/zk" + "github.com/ledgerwatch/erigon/zk/datastream/client" "github.com/ledgerwatch/erigon/zk/datastream/types" "github.com/ledgerwatch/erigon/zk/erigon_db" "github.com/ledgerwatch/erigon/zk/hermez_db" "github.com/ledgerwatch/erigon/zk/sequencer" - - "github.com/ledgerwatch/erigon/core/rawdb" - "github.com/ledgerwatch/erigon/core/state" - "github.com/ledgerwatch/erigon/eth/ethconfig" "github.com/ledgerwatch/log/v3" - "github.com/ledgerwatch/erigon/zk/datastream/client" ) const ( @@ -139,7 +139,7 @@ func SpawnStageBatches( var err error tx, err = cfg.db.BeginRw(ctx) if err != nil { - return fmt.Errorf("failed to open tx, %w", err) + return fmt.Errorf("cfg.db.BeginRw, %w", err) } defer tx.Rollback() } @@ -149,14 +149,23 @@ func SpawnStageBatches( stageProgressBlockNo, err := stages.GetStageProgress(tx, stages.Batches) if err != nil { - return fmt.Errorf("save stage progress error: %v", err) + return fmt.Errorf("GetStageProgress: %w", err) } //// BISECT //// - if cfg.zkCfg.DebugLimit > 0 && stageProgressBlockNo > cfg.zkCfg.DebugLimit { - log.Info(fmt.Sprintf("[%s] Debug limit reached", logPrefix), "stageProgressBlockNo", stageProgressBlockNo, "debugLimit", cfg.zkCfg.DebugLimit) - time.Sleep(2 * time.Second) - return nil + if cfg.zkCfg.DebugLimit > 0 { + finishProg, err := stages.GetStageProgress(tx, stages.Finish) + if err != nil { return fmt.Errorf("GetStageProgress: %w", err) + } + if finishProg >= cfg.zkCfg.DebugLimit { + log.Info(fmt.Sprintf("[%s] Debug limit reached", logPrefix), "finishProg", finishProg, "debugLimit", cfg.zkCfg.DebugLimit) + syscall.Kill(os.Getpid(), syscall.SIGINT) + } + + if stageProgressBlockNo >= cfg.zkCfg.DebugLimit { + log.Info(fmt.Sprintf("[%s] Debug limit reached", logPrefix), "stageProgressBlockNo", 
stageProgressBlockNo, "debugLimit", cfg.zkCfg.DebugLimit) + return nil + } } // this limit is blocknumber not included, so up to limit-1 @@ -169,20 +178,20 @@ func SpawnStageBatches( // get batch for batches progress stageProgressBatchNo, err := hermezDb.GetBatchNoByL2Block(stageProgressBlockNo) if err != nil && !errors.Is(err, hermez_db.ErrorNotStored) { - return fmt.Errorf("get batch no by l2 block error: %v", err) + return fmt.Errorf("GetBatchNoByL2Block: %w", err) } startSyncTime := time.Now() latestForkId, err := stages.GetStageProgress(tx, stages.ForkId) if err != nil { - return err + return fmt.Errorf("GetStageProgress: %w", err) } dsQueryClient, stopDsClient, err := newStreamClient(ctx, cfg, latestForkId) if err != nil { log.Warn(fmt.Sprintf("[%s] %s", logPrefix, err)) - return err + return fmt.Errorf("newStreamClient: %w", err) } defer stopDsClient() @@ -243,12 +252,12 @@ func SpawnStageBatches( _, highestL1InfoTreeIndex, err := hermezDb.GetLatestBlockL1InfoTreeIndexProgress() if err != nil { - return fmt.Errorf("failed to get highest used l1 info index, %w", err) + return fmt.Errorf("GetLatestBlockL1InfoTreeIndexProgress: %w", err) } stageExecProgress, err := stages.GetStageProgress(tx, stages.Execution) if err != nil { - return fmt.Errorf("failed to get stage exec progress, %w", err) + return fmt.Errorf("GetStageProgress: %w", err) } // just exit the stage early if there is more execution work to do @@ -261,12 +270,12 @@ func SpawnStageBatches( lastProcessedBlockHash, err := eriDb.ReadCanonicalHash(stageProgressBlockNo) if err != nil { - return fmt.Errorf("failed to read canonical hash for block %d: %w", stageProgressBlockNo, err) + return fmt.Errorf("ReadCanonicalHash %d: %w", stageProgressBlockNo, err) } batchProcessor, err := NewBatchesProcessor(ctx, logPrefix, tx, hermezDb, eriDb, cfg.zkCfg.SyncLimit, cfg.zkCfg.DebugLimit, cfg.zkCfg.DebugStepAfter, cfg.zkCfg.DebugStep, stageProgressBlockNo, stageProgressBatchNo, lastProcessedBlockHash, dsQueryClient, 
progressChan, cfg.chainConfig, cfg.miningConfig, unwindFn) if err != nil { - return err + return fmt.Errorf("NewBatchesProcessor: %w", err) } // start routine to download blocks and push them in a channel @@ -291,12 +300,17 @@ func SpawnStageBatches( log.Warn("Error in datastream client, stopping consumption") endLoop = true case entry := <-*entryChan: + // DEBUG LIMIT - don't write more than we need to + if cfg.zkCfg.DebugLimit > 0 && batchProcessor.LastBlockHeight() >= cfg.zkCfg.DebugLimit { + endLoop = true + break + } if endLoop, err = batchProcessor.ProcessEntry(entry); err != nil { // if we triggered an unwind somewhere we need to return from the stage if err == ErrorTriggeredUnwind { return nil } - return err + return fmt.Errorf("ProcessEntry: %w", err) } dsClientProgress.Store(batchProcessor.LastBlockHeight()) case <-ctx.Done(): @@ -315,10 +329,10 @@ func SpawnStageBatches( // commit progress from time to time if batchProcessor.TotalBlocksWritten() != prevAmountBlocksWritten && batchProcessor.TotalBlocksWritten()%STAGE_PROGRESS_SAVE == 0 { if err = saveStageProgress(tx, logPrefix, batchProcessor.HighestHashableL2BlockNo(), batchProcessor.HighestSeenBatchNumber(), batchProcessor.LastBlockHeight(), batchProcessor.LastForkId()); err != nil { - return err + return fmt.Errorf("saveStageProgress: %w", err) } if err := hermezDb.WriteBlockL1InfoTreeIndexProgress(batchProcessor.LastBlockHeight(), highestL1InfoTreeIndex); err != nil { - return err + return fmt.Errorf("WriteBlockL1InfoTreeIndexProgress: %w", err) } if freshTx { @@ -345,10 +359,10 @@ func SpawnStageBatches( } if err = saveStageProgress(tx, logPrefix, batchProcessor.HighestHashableL2BlockNo(), batchProcessor.HighestSeenBatchNumber(), batchProcessor.LastBlockHeight(), batchProcessor.LastForkId()); err != nil { - return err + return fmt.Errorf("saveStageProgress: %w", err) } if err := hermezDb.WriteBlockL1InfoTreeIndexProgress(batchProcessor.LastBlockHeight(), highestL1InfoTreeIndex); err != nil { - 
return err + return fmt.Errorf("WriteBlockL1InfoTreeIndexProgress: %w", err) } // stop printing blocks written progress routine @@ -357,7 +371,7 @@ func SpawnStageBatches( if freshTx { if err := tx.Commit(); err != nil { - return fmt.Errorf("failed to commit tx, %w", err) + return fmt.Errorf("tx.Commit: %w", err) } } @@ -368,27 +382,27 @@ func saveStageProgress(tx kv.RwTx, logPrefix string, highestHashableL2BlockNo, h var err error // store the highest hashable block number if err := stages.SaveStageProgress(tx, stages.HighestHashableL2BlockNo, highestHashableL2BlockNo); err != nil { - return fmt.Errorf("save stage progress error: %v", err) + return fmt.Errorf("SaveStageProgress: %w", err) } if err = stages.SaveStageProgress(tx, stages.HighestSeenBatchNumber, highestSeenBatchNo); err != nil { - return fmt.Errorf("save stage progress error: %v", err) + return fmt.Errorf("SaveStageProgress: %w", err) } // store the highest seen forkid if err := stages.SaveStageProgress(tx, stages.ForkId, lastForkId); err != nil { - return fmt.Errorf("save stage progress error: %v", err) + return fmt.Errorf("SaveStageProgress: %w", err) } // save the latest verified batch number as well just in case this node is upgraded // to a sequencer in the future if err := stages.SaveStageProgress(tx, stages.SequenceExecutorVerify, highestSeenBatchNo); err != nil { - return fmt.Errorf("save stage progress error: %w", err) + return fmt.Errorf("SaveStageProgress: %w", err) } log.Info(fmt.Sprintf("[%s] Saving stage progress", logPrefix), "lastBlockHeight", lastBlockHeight) if err := stages.SaveStageProgress(tx, stages.Batches, lastBlockHeight); err != nil { - return fmt.Errorf("save stage progress error: %v", err) + return fmt.Errorf("SaveStageProgress: %w", err) } return nil @@ -399,9 +413,8 @@ func UnwindBatchesStage(u *stagedsync.UnwindState, tx kv.RwTx, cfg BatchesCfg, c useExternalTx := tx != nil if !useExternalTx { - tx, err = cfg.db.BeginRw(ctx) - if err != nil { - return err + if tx, err = 
cfg.db.BeginRw(ctx); err != nil { + return fmt.Errorf("cfg.db.BeginRw: %w", err) } defer tx.Rollback() } @@ -418,20 +431,20 @@ func UnwindBatchesStage(u *stagedsync.UnwindState, tx kv.RwTx, cfg BatchesCfg, c ////////////////////////////////// highestVerifiedBatch, err := stages.GetStageProgress(tx, stages.L1VerificationsBatchNo) if err != nil { - return errors.New("could not retrieve l1 verifications batch no progress") + return fmt.Errorf("GetStageProgress: %w", err) } fromBatchPrev, err := hermezDb.GetBatchNoByL2Block(fromBlock - 1) if err != nil && !errors.Is(err, hermez_db.ErrorNotStored) { - return fmt.Errorf("get batch no by l2 block error: %v", err) + return fmt.Errorf("GetBatchNoByL2Block: %w", err) } fromBatch, err := hermezDb.GetBatchNoByL2Block(fromBlock) if err != nil && !errors.Is(err, hermez_db.ErrorNotStored) { - return fmt.Errorf("get fromBatch no by l2 block error: %v", err) + return fmt.Errorf("GetBatchNoByL2Block: %w", err) } toBatch, err := hermezDb.GetBatchNoByL2Block(toBlock) if err != nil && !errors.Is(err, hermez_db.ErrorNotStored) { - return fmt.Errorf("get toBatch no by l2 block error: %v", err) + return fmt.Errorf("GetBatchNoByL2Block: %w", err) } // if previous block has different batch, delete the "fromBlock" one @@ -443,16 +456,16 @@ func UnwindBatchesStage(u *stagedsync.UnwindState, tx kv.RwTx, cfg BatchesCfg, c if fromBatch <= toBatch { if err := hermezDb.DeleteForkIds(fromBatch, toBatch); err != nil { - return fmt.Errorf("delete fork ids error: %v", err) + return fmt.Errorf("DeleteForkIds: %w", err) } if err := hermezDb.DeleteBatchGlobalExitRoots(fromBatch); err != nil { - return fmt.Errorf("delete batch global exit roots error: %v", err) + return fmt.Errorf("DeleteBatchGlobalExitRoots: %w", err) } } if highestVerifiedBatch >= fromBatch { if err := rawdb.DeleteForkchoiceFinalized(tx); err != nil { - return fmt.Errorf("delete forkchoice finalized error: %v", err) + return fmt.Errorf("DeleteForkchoiceFinalized: %w", err) } } 
///////////////////////////////////////// @@ -462,19 +475,19 @@ func UnwindBatchesStage(u *stagedsync.UnwindState, tx kv.RwTx, cfg BatchesCfg, c // cannot unwind EffectiveGasPricePercentage here although it is written in stage batches, because we have already deleted the transactions if err := hermezDb.DeleteStateRoots(fromBlock, toBlock); err != nil { - return fmt.Errorf("delete state roots error: %v", err) + return fmt.Errorf("DeleteStateRoots: %w", err) } if err := hermezDb.DeleteIntermediateTxStateRoots(fromBlock, toBlock); err != nil { - return fmt.Errorf("delete intermediate tx state roots error: %v", err) + return fmt.Errorf("DeleteIntermediateTxStateRoots: %w", err) } if err = rawdb.TruncateBlocks(ctx, tx, fromBlock); err != nil { - return fmt.Errorf("delete blocks: %w", err) + return fmt.Errorf("TruncateBlocks: %w", err) } if err := hermezDb.DeleteBlockBatches(fromBlock, toBlock); err != nil { - return fmt.Errorf("delete block batches error: %v", err) + return fmt.Errorf("DeleteBlockBatches: %w", err) } if err := hermezDb.DeleteForkIdBlock(fromBlock, toBlock); err != nil { - return fmt.Errorf("delete fork id block error: %v", err) + return fmt.Errorf("DeleteForkIdBlock: %w", err) } ////////////////////////////////////////////////////// @@ -483,31 +496,31 @@ func UnwindBatchesStage(u *stagedsync.UnwindState, tx kv.RwTx, cfg BatchesCfg, c ////////////////////////////////////////////////////// gers, err := hermezDb.GetBlockGlobalExitRoots(fromBlock, toBlock) if err != nil { - return fmt.Errorf("get block global exit roots error: %v", err) + return fmt.Errorf("GetBlockGlobalExitRoots: %w", err) } if err := hermezDb.DeleteGlobalExitRoots(&gers); err != nil { - return fmt.Errorf("delete global exit roots error: %v", err) + return fmt.Errorf("DeleteGlobalExitRoots: %w", err) } if err = hermezDb.DeleteLatestUsedGers(fromBlock, toBlock); err != nil { - return fmt.Errorf("delete latest used gers error: %v", err) + return fmt.Errorf("DeleteLatestUsedGers: %w", err) } 
if err := hermezDb.DeleteBlockGlobalExitRoots(fromBlock, toBlock); err != nil { - return fmt.Errorf("delete block global exit roots error: %v", err) + return fmt.Errorf("DeleteBlockGlobalExitRoots: %w", err) } if err := hermezDb.DeleteBlockL1BlockHashes(fromBlock, toBlock); err != nil { - return fmt.Errorf("delete block l1 block hashes error: %v", err) + return fmt.Errorf("DeleteBlockL1BlockHashes: %w", err) } if err = hermezDb.DeleteReusedL1InfoTreeIndexes(fromBlock, toBlock); err != nil { - return fmt.Errorf("write reused l1 info tree index error: %w", err) + return fmt.Errorf("DeleteReusedL1InfoTreeIndexes: %w", err) } if err = hermezDb.DeleteBatchEnds(fromBlock, toBlock); err != nil { - return fmt.Errorf("delete batch ends error: %v", err) + return fmt.Errorf("DeleteBatchEnds: %w", err) } /////////////////////////////////////////////////////// @@ -518,7 +531,7 @@ func UnwindBatchesStage(u *stagedsync.UnwindState, tx kv.RwTx, cfg BatchesCfg, c stageprogress = fromBlock - 1 } if err := stages.SaveStageProgress(tx, stages.Batches, stageprogress); err != nil { - return fmt.Errorf("save stage progress error: %v", err) + return fmt.Errorf("SaveStageProgress: %w", err) } log.Info(fmt.Sprintf("[%s] Saving stage progress", logPrefix), "fromBlock", stageprogress) @@ -530,15 +543,15 @@ func UnwindBatchesStage(u *stagedsync.UnwindState, tx kv.RwTx, cfg BatchesCfg, c // this is the last block of the previous batch and the highest hashable block for verifications lastBatchHighestBlock, _, err := hermezDb.GetHighestBlockInBatch(fromBatchPrev - 1) if err != nil { - return fmt.Errorf("get batch highest block error: %w", err) + return fmt.Errorf("GetHighestBlockInBatch: %w", err) } if err := stages.SaveStageProgress(tx, stages.HighestHashableL2BlockNo, lastBatchHighestBlock); err != nil { - return fmt.Errorf("save stage progress error: %v", err) + return fmt.Errorf("SaveStageProgress: %w", err) } if err = stages.SaveStageProgress(tx, stages.SequenceExecutorVerify, 
fromBatchPrev); err != nil { - return fmt.Errorf("save stage progress error: %v", err) + return fmt.Errorf("SaveStageProgress: %w", err) } ///////////////////////////////////////////////////// @@ -550,10 +563,10 @@ func UnwindBatchesStage(u *stagedsync.UnwindState, tx kv.RwTx, cfg BatchesCfg, c ////////////////////////////////// forkId, err := hermezDb.GetForkId(fromBatchPrev) if err != nil { - return fmt.Errorf("get fork id error: %v", err) + return fmt.Errorf("GetForkId: %w", err) } if err := stages.SaveStageProgress(tx, stages.ForkId, forkId); err != nil { - return fmt.Errorf("save stage progress error: %v", err) + return fmt.Errorf("SaveStageProgress: %w", err) } ///////////////////////////////////////// // finish store the highest seen forkid// @@ -568,7 +581,7 @@ func UnwindBatchesStage(u *stagedsync.UnwindState, tx kv.RwTx, cfg BatchesCfg, c } if err := hermezDb.DeleteBlockL1InfoTreeIndexes(fromBlock, toBlock); err != nil { - return fmt.Errorf("delete block l1 block hashes error: %v", err) + return fmt.Errorf("DeleteBlockL1InfoTreeIndexes: %w", err) } //////////////////////////////////////////////// @@ -576,15 +589,15 @@ func UnwindBatchesStage(u *stagedsync.UnwindState, tx kv.RwTx, cfg BatchesCfg, c //////////////////////////////////////////////// if err = stages.SaveStageProgress(tx, stages.HighestSeenBatchNumber, fromBatchPrev); err != nil { - return fmt.Errorf("save stage progress error: %v", err) + return fmt.Errorf("SaveStageProgress: %w", err) } if err := u.Done(tx); err != nil { - return err + return fmt.Errorf("u.Done: %w", err) } if !useExternalTx { if err := tx.Commit(); err != nil { - return err + return fmt.Errorf("tx.Commit: %w", err) } } return nil @@ -596,7 +609,7 @@ func PruneBatchesStage(s *stagedsync.PruneState, tx kv.RwTx, cfg BatchesCfg, ctx if !useExternalTx { tx, err = cfg.db.BeginRw(ctx) if err != nil { - return err + return fmt.Errorf("cfg.db.BeginRw: %w", err) } defer tx.Rollback() } @@ -608,26 +621,32 @@ func PruneBatchesStage(s 
*stagedsync.PruneState, tx kv.RwTx, cfg BatchesCfg, ctx toBlock, err := stages.GetStageProgress(tx, stages.Batches) if err != nil { - return fmt.Errorf("get stage datastream progress error: %v", err) + return fmt.Errorf("GetStageProgress: %w", err) } if err = rawdb.TruncateBlocks(ctx, tx, 1); err != nil { - return fmt.Errorf("delete blocks: %w", err) + return fmt.Errorf("TruncateBlocks: %w", err) } - hermezDb.DeleteForkIds(0, toBlock) - hermezDb.DeleteBlockBatches(0, toBlock) - hermezDb.DeleteBlockGlobalExitRoots(0, toBlock) + if err := hermezDb.DeleteForkIds(0, toBlock); err != nil { + return fmt.Errorf("DeleteForkIds: %w", err) + } + if err := hermezDb.DeleteBlockBatches(0, toBlock); err != nil { + return fmt.Errorf("DeleteBlockBatches: %w", err) + } + if err := hermezDb.DeleteBlockGlobalExitRoots(0, toBlock); err != nil { + return fmt.Errorf("DeleteBlockGlobalExitRoots: %w", err) + } log.Info(fmt.Sprintf("[%s] Deleted headers, bodies, forkIds and blockBatches.", logPrefix)) log.Info(fmt.Sprintf("[%s] Saving stage progress", logPrefix), "stageProgress", 0) if err := stages.SaveStageProgress(tx, stages.Batches, 0); err != nil { - return fmt.Errorf("save stage progress error: %v", err) + return fmt.Errorf("SaveStageProgress: %w", err) } if !useExternalTx { if err := tx.Commit(); err != nil { - return err + return fmt.Errorf("tx.Commit: %w", err) } } return nil @@ -659,17 +678,17 @@ func rollback( }() ancestorBlockNum, ancestorBlockHash, err := findCommonAncestor(cfg, eriDb, hermezDb, l2BlockReaderRpc{}, latestDSBlockNum) if err != nil { - return 0, err + return 0, fmt.Errorf("findCommonAncestor: %w", err) } log.Debug(fmt.Sprintf("[%s] The common ancestor for datastream and db is block %d (%s)", logPrefix, ancestorBlockNum, ancestorBlockHash)) unwindBlockNum, unwindBlockHash, batchNum, err := getUnwindPoint(eriDb, hermezDb, ancestorBlockNum, ancestorBlockHash) if err != nil { - return 0, err + return 0, fmt.Errorf("getUnwindPoint: %w", err) } if err = 
stages.SaveStageProgress(tx, stages.HighestSeenBatchNumber, batchNum-1); err != nil { - return 0, err + return 0, fmt.Errorf("SaveStageProgress: %w", err) } log.Warn(fmt.Sprintf("[%s] Unwinding to block %d (%s)", logPrefix, unwindBlockNum, unwindBlockHash)) @@ -720,12 +739,12 @@ func findCommonAncestor( midBlockDbHash, err := db.ReadCanonicalHash(midBlockNum) if err != nil { - return 0, emptyHash, fmt.Errorf("ReadCanonicalHash: failed to get canonical hash for block %d: %w", midBlockNum, err) + return 0, emptyHash, fmt.Errorf("ReadCanonicalHash block %d: %w", midBlockNum, err) } dbBatchNum, err := hermezDb.GetBatchNoByL2Block(midBlockNum) if err != nil { - return 0, emptyHash, fmt.Errorf("GetBatchNoByL2Block: failed to get batch number for block %d: %w", midBlockNum, err) + return 0, emptyHash, fmt.Errorf("GetBatchNoByL2Block block %d: %w", midBlockNum, err) } if headerHash != (common.Hash{}) && @@ -751,7 +770,7 @@ func findCommonAncestor( func getUnwindPoint(eriDb erigon_db.ReadOnlyErigonDb, hermezDb state.ReadOnlyHermezDb, blockNum uint64, blockHash common.Hash) (uint64, common.Hash, uint64, error) { batchNum, err := hermezDb.GetBatchNoByL2Block(blockNum) if err != nil { - return 0, emptyHash, 0, err + return 0, emptyHash, 0, fmt.Errorf("GetBatchNoByL2Block: block %d (%s): %w", blockNum, blockHash, err) } if batchNum == 0 { @@ -761,12 +780,12 @@ func getUnwindPoint(eriDb erigon_db.ReadOnlyErigonDb, hermezDb state.ReadOnlyHer unwindBlockNum, _, err := hermezDb.GetHighestBlockInBatch(batchNum - 1) if err != nil { - return 0, emptyHash, 0, fmt.Errorf("GetHighestBlockInBatch: batch %d: %w", batchNum-1, err) + return 0, emptyHash, 0, fmt.Errorf("GetHighestBlockInBatch batch %d: %w", batchNum-1, err) } unwindBlockHash, err := eriDb.ReadCanonicalHash(unwindBlockNum) if err != nil { - return 0, emptyHash, 0, fmt.Errorf("ReadCanonicalHash: block %d: %w", unwindBlockNum, err) + return 0, emptyHash, 0, fmt.Errorf("ReadCanonicalHash block %d: %w", unwindBlockNum, err) } 
return unwindBlockNum, unwindBlockHash, batchNum, nil diff --git a/zk/stages/stage_data_stream_catch_up_test.go b/zk/stages/stage_data_stream_catch_up_test.go index 00b1fb880d6..2a87d820f2d 100644 --- a/zk/stages/stage_data_stream_catch_up_test.go +++ b/zk/stages/stage_data_stream_catch_up_test.go @@ -14,7 +14,7 @@ import ( "github.com/ledgerwatch/erigon/eth/stagedsync" "github.com/ledgerwatch/erigon/eth/stagedsync/stages" "github.com/ledgerwatch/erigon/smt/pkg/db" - mocks "github.com/ledgerwatch/erigon/zk/datastream/mock_services" + "github.com/ledgerwatch/erigon/zk/datastream/mocks" "github.com/ledgerwatch/erigon/zk/hermez_db" "github.com/stretchr/testify/require" "go.uber.org/mock/gomock" diff --git a/zk/stages/stage_interhashes.go b/zk/stages/stage_interhashes.go index 381c8fdee08..c2f708f6967 100644 --- a/zk/stages/stage_interhashes.go +++ b/zk/stages/stage_interhashes.go @@ -3,9 +3,7 @@ package stages import ( "fmt" - "github.com/holiman/uint256" "github.com/ledgerwatch/erigon-lib/common" - libcommon "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/common/length" "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon-lib/state" @@ -25,10 +23,7 @@ import ( "os" - "math" - "github.com/ledgerwatch/erigon-lib/kv/dbutils" - "github.com/ledgerwatch/erigon-lib/kv/membatchwithdb" "github.com/ledgerwatch/erigon/core/rawdb" "github.com/ledgerwatch/erigon/core/systemcontracts" "github.com/ledgerwatch/erigon/core/types/accounts" @@ -39,6 +34,7 @@ import ( "github.com/ledgerwatch/erigon/turbo/stages/headerdownload" "github.com/ledgerwatch/erigon/turbo/trie" "github.com/ledgerwatch/erigon/zk" + zkSmt "github.com/ledgerwatch/erigon/zk/smt" "github.com/status-im/keycard-go/hexutils" ) @@ -81,7 +77,7 @@ func StageZkInterHashesCfg( } } -func SpawnZkIntermediateHashesStage(s *stagedsync.StageState, u stagedsync.Unwinder, tx kv.RwTx, cfg ZkInterHashesCfg, ctx context.Context) (root libcommon.Hash, err error) { +func 
SpawnZkIntermediateHashesStage(s *stagedsync.StageState, u stagedsync.Unwinder, tx kv.RwTx, cfg ZkInterHashesCfg, ctx context.Context) (root common.Hash, err error) { logPrefix := s.LogPrefix() quit := ctx.Done() @@ -90,7 +86,7 @@ func SpawnZkIntermediateHashesStage(s *stagedsync.StageState, u stagedsync.Unwin useExternalTx := tx != nil if !useExternalTx { var err error - tx, err = cfg.db.BeginRw(context.Background()) + tx, err = cfg.db.BeginRw(ctx) if err != nil { return trie.EmptyRoot, err } @@ -105,12 +101,10 @@ func SpawnZkIntermediateHashesStage(s *stagedsync.StageState, u stagedsync.Unwin ///// DEBUG BISECT ///// defer func() { if cfg.zk.DebugLimit > 0 { + log.Info(fmt.Sprintf("[%s] Debug limits", logPrefix), "Limit", cfg.zk.DebugLimit, "TO", to, "Err is nil ?", err == nil) if err != nil { log.Error("Hashing Failed", "block", to, "err", err) os.Exit(1) - } else if to >= cfg.zk.DebugLimit { - tx.Commit() - os.Exit(0) } } }() @@ -197,7 +191,6 @@ func SpawnZkIntermediateHashesStage(s *stagedsync.StageState, u stagedsync.Unwin } func UnwindZkIntermediateHashesStage(u *stagedsync.UnwindState, s *stagedsync.StageState, tx kv.RwTx, cfg ZkInterHashesCfg, ctx context.Context, silent bool) (err error) { - quit := ctx.Done() useExternalTx := tx != nil if !useExternalTx { tx, err = cfg.db.BeginRw(ctx) @@ -221,12 +214,9 @@ func UnwindZkIntermediateHashesStage(u *stagedsync.UnwindState, s *stagedsync.St expectedRootHash = syncHeadHeader.Root } - root, err := unwindZkSMT(ctx, s.LogPrefix(), s.BlockNumber, u.UnwindPoint, tx, cfg.checkRoot, &expectedRootHash, silent, quit) - if err != nil { + if _, err = zkSmt.UnwindZkSMT(ctx, s.LogPrefix(), s.BlockNumber, u.UnwindPoint, tx, cfg.checkRoot, &expectedRootHash, silent); err != nil { return err } - _ = root - hermezDb := hermez_db.NewHermezDb(tx) if err := hermezDb.TruncateSmtDepths(u.UnwindPoint); err != nil { return err @@ -456,197 +446,6 @@ func zkIncrementIntermediateHashes(ctx context.Context, logPrefix string, s *sta return 
hash, nil } -func unwindZkSMT(ctx context.Context, logPrefix string, from, to uint64, db kv.RwTx, checkRoot bool, expectedRootHash *common.Hash, quiet bool, quit <-chan struct{}) (common.Hash, error) { - if !quiet { - log.Info(fmt.Sprintf("[%s] Unwind trie hashes started", logPrefix)) - defer log.Info(fmt.Sprintf("[%s] Unwind ended", logPrefix)) - } - - eridb := db2.NewEriDb(db) - dbSmt := smt.NewSMT(eridb, false) - - if !quiet { - log.Info(fmt.Sprintf("[%s]", logPrefix), "last root", common.BigToHash(dbSmt.LastRoot())) - } - - if quit == nil { - log.Warn("quit channel is nil, creating a new one") - quit = make(chan struct{}) - } - - // only open the batch if tx is not already one - if _, ok := db.(*membatchwithdb.MemoryMutation); !ok { - eridb.OpenBatch(quit) - } - - ac, err := db.CursorDupSort(kv.AccountChangeSet) - if err != nil { - return trie.EmptyRoot, err - } - defer ac.Close() - - sc, err := db.CursorDupSort(kv.StorageChangeSet) - if err != nil { - return trie.EmptyRoot, err - } - defer sc.Close() - - currentPsr := state2.NewPlainStateReader(db) - - total := uint64(math.Abs(float64(from) - float64(to) + 1)) - printerStopped := false - progressChan, stopPrinter := zk.ProgressPrinter(fmt.Sprintf("[%s] Progress unwinding", logPrefix), total, quiet) - defer func() { - if !printerStopped { - stopPrinter() - } - }() - - // walk backwards through the blocks, applying state changes, and deletes - // PlainState contains data AT the block - // History tables contain data BEFORE the block - so need a +1 offset - accChanges := make(map[common.Address]*accounts.Account) - codeChanges := make(map[common.Address]string) - storageChanges := make(map[common.Address]map[string]string) - - addDeletedAcc := func(addr common.Address) { - deletedAcc := new(accounts.Account) - deletedAcc.Balance = *uint256.NewInt(0) - deletedAcc.Nonce = 0 - accChanges[addr] = deletedAcc - } - - psr := state2.NewPlainState(db, from, systemcontracts.SystemContractCodeLookup["Hermez"]) - defer 
psr.Close() - - for i := from; i >= to+1; i-- { - select { - case <-ctx.Done(): - return trie.EmptyRoot, fmt.Errorf("[%s] Context done", logPrefix) - default: - } - - psr.SetBlockNr(i) - - dupSortKey := dbutils.EncodeBlockNumber(i) - - // collect changes to accounts and code - for _, v, err2 := ac.SeekExact(dupSortKey); err2 == nil && v != nil; _, v, err2 = ac.NextDup() { - - addr := common.BytesToAddress(v[:length.Addr]) - - // if the account was created in this changeset we should delete it - if len(v[length.Addr:]) == 0 { - codeChanges[addr] = "" - addDeletedAcc(addr) - continue - } - - oldAcc, err := psr.ReadAccountData(addr) - if err != nil { - return trie.EmptyRoot, err - } - - // currAcc at block we're unwinding from - currAcc, err := currentPsr.ReadAccountData(addr) - if err != nil { - return trie.EmptyRoot, err - } - - if oldAcc.Incarnation > 0 { - if len(v) == 0 { // self-destructed - addDeletedAcc(addr) - } else { - if currAcc.Incarnation > oldAcc.Incarnation { - addDeletedAcc(addr) - } - } - } - - // store the account - accChanges[addr] = oldAcc - - if oldAcc.CodeHash != currAcc.CodeHash { - cc, err := currentPsr.ReadAccountCode(addr, oldAcc.Incarnation, oldAcc.CodeHash) - if err != nil { - return trie.EmptyRoot, err - } - - ach := hexutils.BytesToHex(cc) - hexcc := "" - if len(ach) > 0 { - hexcc = "0x" + ach - } - codeChanges[addr] = hexcc - } - } - - err = db.ForPrefix(kv.StorageChangeSet, dupSortKey, func(sk, sv []byte) error { - changesetKey := sk[length.BlockNum:] - address, _ := dbutils.PlainParseStoragePrefix(changesetKey) - - sstorageKey := sv[:length.Hash] - stk := common.BytesToHash(sstorageKey) - - value := []byte{0} - if len(sv[length.Hash:]) != 0 { - value = sv[length.Hash:] - } - - stkk := fmt.Sprintf("0x%032x", stk) - v := fmt.Sprintf("0x%032x", common.BytesToHash(value)) - - m := make(map[string]string) - m[stkk] = v - - if storageChanges[address] == nil { - storageChanges[address] = make(map[string]string) - } - 
storageChanges[address][stkk] = v - return nil - }) - if err != nil { - return trie.EmptyRoot, err - } - - progressChan <- 1 - } - - stopPrinter() - printerStopped = true - - if _, _, err := dbSmt.SetStorage(ctx, logPrefix, accChanges, codeChanges, storageChanges); err != nil { - return trie.EmptyRoot, err - } - - if err := verifyLastHash(dbSmt, expectedRootHash, checkRoot, logPrefix, quiet); err != nil { - log.Error("failed to verify hash") - eridb.RollbackBatch() - return trie.EmptyRoot, err - } - - if err := eridb.CommitBatch(); err != nil { - return trie.EmptyRoot, err - } - - lr := dbSmt.LastRoot() - - hash := common.BigToHash(lr) - return hash, nil -} - -func verifyLastHash(dbSmt *smt.SMT, expectedRootHash *common.Hash, checkRoot bool, logPrefix string, quiet bool) error { - hash := common.BigToHash(dbSmt.LastRoot()) - - if checkRoot && hash != *expectedRootHash { - panic(fmt.Sprintf("[%s] Wrong trie root: %x, expected (from header): %x", logPrefix, hash, expectedRootHash)) - } - if !quiet { - log.Info(fmt.Sprintf("[%s] Trie root matches", logPrefix), "hash", hash.Hex()) - } - return nil -} - func processAccount(db smt.DB, a *accounts.Account, as map[string]string, inc uint64, psr *state2.PlainStateReader, addr common.Address, keys []utils.NodeKey) ([]utils.NodeKey, error) { // get the account balance and nonce keys, err := insertAccountStateToKV(db, keys, addr.String(), a.Balance.ToBig(), new(big.Int).SetUint64(a.Nonce)) diff --git a/zk/stages/stage_l1_info_tree.go b/zk/stages/stage_l1_info_tree.go index 7547d240230..252a29be3f1 100644 --- a/zk/stages/stage_l1_info_tree.go +++ b/zk/stages/stage_l1_info_tree.go @@ -42,18 +42,18 @@ func SpawnL1InfoTreeStage( var err error tx, err = cfg.db.BeginRw(ctx) if err != nil { - return err + return fmt.Errorf("cfg.db.BeginRw: %w", err) } defer tx.Rollback() } if err := cfg.updater.WarmUp(tx); err != nil { - return err + return fmt.Errorf("cfg.updater.WarmUp: %w", err) } allLogs, err := 
cfg.updater.CheckForInfoTreeUpdates(logPrefix, tx) if err != nil { - return err + return fmt.Errorf("CheckForInfoTreeUpdates: %w", err) } var latestIndex uint64 @@ -65,7 +65,7 @@ func SpawnL1InfoTreeStage( if freshTx { if funcErr = tx.Commit(); funcErr != nil { - return funcErr + return fmt.Errorf("tx.Commit: %w", funcErr) } } diff --git a/zk/stages/stage_l1_info_tree_test.go b/zk/stages/stage_l1_info_tree_test.go index c2e3a93d511..585a1ad299a 100644 --- a/zk/stages/stage_l1_info_tree_test.go +++ b/zk/stages/stage_l1_info_tree_test.go @@ -10,7 +10,6 @@ import ( ethereum "github.com/ledgerwatch/erigon" "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/kv/memdb" - "github.com/ledgerwatch/erigon/cmd/rpcdaemon/commands/mocks" "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/eth/ethconfig" "github.com/ledgerwatch/erigon/eth/stagedsync" @@ -20,6 +19,7 @@ import ( "github.com/ledgerwatch/erigon/zk/hermez_db" "github.com/ledgerwatch/erigon/zk/l1infotree" "github.com/ledgerwatch/erigon/zk/syncer" + "github.com/ledgerwatch/erigon/zk/syncer/mocks" "github.com/ledgerwatch/log/v3" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" diff --git a/zk/stages/stage_l1_sequencer_sync_test.go b/zk/stages/stage_l1_sequencer_sync_test.go index 5dc1f836dbb..e69d5eedf13 100644 --- a/zk/stages/stage_l1_sequencer_sync_test.go +++ b/zk/stages/stage_l1_sequencer_sync_test.go @@ -9,7 +9,6 @@ import ( ethereum "github.com/ledgerwatch/erigon" "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/kv/memdb" - "github.com/ledgerwatch/erigon/cmd/rpcdaemon/commands/mocks" "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/eth/ethconfig" "github.com/ledgerwatch/erigon/eth/stagedsync" @@ -19,6 +18,7 @@ import ( "github.com/ledgerwatch/erigon/zk/contracts" "github.com/ledgerwatch/erigon/zk/hermez_db" "github.com/ledgerwatch/erigon/zk/syncer" + 
"github.com/ledgerwatch/erigon/zk/syncer/mocks" "github.com/ledgerwatch/log/v3" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" diff --git a/zk/stages/stage_l1syncer.go b/zk/stages/stage_l1_syncer.go similarity index 87% rename from zk/stages/stage_l1syncer.go rename to zk/stages/stage_l1_syncer.go index 39da1fa22f3..40edbc1a082 100644 --- a/zk/stages/stage_l1syncer.go +++ b/zk/stages/stage_l1_syncer.go @@ -25,7 +25,6 @@ import ( ) type IL1Syncer interface { - // atomic IsSyncStarted() bool IsDownloading() bool @@ -46,8 +45,7 @@ type IL1Syncer interface { } var ( - ErrStateRootMismatch = errors.New("state root mismatch") - + ErrStateRootMismatch = errors.New("state root mismatch") lastCheckedL1BlockCounter = metrics.GetOrCreateGauge(`last_checked_l1_block`) ) @@ -95,7 +93,7 @@ func SpawnStageL1Syncer( var err error tx, err = cfg.db.BeginRw(ctx) if err != nil { - return fmt.Errorf("failed to open tx, %w", err) + return fmt.Errorf("cfg.db.BeginRw: %w", err) } defer tx.Rollback() } @@ -106,7 +104,7 @@ func SpawnStageL1Syncer( // get l1 block progress from this stage's progress l1BlockProgress, err := stages.GetStageProgress(tx, stages.L1Syncer) if err != nil { - return fmt.Errorf("failed to get l1 progress block, %w", err) + return fmt.Errorf("GetStageProgress, %w", err) } // start syncer if not started @@ -149,8 +147,7 @@ Loop: continue } if err := hermezDb.WriteSequence(info.L1BlockNo, info.BatchNo, info.L1TxHash, info.StateRoot, info.L1InfoRoot); err != nil { - funcErr = fmt.Errorf("failed to write batch info, %w", err) - return funcErr + return fmt.Errorf("WriteSequence: %w", err) } if info.L1BlockNo > highestWrittenL1BlockNo { highestWrittenL1BlockNo = info.L1BlockNo @@ -158,8 +155,7 @@ Loop: newSequencesCount++ case logRollbackBatches: if err := hermezDb.RollbackSequences(info.BatchNo); err != nil { - funcErr = fmt.Errorf("failed to write rollback sequence, %w", err) - return funcErr + return fmt.Errorf("RollbackSequences: %w", err) } 
if info.L1BlockNo > highestWrittenL1BlockNo { highestWrittenL1BlockNo = info.L1BlockNo @@ -175,8 +171,7 @@ Loop: highestVerification = info } if err := hermezDb.WriteVerification(info.L1BlockNo, info.BatchNo, info.L1TxHash, info.StateRoot); err != nil { - funcErr = fmt.Errorf("failed to write verification for block %d, %w", info.L1BlockNo, err) - return funcErr + return fmt.Errorf("WriteVerification for block %d: %w", info.L1BlockNo, err) } if info.L1BlockNo > highestWrittenL1BlockNo { highestWrittenL1BlockNo = info.L1BlockNo @@ -206,19 +201,17 @@ Loop: log.Info(fmt.Sprintf("[%s] Saving L1 syncer progress", logPrefix), "latestCheckedBlock", latestCheckedBlock, "newVerificationsCount", newVerificationsCount, "newSequencesCount", newSequencesCount, "highestWrittenL1BlockNo", highestWrittenL1BlockNo) if err := stages.SaveStageProgress(tx, stages.L1Syncer, highestWrittenL1BlockNo); err != nil { - funcErr = fmt.Errorf("failed to save stage progress, %w", err) - return funcErr + return fmt.Errorf("SaveStageProgress: %w", err) } if highestVerification.BatchNo > 0 { log.Info(fmt.Sprintf("[%s]", logPrefix), "highestVerificationBatchNo", highestVerification.BatchNo) if err := stages.SaveStageProgress(tx, stages.L1VerificationsBatchNo, highestVerification.BatchNo); err != nil { - return fmt.Errorf("failed to save stage progress, %w", err) + return fmt.Errorf("SaveStageProgress: %w", err) } } // State Root Verifications Check - err = verifyAgainstLocalBlocks(tx, hermezDb, logPrefix) - if err != nil { + if err = verifyAgainstLocalBlocks(tx, hermezDb, logPrefix); err != nil { if errors.Is(err, ErrStateRootMismatch) { panic(err) } @@ -231,8 +224,7 @@ Loop: if internalTxOpened { log.Debug("l1 sync: first cycle, committing tx") if err := tx.Commit(); err != nil { - funcErr = fmt.Errorf("failed to commit tx, %w", err) - return funcErr + return fmt.Errorf("tx.Commit: %w", err) } } @@ -272,15 +264,9 @@ func parseLogType(l1RollupId uint64, log *ethTypes.Log) (l1BatchInfo
types.L1Bat batchNum = new(big.Int).SetBytes(log.Topics[1].Bytes()).Uint64() stateRoot = common.BytesToHash(log.Data[:32]) case contracts.VerificationValidiumTopicEtrog: - bigRollupId := new(big.Int).SetUint64(l1RollupId) - isRollupIdMatching := log.Topics[1] == common.BigToHash(bigRollupId) - if isRollupIdMatching { - batchLogType = logVerifyEtrog - batchNum = new(big.Int).SetBytes(log.Topics[1].Bytes()).Uint64() - stateRoot = common.BytesToHash(log.Data[:32]) - } else { - batchLogType = logIncompatible - } + batchLogType = logVerifyEtrog + batchNum = new(big.Int).SetBytes(log.Topics[1].Bytes()).Uint64() + stateRoot = common.BytesToHash(log.Data[:32]) case contracts.VerificationTopicEtrog: bigRollupId := new(big.Int).SetUint64(l1RollupId) isRollupIdMatching := log.Topics[1] == common.BigToHash(bigRollupId) @@ -325,7 +311,7 @@ func verifyAgainstLocalBlocks(tx kv.RwTx, hermezDb *hermez_db.HermezDb, logPrefi // get the highest hashed block hashedBlockNo, err := stages.GetStageProgress(tx, stages.IntermediateHashes) if err != nil { - return fmt.Errorf("failed to get highest hashed block, %w", err) + return fmt.Errorf("GetStageProgress: %w", err) } // no need to check - interhashes has not yet run @@ -336,7 +322,7 @@ func verifyAgainstLocalBlocks(tx kv.RwTx, hermezDb *hermez_db.HermezDb, logPrefi // get the highest verified block verifiedBlockNo, err := hermezDb.GetHighestVerifiedBlockNo() if err != nil { - return fmt.Errorf("failed to get highest verified block no, %w", err) + return fmt.Errorf("GetHighestVerifiedBlockNo: %w", err) } // no verifications on l1 @@ -356,7 +342,7 @@ func verifyAgainstLocalBlocks(tx kv.RwTx, hermezDb *hermez_db.HermezDb, logPrefi // get the batch of the last hashed block hashedBatch, err := hermezDb.GetBatchNoByL2Block(hashedBlockNo) if err != nil && !errors.Is(err, hermez_db.ErrorNotStored) { - return err + return fmt.Errorf("GetBatchNoByL2Block: %w", err) } if hashedBatch == 0 { @@ -368,7 +354,7 @@ func verifyAgainstLocalBlocks(tx 
kv.RwTx, hermezDb *hermez_db.HermezDb, logPrefi // find the higher blocknum for previous batch blockNumbers, err := hermezDb.GetL2BlockNosByBatch(hashedBatch) if err != nil { - return err + return fmt.Errorf("GetL2BlockNosByBatch: %w", err) } if len(blockNumbers) == 0 { @@ -386,18 +372,17 @@ func verifyAgainstLocalBlocks(tx kv.RwTx, hermezDb *hermez_db.HermezDb, logPrefi // already checked highestChecked, err := stages.GetStageProgress(tx, stages.VerificationsStateRootCheck) if err != nil { - return fmt.Errorf("failed to get highest checked block, %w", err) + return fmt.Errorf("GetStageProgress: %w", err) } if highestChecked >= blockToCheck { return nil } if !sequencer.IsSequencer() { - err = blockComparison(tx, hermezDb, blockToCheck, logPrefix) - if err == nil { + if err = blockComparison(tx, hermezDb, blockToCheck, logPrefix); err == nil { log.Info(fmt.Sprintf("[%s] State root verified in block %d", logPrefix, blockToCheck)) if err := stages.SaveStageProgress(tx, stages.VerificationsStateRootCheck, verifiedBlockNo); err != nil { - return fmt.Errorf("failed to save stage progress, %w", err) + return fmt.Errorf("SaveStageProgress: %w", err) } } } @@ -408,12 +393,12 @@ func verifyAgainstLocalBlocks(tx kv.RwTx, hermezDb *hermez_db.HermezDb, logPrefi func blockComparison(tx kv.RwTx, hermezDb *hermez_db.HermezDb, blockNo uint64, logPrefix string) error { v, err := hermezDb.GetVerificationByL2BlockNo(blockNo) if err != nil { - return fmt.Errorf("failed to get verification by l2 block no, %w", err) + return fmt.Errorf("GetVerificationByL2BlockNo: %w", err) } block, err := rawdb.ReadBlockByNumber(tx, blockNo) if err != nil { - return fmt.Errorf("failed to read block by number, %w", err) + return fmt.Errorf("ReadBlockByNumber: %w", err) } if v == nil || block == nil { diff --git a/zk/stages/stage_l1_syncer_test.go b/zk/stages/stage_l1_syncer_test.go new file mode 100644 index 00000000000..dd7bb76e71f --- /dev/null +++ b/zk/stages/stage_l1_syncer_test.go @@ -0,0 +1,316 @@ 
+package stages + +import ( + "context" + "math/big" + "testing" + "time" + + ethereum "github.com/ledgerwatch/erigon" + "github.com/ledgerwatch/erigon-lib/common" + "github.com/ledgerwatch/erigon-lib/kv/memdb" + "github.com/ledgerwatch/erigon/core/rawdb" + "github.com/ledgerwatch/erigon/core/types" + "github.com/ledgerwatch/erigon/eth/ethconfig" + "github.com/ledgerwatch/erigon/eth/stagedsync" + "github.com/ledgerwatch/erigon/eth/stagedsync/stages" + "github.com/ledgerwatch/erigon/smt/pkg/db" + "github.com/ledgerwatch/erigon/zk/contracts" + "github.com/ledgerwatch/erigon/zk/hermez_db" + "github.com/ledgerwatch/erigon/zk/syncer" + "github.com/ledgerwatch/erigon/zk/syncer/mocks" + "github.com/stretchr/testify/require" + "go.uber.org/mock/gomock" +) + +func TestSpawnStageL1Syncer(t *testing.T) { + // Arrange + ctx, db1 := context.Background(), memdb.NewTestDB(t) + tx := memdb.BeginRw(t, db1) + err := hermez_db.CreateHermezBuckets(tx) + require.NoError(t, err) + err = db.CreateEriDbBuckets(tx) + require.NoError(t, err) + + l1FirstBlock := big.NewInt(20) + l2BlockNumber := uint64(10) + verifiedBatchNumber := uint64(2) + + hDB := hermez_db.NewHermezDb(tx) + err = hDB.WriteBlockBatch(0, 0) + require.NoError(t, err) + err = hDB.WriteBlockBatch(l2BlockNumber-1, verifiedBatchNumber-1) + require.NoError(t, err) + err = hDB.WriteBlockBatch(l2BlockNumber, verifiedBatchNumber) + require.NoError(t, err) + err = stages.SaveStageProgress(tx, stages.L1Syncer, 0) + require.NoError(t, err) + err = stages.SaveStageProgress(tx, stages.IntermediateHashes, l2BlockNumber-1) + require.NoError(t, err) + + err = hDB.WriteVerification(l1FirstBlock.Uint64(), verifiedBatchNumber-1, common.HexToHash("0x1"), common.HexToHash("0x99990")) + require.NoError(t, err) + err = hDB.WriteVerification(l1FirstBlock.Uint64(), verifiedBatchNumber, common.HexToHash("0x2"), common.HexToHash("0x99999")) + require.NoError(t, err) + + genesisHeader := &types.Header{ + Number: big.NewInt(0).SetUint64(l2BlockNumber 
- 1), + Time: 0, + Difficulty: big.NewInt(1), + GasLimit: 8000000, + GasUsed: 0, + ParentHash: common.HexToHash("0x1"), + TxHash: common.HexToHash("0x2"), + ReceiptHash: common.HexToHash("0x3"), + Root: common.HexToHash("0x99990"), + } + + txs := []types.Transaction{} + uncles := []*types.Header{} + receipts := []*types.Receipt{} + withdrawals := []*types.Withdrawal{} + + genesisBlock := types.NewBlock(genesisHeader, txs, uncles, receipts, withdrawals) + + err = rawdb.WriteBlock(tx, genesisBlock) + require.NoError(t, err) + err = rawdb.WriteCanonicalHash(tx, genesisBlock.Hash(), genesisBlock.NumberU64()) + require.NoError(t, err) + + s := &stagedsync.StageState{ID: stages.L1Syncer, BlockNumber: 0} + u := &stagedsync.Sync{} + + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + EthermanMock := mocks.NewMockIEtherman(mockCtrl) + + l1ContractAddresses := []common.Address{ + common.HexToAddress("0x1"), + common.HexToAddress("0x2"), + common.HexToAddress("0x3"), + } + l1ContractTopics := [][]common.Hash{ + []common.Hash{common.HexToHash("0x1")}, + []common.Hash{common.HexToHash("0x2")}, + []common.Hash{common.HexToHash("0x3")}, + } + + latestBlockParentHash := common.HexToHash("0x123456789") + latestBlockTime := uint64(time.Now().Unix()) + latestBlockNumber := big.NewInt(21) + latestBlockHeader := &types.Header{ParentHash: latestBlockParentHash, Number: latestBlockNumber, Time: latestBlockTime} + latestBlock := types.NewBlockWithHeader(latestBlockHeader) + + EthermanMock.EXPECT().BlockByNumber(gomock.Any(), nil).Return(latestBlock, nil).AnyTimes() + + filterQuery := ethereum.FilterQuery{ + FromBlock: l1FirstBlock, + ToBlock: latestBlockNumber, + Addresses: l1ContractAddresses, + Topics: l1ContractTopics, + } + + const rollupID = uint64(1) + + type testCase struct { + name string + getLog func(hDB *hermez_db.HermezDb) (types.Log, error) + assert func(t *testing.T, hDB *hermez_db.HermezDb) + } + + testCases := []testCase{ + { + name: 
"SequencedBatchTopicPreEtrog", + getLog: func(hDB *hermez_db.HermezDb) (types.Log, error) { + batchNum := uint64(1) + batchNumHash := common.BytesToHash(big.NewInt(0).SetUint64(batchNum).Bytes()) + txHash := common.HexToHash("0x1") + return types.Log{ + BlockNumber: latestBlockNumber.Uint64(), + Address: l1ContractAddresses[0], + Topics: []common.Hash{contracts.SequencedBatchTopicPreEtrog, batchNumHash}, + TxHash: txHash, + Data: []byte{}, + }, nil + }, + assert: func(t *testing.T, hDB *hermez_db.HermezDb) { + l1BatchInfo, err := hDB.GetSequenceByBatchNo(1) + require.NoError(t, err) + + require.Equal(t, l1BatchInfo.BatchNo, uint64(1)) + require.Equal(t, l1BatchInfo.L1BlockNo, latestBlockNumber.Uint64()) + require.Equal(t, l1BatchInfo.L1TxHash.String(), common.HexToHash("0x1").String()) + require.Equal(t, l1BatchInfo.StateRoot.String(), common.Hash{}.String()) + require.Equal(t, l1BatchInfo.L1InfoRoot.String(), common.Hash{}.String()) + }, + }, + { + name: "SequencedBatchTopicEtrog", + getLog: func(hDB *hermez_db.HermezDb) (types.Log, error) { + batchNum := uint64(2) + batchNumHash := common.BytesToHash(big.NewInt(0).SetUint64(batchNum).Bytes()) + txHash := common.HexToHash("0x2") + l1InfoRoot := common.HexToHash("0x3") + return types.Log{ + BlockNumber: latestBlockNumber.Uint64(), + Address: l1ContractAddresses[0], + Topics: []common.Hash{contracts.SequencedBatchTopicEtrog, batchNumHash}, + Data: l1InfoRoot.Bytes(), + TxHash: txHash, + }, nil + }, + assert: func(t *testing.T, hDB *hermez_db.HermezDb) { + l1BatchInfo, err := hDB.GetSequenceByBatchNo(2) + require.NoError(t, err) + + require.Equal(t, l1BatchInfo.BatchNo, uint64(2)) + require.Equal(t, l1BatchInfo.L1BlockNo, latestBlockNumber.Uint64()) + require.Equal(t, l1BatchInfo.L1TxHash.String(), common.HexToHash("0x2").String()) + require.Equal(t, l1BatchInfo.StateRoot.String(), common.Hash{}.String()) + require.Equal(t, l1BatchInfo.L1InfoRoot.String(), common.HexToHash("0x3").String()) + }, + }, + { + name: 
"VerificationTopicPreEtrog", + getLog: func(hDB *hermez_db.HermezDb) (types.Log, error) { + batchNum := uint64(3) + batchNumHash := common.BytesToHash(big.NewInt(0).SetUint64(batchNum).Bytes()) + txHash := common.HexToHash("0x4") + stateRoot := common.HexToHash("0x5") + return types.Log{ + BlockNumber: latestBlockNumber.Uint64(), + Address: l1ContractAddresses[0], + Topics: []common.Hash{contracts.VerificationTopicPreEtrog, batchNumHash}, + Data: stateRoot.Bytes(), + TxHash: txHash, + }, nil + }, + assert: func(t *testing.T, hDB *hermez_db.HermezDb) { + l1BatchInfo, err := hDB.GetVerificationByBatchNo(3) + require.NoError(t, err) + + require.Equal(t, l1BatchInfo.BatchNo, uint64(3)) + require.Equal(t, l1BatchInfo.L1BlockNo, latestBlockNumber.Uint64()) + require.Equal(t, l1BatchInfo.L1TxHash.String(), common.HexToHash("0x4").String()) + require.Equal(t, l1BatchInfo.StateRoot.String(), common.HexToHash("0x5").String()) + require.Equal(t, l1BatchInfo.L1InfoRoot.String(), common.Hash{}.String()) + }, + }, + { + name: "VerificationValidiumTopicEtrog", + getLog: func(hDB *hermez_db.HermezDb) (types.Log, error) { + batchNum := uint64(4) + batchNumHash := common.BytesToHash(big.NewInt(0).SetUint64(batchNum).Bytes()) + txHash := common.HexToHash("0x4") + stateRoot := common.HexToHash("0x5") + return types.Log{ + BlockNumber: latestBlockNumber.Uint64(), + Address: l1ContractAddresses[0], + Topics: []common.Hash{contracts.VerificationValidiumTopicEtrog, batchNumHash}, + Data: stateRoot.Bytes(), + TxHash: txHash, + }, nil + }, + assert: func(t *testing.T, hDB *hermez_db.HermezDb) { + l1BatchInfo, err := hDB.GetVerificationByBatchNo(4) + require.NoError(t, err) + + require.Equal(t, l1BatchInfo.BatchNo, uint64(4)) + require.Equal(t, l1BatchInfo.L1BlockNo, latestBlockNumber.Uint64()) + require.Equal(t, l1BatchInfo.L1TxHash.String(), common.HexToHash("0x4").String()) + require.Equal(t, l1BatchInfo.StateRoot.String(), common.HexToHash("0x5").String()) + require.Equal(t, 
l1BatchInfo.L1InfoRoot.String(), common.Hash{}.String()) + }, + }, + { + name: "VerificationTopicEtrog", + getLog: func(hDB *hermez_db.HermezDb) (types.Log, error) { + rollupIDHash := common.BytesToHash(big.NewInt(0).SetUint64(rollupID).Bytes()) + batchNum := uint64(5) + batchNumHash := common.BytesToHash(big.NewInt(0).SetUint64(batchNum).Bytes()) + txHash := common.HexToHash("0x6") + stateRoot := common.HexToHash("0x7") + data := append(batchNumHash.Bytes(), stateRoot.Bytes()...) + return types.Log{ + BlockNumber: latestBlockNumber.Uint64(), + Address: l1ContractAddresses[0], + Topics: []common.Hash{contracts.VerificationTopicEtrog, rollupIDHash}, + Data: data, + TxHash: txHash, + }, nil + }, + assert: func(t *testing.T, hDB *hermez_db.HermezDb) { + l1BatchInfo, err := hDB.GetVerificationByBatchNo(5) + require.NoError(t, err) + + require.Equal(t, l1BatchInfo.BatchNo, uint64(5)) + require.Equal(t, l1BatchInfo.L1BlockNo, latestBlockNumber.Uint64()) + require.Equal(t, l1BatchInfo.L1TxHash.String(), common.HexToHash("0x6").String()) + require.Equal(t, l1BatchInfo.StateRoot.String(), common.HexToHash("0x7").String()) + require.Equal(t, l1BatchInfo.L1InfoRoot.String(), common.Hash{}.String()) + }, + }, + { + name: "RollbackBatchesTopic", + getLog: func(hDB *hermez_db.HermezDb) (types.Log, error) { + blockNum := uint64(10) + batchNum := uint64(20) + batchNumHash := common.BytesToHash(big.NewInt(0).SetUint64(batchNum).Bytes()) + txHash := common.HexToHash("0x888") + stateRoot := common.HexToHash("0x999") + l1InfoRoot := common.HexToHash("0x101010") + + for i := uint64(15); i <= uint64(25); i++ { + err := hDB.WriteSequence(blockNum, i, txHash, stateRoot, l1InfoRoot) + require.NoError(t, err) + } + + return types.Log{ + BlockNumber: latestBlockNumber.Uint64(), + Address: l1ContractAddresses[0], + Topics: []common.Hash{contracts.RollbackBatchesTopic, batchNumHash}, + TxHash: txHash, + }, nil + }, + assert: func(t *testing.T, hDB *hermez_db.HermezDb) { + for i := uint64(15); 
i <= uint64(20); i++ { + l1BatchInfo, err := hDB.GetSequenceByBatchNo(i) + require.NotNil(t, l1BatchInfo) + require.NoError(t, err) + } + for i := uint64(21); i <= uint64(25); i++ { + l1BatchInfo, err := hDB.GetSequenceByBatchNo(i) + require.Nil(t, l1BatchInfo) + require.NoError(t, err) + } + }, + }, + } + + filteredLogs := []types.Log{} + for _, tc := range testCases { + ll, err := tc.getLog(hDB) + require.NoError(t, err) + filteredLogs = append(filteredLogs, ll) + } + + EthermanMock.EXPECT().FilterLogs(gomock.Any(), filterQuery).Return(filteredLogs, nil).AnyTimes() + + l1Syncer := syncer.NewL1Syncer(ctx, []syncer.IEtherman{EthermanMock}, l1ContractAddresses, l1ContractTopics, 10, 0, "latest") + + zkCfg := ðconfig.Zk{ + L1RollupId: rollupID, + L1FirstBlock: l1FirstBlock.Uint64(), + } + cfg := StageL1SyncerCfg(db1, l1Syncer, zkCfg) + quiet := false + + // Act + err = SpawnStageL1Syncer(s, u, ctx, tx, cfg, quiet) + require.NoError(t, err) + + // Assert + for _, tc := range testCases { + tc.assert(t, hDB) + } +} diff --git a/zk/stages/stage_sequence_execute.go b/zk/stages/stage_sequence_execute.go index 510c74dfe49..15406d9091a 100644 --- a/zk/stages/stage_sequence_execute.go +++ b/zk/stages/stage_sequence_execute.go @@ -2,6 +2,7 @@ package stages import ( "context" + "errors" "fmt" "strconv" "time" @@ -21,7 +22,7 @@ import ( "github.com/ledgerwatch/erigon/zk/utils" ) -var shouldCheckForExecutionAndDataStreamAlighment = true +var shouldCheckForExecutionAndDataStreamAlignment = true func SpawnSequencingStage( s *stagedsync.StageState, @@ -142,7 +143,7 @@ func sequencingBatchStep( return sdb.tx.Commit() } - if shouldCheckForExecutionAndDataStreamAlighment { + if shouldCheckForExecutionAndDataStreamAlignment { // handle cases where the last batch wasn't committed to the data stream. 
// this could occur because we're migrating from an RPC node to a sequencer // or because the sequencer was restarted and not all processes completed (like waiting from remote executor) @@ -161,11 +162,11 @@ func sequencingBatchStep( // do not set shouldCheckForExecutionAndDataStreamAlighment=false because of the error return err } - shouldCheckForExecutionAndDataStreamAlighment = false + shouldCheckForExecutionAndDataStreamAlignment = false return nil } } - shouldCheckForExecutionAndDataStreamAlighment = false + shouldCheckForExecutionAndDataStreamAlignment = false } needsUnwind, err := tryHaltSequencer(batchContext, batchState, streamWriter, u, executionAt) @@ -322,8 +323,14 @@ func sequencingBatchStep( log.Info(fmt.Sprintf("[%s] Waiting for txs from the pool...", logPrefix)) } - LOOP_TRANSACTIONS: + innerBreak := false + emptyBlockOverflow := false + + OuterLoopTransactions: for { + if innerBreak { + break + } select { case <-logTicker.C: if !batchState.isAnyRecovery() { @@ -331,7 +338,7 @@ func sequencingBatchStep( } case <-blockTicker.C: if !batchState.isAnyRecovery() { - break LOOP_TRANSACTIONS + break OuterLoopTransactions } case <-batchTicker.C: if !batchState.isAnyRecovery() { @@ -351,7 +358,7 @@ func sequencingBatchStep( log.Info(fmt.Sprintf("[%s] Info tree updates", logPrefix), "count", len(newLogs), "latestIndex", latestIndex) default: if batchState.isLimboRecovery() { - batchState.blockState.transactionsForInclusion, err = getLimboTransaction(ctx, cfg, batchState.limboRecoveryData.limboTxHash) + batchState.blockState.transactionsForInclusion, err = getLimboTransaction(ctx, cfg, batchState.limboRecoveryData.limboTxHash, executionAt) if err != nil { return err } @@ -361,12 +368,21 @@ func sequencingBatchStep( return err } } else if !batchState.isL1Recovery() { + var allConditionsOK bool - batchState.blockState.transactionsForInclusion, allConditionsOK, err = getNextPoolTransactions(ctx, cfg, executionAt, batchState.forkId, batchState.yieldedTransactions) 
+ var newTransactions []types.Transaction + var newIds []common.Hash + + newTransactions, newIds, allConditionsOK, err = getNextPoolTransactions(ctx, cfg, executionAt, batchState.forkId, batchState.yieldedTransactions) if err != nil { return err } + batchState.blockState.transactionsForInclusion = append(batchState.blockState.transactionsForInclusion, newTransactions...) + for idx, tx := range newTransactions { + batchState.blockState.transactionHashesToSlots[tx.Hash()] = newIds[idx] + } + if len(batchState.blockState.transactionsForInclusion) == 0 { if allConditionsOK { time.Sleep(batchContext.cfg.zk.SequencerTimeoutOnEmptyTxPool) @@ -385,9 +401,22 @@ func sequencingBatchStep( log.Trace(fmt.Sprintf("[%s] Yielded transactions from the pool", logPrefix), "txCount", len(batchState.blockState.transactionsForInclusion)) } + badTxHashes := make([]common.Hash, 0) + minedTxHashes := make([]common.Hash, 0) + + InnerLoopTransactions: for i, transaction := range batchState.blockState.transactionsForInclusion { // For X Layer metrics.GetLogStatistics().CumulativeCounting(metrics.TxCounter) + // quick check if we should stop handling transactions + select { + case <-blockTicker.C: + if !batchState.isAnyRecovery() { + innerBreak = true + break InnerLoopTransactions + } + default: + } txHash := transaction.Hash() effectiveGas := batchState.blockState.getL1EffectiveGases(cfg, i) @@ -423,9 +452,21 @@ func sequencingBatchStep( continue } - // if running in normal operation mode and error != nil then just allow the code to continue - // It is safe because this approach ensures that the problematic transaction (the one that caused err != nil to be returned) is kept in yielded - // Each transaction in yielded will be reevaluated at the end of each batch + if isOkKnownError(err) { + // if this is a known error that could be caused by some edge case coming from the pool we want to warn + // about it and continue on as normal but ensure we don't continue to keep trying to add this 
transaction + // to the block + log.Warn(fmt.Sprintf("[%s] known error adding transaction to block, skipping for now: %v", logPrefix, err), + "hash", txHash) + badTxHashes = append(badTxHashes, txHash) + } else { + // if we have an error at this point something has gone wrong, either in the pool or otherwise + // to stop the pool growing and hampering further processing of good transactions here + // we mark it for being discarded + log.Warn(fmt.Sprintf("[%s] error adding transaction to batch, discarding from pool", logPrefix), "hash", txHash, "err", err) + badTxHashes = append(badTxHashes, txHash) + batchState.blockState.transactionsToDiscard = append(batchState.blockState.transactionsToDiscard, batchState.blockState.transactionHashesToSlots[txHash]) + } } switch anyOverflow { @@ -463,7 +504,10 @@ func sequencingBatchStep( if batchState.reachedOverflowTransactionLimit() || cfg.zk.SealBatchImmediatelyOnOverflow { log.Info(fmt.Sprintf("[%s] closing batch due to counters", logPrefix), "counters: ", batchState.overflowTransactions, "immediate", cfg.zk.SealBatchImmediatelyOnOverflow) runLoopBlocks = false - break LOOP_TRANSACTIONS + if len(batchState.blockState.builtBlockElements.transactions) == 0 { + emptyBlockOverflow = true + } + break OuterLoopTransactions } } @@ -480,7 +524,7 @@ func sequencingBatchStep( } log.Info(fmt.Sprintf("[%s] gas overflowed adding transaction to block", logPrefix), "block", blockNumber, "tx-hash", txHash) runLoopBlocks = false - break LOOP_TRANSACTIONS + break OuterLoopTransactions case overflowNone: } @@ -489,6 +533,7 @@ func sequencingBatchStep( metrics.GetLogStatistics().CumulativeValue(metrics.BatchGas, int64(execResult.UsedGas)) blockDataSizeChecker = &backupDataSizeChecker batchState.onAddedTransaction(transaction, receipt, execResult, effectiveGas) + minedTxHashes = append(minedTxHashes, txHash) } // We will only update the processed index in resequence job if there isn't overflow @@ -501,19 +546,41 @@ func sequencingBatchStep( if 
len(batchState.blockState.transactionsForInclusion) == 0 { // We need to jump to the next block here if there are no transactions in current block batchState.resequenceBatchJob.UpdateLastProcessedTx(batchState.resequenceBatchJob.CurrentBlock().L2Blockhash) - break LOOP_TRANSACTIONS + break OuterLoopTransactions } if batchState.resequenceBatchJob.AtNewBlockBoundary() { // We need to jump to the next block here if we are at the end of the current block - break LOOP_TRANSACTIONS + break OuterLoopTransactions } else { if cfg.zk.SequencerResequenceStrict { return fmt.Errorf("strict mode enabled, but resequenced batch %d has transactions that overflowed counters or failed transactions", batchState.batchNumber) } } } + + // For X Layer metrics.GetLogStatistics().CumulativeTiming(metrics.ProcessingTxTiming, time.Since(start)) + + // remove bad and mined transactions from the list for inclusion + for i := len(batchState.blockState.transactionsForInclusion) - 1; i >= 0; i-- { + tx := batchState.blockState.transactionsForInclusion[i] + hash := tx.Hash() + for _, badHash := range badTxHashes { + if badHash == hash { + batchState.blockState.transactionsForInclusion = removeInclusionTransaction(batchState.blockState.transactionsForInclusion, i) + break + } + } + + for _, minedHash := range minedTxHashes { + if minedHash == hash { + batchState.blockState.transactionsForInclusion = removeInclusionTransaction(batchState.blockState.transactionsForInclusion, i) + break + } + } + } + if batchState.isL1Recovery() { // just go into the normal loop waiting for new transactions to signal that the recovery // has finished as far as it can go @@ -521,21 +588,35 @@ func sequencingBatchStep( log.Info(fmt.Sprintf("[%s] L1 recovery no more transactions to recover", logPrefix)) } - break LOOP_TRANSACTIONS + break OuterLoopTransactions } if batchState.isLimboRecovery() { batchCloseReason = metrics.BatchLimboRecovery runLoopBlocks = false - break LOOP_TRANSACTIONS + break OuterLoopTransactions } } 
} + // we do not want to commit this block if it has no transactions and we detected an overflow - essentially the batch is too + // full to get any more transactions in it and we don't want to commit an empty block + if emptyBlockOverflow { + log.Info(fmt.Sprintf("[%s] Block %d overflow detected with no transactions added, skipping block for next batch", logPrefix, blockNumber)) + break + } + if block, err = doFinishBlockAndUpdateState(batchContext, ibs, header, parentBlock, batchState, ger, l1BlockHash, l1TreeUpdateIndex, infoTreeIndexProgress, batchCounters); err != nil { return err } + if err := cfg.txPool.RemoveMinedTransactions(ctx, sdb.tx, header.GasLimit, batchState.blockState.builtBlockElements.txSlots); err != nil { + return err + } + if err := cfg.txPool.RemoveMinedTransactions(ctx, sdb.tx, header.GasLimit, batchState.blockState.transactionsToDiscard); err != nil { + return err + } + if batchState.isLimboRecovery() { stateRoot := block.Root() cfg.txPool.UpdateLimboRootByTxHash(batchState.limboRecoveryData.limboTxHash, &stateRoot) @@ -628,3 +709,15 @@ func sequencingBatchStep( return err } + +func removeInclusionTransaction(orig []types.Transaction, index int) []types.Transaction { + if index < 0 || index >= len(orig) { + return orig + } + return append(orig[:index], orig[index+1:]...) 
+} + +func isOkKnownError(err error) bool { + return err == nil || + errors.Is(err, core.ErrNonceTooHigh) +} diff --git a/zk/stages/stage_sequence_execute_blocks.go b/zk/stages/stage_sequence_execute_blocks.go index 8e4731e14e1..9af748af6f2 100644 --- a/zk/stages/stage_sequence_execute_blocks.go +++ b/zk/stages/stage_sequence_execute_blocks.go @@ -313,6 +313,14 @@ func addSenders( cryptoContext := secp256k1.ContextForThread(1) senders := make([]common.Address, 0, len(finalTransactions)) for _, transaction := range finalTransactions { + from, ok := transaction.GetSender() + if ok { + senders = append(senders, from) + continue + } + + // shouldn't be hit as we preload this value before processing the transaction + // to look for errors in handling it. from, err := signer.SenderWithContext(cryptoContext, transaction) if err != nil { return err diff --git a/zk/stages/stage_sequence_execute_state.go b/zk/stages/stage_sequence_execute_state.go index 4e74f6210a8..d5af44c9df1 100644 --- a/zk/stages/stage_sequence_execute_state.go +++ b/zk/stages/stage_sequence_execute_state.go @@ -165,7 +165,11 @@ func (bs *BatchState) getCoinbase(cfg *SequenceBlockCfg) common.Address { } func (bs *BatchState) onAddedTransaction(transaction types.Transaction, receipt *types.Receipt, execResult *core.ExecutionResult, effectiveGas uint8) { - bs.blockState.builtBlockElements.onFinishAddingTransaction(transaction, receipt, execResult, effectiveGas) + slotId, ok := bs.blockState.transactionHashesToSlots[transaction.Hash()] + if !ok { + log.Warn("[batchState] transaction hash not found in transaction hashes to slots map", "hash", transaction.Hash()) + } + bs.blockState.builtBlockElements.onFinishAddingTransaction(transaction, receipt, execResult, effectiveGas, slotId) bs.hasAnyTransactionsInThisBatch = true } @@ -250,12 +254,16 @@ func newLimboRecoveryData(limboHeaderTimestamp uint64, limboTxHash *common.Hash) // TYPE BLOCK STATE type BlockState struct { transactionsForInclusion 
[]types.Transaction + transactionHashesToSlots map[common.Hash]common.Hash builtBlockElements BuiltBlockElements blockL1RecoveryData *zktx.DecodedBatchL2Data + transactionsToDiscard []common.Hash } func newBlockState() *BlockState { - return &BlockState{} + return &BlockState{ + transactionHashesToSlots: make(map[common.Hash]common.Hash), + } } func (bs *BlockState) hasAnyTransactionForInclusion() bool { @@ -294,6 +302,7 @@ type BuiltBlockElements struct { receipts types.Receipts effectiveGases []uint8 executionResults []*core.ExecutionResult + txSlots []common.Hash } func (bbe *BuiltBlockElements) resetBlockBuildingArrays() { @@ -303,11 +312,12 @@ func (bbe *BuiltBlockElements) resetBlockBuildingArrays() { bbe.executionResults = []*core.ExecutionResult{} } -func (bbe *BuiltBlockElements) onFinishAddingTransaction(transaction types.Transaction, receipt *types.Receipt, execResult *core.ExecutionResult, effectiveGas uint8) { +func (bbe *BuiltBlockElements) onFinishAddingTransaction(transaction types.Transaction, receipt *types.Receipt, execResult *core.ExecutionResult, effectiveGas uint8, slotId common.Hash) { bbe.transactions = append(bbe.transactions, transaction) bbe.receipts = append(bbe.receipts, receipt) bbe.executionResults = append(bbe.executionResults, execResult) bbe.effectiveGases = append(bbe.effectiveGases, effectiveGas) + bbe.txSlots = append(bbe.txSlots, slotId) } type resequenceTxMetadata struct { diff --git a/zk/stages/stage_sequence_execute_test.go b/zk/stages/stage_sequence_execute_test.go new file mode 100644 index 00000000000..b937eebafee --- /dev/null +++ b/zk/stages/stage_sequence_execute_test.go @@ -0,0 +1,199 @@ +package stages + +import ( + "context" + "math/big" + "testing" + "time" + + "github.com/holiman/uint256" + "github.com/ledgerwatch/erigon-lib/chain" + "github.com/ledgerwatch/erigon-lib/common" + cMocks "github.com/ledgerwatch/erigon-lib/kv/kvcache/mocks" + "github.com/ledgerwatch/erigon-lib/kv/memdb" + 
"github.com/ledgerwatch/erigon-lib/txpool/txpoolcfg" + "github.com/ledgerwatch/erigon/consensus" + "github.com/ledgerwatch/erigon/core/rawdb" + "github.com/ledgerwatch/erigon/core/state" + "github.com/ledgerwatch/erigon/core/types" + "github.com/ledgerwatch/erigon/eth/ethconfig" + "github.com/ledgerwatch/erigon/eth/stagedsync" + "github.com/ledgerwatch/erigon/eth/stagedsync/stages" + "github.com/ledgerwatch/erigon/ethdb/prune" + "github.com/ledgerwatch/erigon/smt/pkg/db" + dsMocks "github.com/ledgerwatch/erigon/zk/datastream/mocks" + "github.com/ledgerwatch/erigon/zk/hermez_db" + "github.com/ledgerwatch/erigon/zk/l1infotree" + verifier "github.com/ledgerwatch/erigon/zk/legacy_executor_verifier" + "github.com/ledgerwatch/erigon/zk/syncer" + "github.com/ledgerwatch/erigon/zk/syncer/mocks" + "github.com/ledgerwatch/erigon/zk/txpool" + zkTypes "github.com/ledgerwatch/erigon/zk/types" + "github.com/ledgerwatch/log/v3" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.uber.org/mock/gomock" +) + +func TestSpawnSequencingStage(t *testing.T) { + // Arrange + ctx, db1, txPoolDb := context.Background(), memdb.NewTestDB(t), memdb.NewTestDB(t) + tx := memdb.BeginRw(t, db1) + err := hermez_db.CreateHermezBuckets(tx) + require.NoError(t, err) + err = db.CreateEriDbBuckets(tx) + require.NoError(t, err) + + chainID := *uint256.NewInt(1) + forkID := uint64(11) + latestBatchNumber := uint64(20) + latestL1BlockNumber := big.NewInt(100) + latestL2BlockNumber := big.NewInt(100) + l1ContractAddresses := []common.Address{ + common.HexToAddress("0x1"), + common.HexToAddress("0x2"), + common.HexToAddress("0x3"), + } + l1ContractTopics := [][]common.Hash{ + []common.Hash{common.HexToHash("0x1")}, + []common.Hash{common.HexToHash("0x2")}, + []common.Hash{common.HexToHash("0x3")}, + } + + hDB := hermez_db.NewHermezDb(tx) + + err = hDB.WriteForkId(latestBatchNumber, forkID) + require.NoError(t, err) + + err = hDB.WriteNewForkHistory(forkID, latestBatchNumber) 
+ require.NoError(t, err) + + err = stages.SaveStageProgress(tx, stages.HighestSeenBatchNumber, latestBatchNumber) + require.NoError(t, err) + + err = stages.SaveStageProgress(tx, stages.Execution, latestL1BlockNumber.Uint64()) + require.NoError(t, err) + + hDB.WriteL1InfoTreeUpdate(&zkTypes.L1InfoTreeUpdate{ + Index: 1, + GER: common.HexToHash("0x1"), + MainnetExitRoot: common.HexToHash("0x2"), + RollupExitRoot: common.HexToHash("0x3"), + ParentHash: common.HexToHash("0x4"), + Timestamp: 100, + BlockNumber: latestL2BlockNumber.Uint64(), + }) + + latestL2BlockParentHash := common.HexToHash("0x123456789") + latestL2BlockTime := uint64(time.Now().Unix()) + latestL2BlockHeader := &types.Header{ParentHash: latestL2BlockParentHash, Number: latestL2BlockNumber, Time: latestL2BlockTime} + latestL2Block := types.NewBlockWithHeader(latestL2BlockHeader) + + err = rawdb.WriteBlock(tx, latestL2Block) + require.NoError(t, err) + err = rawdb.WriteCanonicalHash(tx, latestL2Block.Hash(), latestL2Block.NumberU64()) + require.NoError(t, err) + + err = tx.Commit() + require.NoError(t, err) + + s := &stagedsync.StageState{ID: stages.HighestSeenBatchNumber, BlockNumber: latestBatchNumber} + u := &stagedsync.Sync{} + + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + dataStreamServerMock := dsMocks.NewMockDataStreamServer(mockCtrl) + ethermanMock := mocks.NewMockIEtherman(mockCtrl) + engineMock := consensus.NewMockEngine(mockCtrl) + + dataStreamServerMock.EXPECT().GetHighestBatchNumber().Return(latestBatchNumber, nil).AnyTimes() + dataStreamServerMock.EXPECT().GetHighestClosedBatch().Return(latestBatchNumber, nil).AnyTimes() + dataStreamServerMock.EXPECT().GetHighestBlockNumber().Return(latestL1BlockNumber.Uint64(), nil).AnyTimes() + dataStreamServerMock.EXPECT(). + WriteBlockWithBatchStartToStream(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()). + Return(nil). 
+ AnyTimes() + + latestL1BlockParentHash := common.HexToHash("0x123456789") + latestL1BlockTime := uint64(time.Now().Unix()) + latestL1BlockHeader := &types.Header{ParentHash: latestL1BlockParentHash, Number: latestL1BlockNumber, Time: latestL1BlockTime} + latestL1Block := types.NewBlockWithHeader(latestL1BlockHeader) + + ethermanMock.EXPECT().BlockByNumber(gomock.Any(), nil).Return(latestL1Block, nil).AnyTimes() + + l1Syncer := syncer.NewL1Syncer(ctx, []syncer.IEtherman{ethermanMock}, l1ContractAddresses, l1ContractTopics, 10, 0, "latest") + updater := l1infotree.NewUpdater(&ethconfig.Zk{}, l1Syncer) + + cacheMock := cMocks.NewMockCache(mockCtrl) + cacheMock.EXPECT().View(gomock.Any(), gomock.Any()).Return(nil, nil).AnyTimes() + + txPool, err := txpool.New(nil, txPoolDb, txpoolcfg.Config{}, &ethconfig.Config{}, cacheMock, chainID, nil, nil, nil) + require.NoError(t, err) + + engineMock.EXPECT(). + Type(). + Return(chain.CliqueConsensus). + AnyTimes() + engineMock.EXPECT(). + FinalizeAndAssemble(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()). + DoAndReturn(func(config *chain.Config, header *types.Header, state *state.IntraBlockState, txs types.Transactions, uncles []*types.Header, receipts types.Receipts, withdrawals []*types.Withdrawal, chain consensus.ChainReader, syscall consensus.SystemCall, call consensus.Call, logger log.Logger) (*types.Block, types.Transactions, types.Receipts, error) { + finalBlock := types.NewBlockWithHeader(header) + return finalBlock, txs, receipts, nil + }). 
+ AnyTimes() + + zkCfg := &ethconfig.Zk{ + SequencerResequence: false, + SequencerBatchSealTime: 10 * time.Second, + SequencerBlockSealTime: 10 * time.Second, + InfoTreeUpdateInterval: 10 * time.Second, + } + + legacyVerifier := verifier.NewLegacyExecutorVerifier(*zkCfg, nil, db1, nil, nil) + + cfg := SequenceBlockCfg{ + dataStreamServer: dataStreamServerMock, + db: db1, + zk: zkCfg, + infoTreeUpdater: updater, + txPool: txPool, + chainConfig: &chain.Config{ChainID: chainID.ToBig()}, + txPoolDb: txPoolDb, + engine: engineMock, + legacyVerifier: legacyVerifier, + } + historyCfg := stagedsync.StageHistoryCfg(db1, prune.DefaultMode, "") + quiet := true + + // Act + err = SpawnSequencingStage(s, u, ctx, cfg, historyCfg, quiet) + require.NoError(t, err) + + // Assert + tx = memdb.BeginRw(t, db1) + hDB = hermez_db.NewHermezDb(tx) + + // WriteBlockL1InfoTreeIndex + l1InfoTreeIndex, err := hDB.GetBlockL1InfoTreeIndex(101) + require.NoError(t, err) + assert.Equal(t, uint64(1), l1InfoTreeIndex) + + // WriteBlockL1InfoTreeIndexProgress + blockNumber, l1InfoTreeIndex, err := hDB.GetLatestBlockL1InfoTreeIndexProgress() + require.NoError(t, err) + assert.Equal(t, uint64(101), blockNumber) + assert.Equal(t, uint64(1), l1InfoTreeIndex) + + // WriteBlockInfoRoot + root, err := hDB.GetBlockInfoRoot(101) + require.NoError(t, err) + assert.Equal(t, uint64(101), blockNumber) + assert.NotEmpty(t, root.String()) + + // IncrementStateVersionByBlockNumberIfNeeded + blockNumber, stateVersion, err := rawdb.GetLatestStateVersion(tx) + require.NoError(t, err) + assert.Equal(t, uint64(101), blockNumber) + assert.Equal(t, uint64(1), stateVersion) + tx.Rollback() +} diff --git a/zk/stages/stage_sequence_execute_transactions.go b/zk/stages/stage_sequence_execute_transactions.go index dcd65fca6da..c529b10e1ab 100644 --- a/zk/stages/stage_sequence_execute_transactions.go +++ b/zk/stages/stage_sequence_execute_transactions.go @@ -17,12 +17,14 @@ import ( "github.com/ledgerwatch/erigon/core/vm/evmtypes" 
"github.com/ledgerwatch/erigon/zk/utils" "github.com/ledgerwatch/log/v3" + "github.com/ledgerwatch/secp256k1" ) -func getNextPoolTransactions(ctx context.Context, cfg SequenceBlockCfg, executionAt, forkId uint64, alreadyYielded mapset.Set[[32]byte]) ([]types.Transaction, bool, error) { +func getNextPoolTransactions(ctx context.Context, cfg SequenceBlockCfg, executionAt, forkId uint64, alreadyYielded mapset.Set[[32]byte]) ([]types.Transaction, []common.Hash, bool, error) { cfg.txPool.LockFlusher() defer cfg.txPool.UnlockFlusher() + var ids []common.Hash var transactions []types.Transaction var allConditionsOk bool var err error @@ -37,7 +39,7 @@ func getNextPoolTransactions(ctx context.Context, cfg SequenceBlockCfg, executio if allConditionsOk, _, err = cfg.txPool.YieldBest(cfg.yieldSize, &slots, poolTx, executionAt, gasLimit, 0, alreadyYielded); err != nil { return err } - yieldedTxs, toRemove, err := extractTransactionsFromSlot(&slots) + yieldedTxs, yieldedIds, toRemove, err := extractTransactionsFromSlot(&slots, executionAt, cfg) if err != nil { return err } @@ -45,15 +47,16 @@ func getNextPoolTransactions(ctx context.Context, cfg SequenceBlockCfg, executio cfg.txPool.MarkForDiscardFromPendingBest(txId) } transactions = append(transactions, yieldedTxs...) + ids = append(ids, yieldedIds...) 
return nil }); err != nil { - return nil, allConditionsOk, err + return nil, nil, allConditionsOk, err } - return transactions, allConditionsOk, err + return transactions, ids, allConditionsOk, err } -func getLimboTransaction(ctx context.Context, cfg SequenceBlockCfg, txHash *common.Hash) ([]types.Transaction, error) { +func getLimboTransaction(ctx context.Context, cfg SequenceBlockCfg, txHash *common.Hash, executionAt uint64) ([]types.Transaction, error) { cfg.txPool.LockFlusher() defer cfg.txPool.UnlockFlusher() @@ -68,7 +71,7 @@ func getLimboTransaction(ctx context.Context, cfg SequenceBlockCfg, txHash *comm if slots != nil { // ignore the toRemove value here, we know the RLP will be sound as we had to read it from the pool // in the first place to get it into limbo - transactions, _, err = extractTransactionsFromSlot(slots) + transactions, _, _, err = extractTransactionsFromSlot(slots, executionAt, cfg) if err != nil { return err } @@ -82,9 +85,12 @@ func getLimboTransaction(ctx context.Context, cfg SequenceBlockCfg, txHash *comm return transactions, nil } -func extractTransactionsFromSlot(slot *types2.TxsRlp) ([]types.Transaction, []common.Hash, error) { +func extractTransactionsFromSlot(slot *types2.TxsRlp, currentHeight uint64, cfg SequenceBlockCfg) ([]types.Transaction, []common.Hash, []common.Hash, error) { + ids := make([]common.Hash, 0, len(slot.TxIds)) transactions := make([]types.Transaction, 0, len(slot.Txs)) toRemove := make([]common.Hash, 0) + signer := types.MakeSigner(cfg.chainConfig, currentHeight, 0) + cryptoContext := secp256k1.ContextForThread(1) for idx, txBytes := range slot.Txs { transaction, err := types.DecodeTransaction(txBytes) if err == io.EOF { @@ -93,16 +99,28 @@ func extractTransactionsFromSlot(slot *types2.TxsRlp) ([]types.Transaction, []co if err != nil { // we have a transaction that cannot be decoded or a similar issue. 
We don't want to handle // this tx so just WARN about it and remove it from the pool and continue - log.Warn("Failed to decode transaction from pool, skipping and removing from pool", "error", err) + log.Warn("[extractTransaction] Failed to decode transaction from pool, skipping and removing from pool", + "error", err, + "id", slot.TxIds[idx]) toRemove = append(toRemove, slot.TxIds[idx]) continue } - var sender common.Address - copy(sender[:], slot.Senders.At(idx)) + + // now attempt to recover the sender + sender, err := signer.SenderWithContext(cryptoContext, transaction) + if err != nil { + log.Warn("[extractTransaction] Failed to recover sender from transaction, skipping and removing from pool", + "error", err, + "hash", transaction.Hash()) + toRemove = append(toRemove, slot.TxIds[idx]) + continue + } + transaction.SetSender(sender) transactions = append(transactions, transaction) + ids = append(ids, slot.TxIds[idx]) } - return transactions, toRemove, nil + return transactions, ids, toRemove, nil } type overflowType uint8 diff --git a/zk/stages/stage_witness.go b/zk/stages/stage_witness.go new file mode 100644 index 00000000000..34f928ef6e7 --- /dev/null +++ b/zk/stages/stage_witness.go @@ -0,0 +1,327 @@ +package stages + +import ( + "context" + "fmt" + "time" + + "github.com/ledgerwatch/erigon-lib/chain" + "github.com/ledgerwatch/erigon-lib/common" + "github.com/ledgerwatch/erigon-lib/common/datadir" + "github.com/ledgerwatch/erigon-lib/kv" + "github.com/ledgerwatch/erigon-lib/kv/membatchwithdb" + eristate "github.com/ledgerwatch/erigon-lib/state" + "github.com/ledgerwatch/erigon/core" + "github.com/ledgerwatch/erigon/core/systemcontracts" + eritypes "github.com/ledgerwatch/erigon/core/types" + "github.com/ledgerwatch/erigon/core/vm" + zkUtils "github.com/ledgerwatch/erigon/zk/utils" + "github.com/ledgerwatch/erigon/zk/witness" + + "github.com/ledgerwatch/erigon/consensus" + "github.com/ledgerwatch/erigon/core/state" + 
"github.com/ledgerwatch/erigon/eth/stagedsync" + "github.com/ledgerwatch/erigon/eth/stagedsync/stages" + "github.com/ledgerwatch/erigon/turbo/services" + "github.com/ledgerwatch/erigon/zk/hermez_db" + "github.com/ledgerwatch/erigon/zk/sequencer" + + "github.com/ledgerwatch/erigon/core/rawdb" + "github.com/ledgerwatch/erigon/eth/ethconfig" + "github.com/ledgerwatch/log/v3" +) + +type WitnessDb interface { +} + +type WitnessCfg struct { + db kv.RwDB + zkCfg *ethconfig.Zk + chainConfig *chain.Config + engine consensus.Engine + blockReader services.FullBlockReader + agg *eristate.Aggregator + historyV3 bool + dirs datadir.Dirs + forcedContracs []common.Address +} + +func StageWitnessCfg(db kv.RwDB, zkCfg *ethconfig.Zk, chainConfig *chain.Config, engine consensus.Engine, blockReader services.FullBlockReader, agg *eristate.Aggregator, historyV3 bool, dirs datadir.Dirs, forcedContracs []common.Address) WitnessCfg { + cfg := WitnessCfg{ + db: db, + zkCfg: zkCfg, + chainConfig: chainConfig, + engine: engine, + blockReader: blockReader, + agg: agg, + historyV3: historyV3, + dirs: dirs, + forcedContracs: forcedContracs, + } + + return cfg +} + +// /////////////////////////////////////////// +// 1. Check to which block it should calculate witnesses +// 2. Unwind to that block +// 3. Calculate witnesses up to current executed block +// 4. Delete old block witnesses +// //////////////////////////////////////////// +func SpawnStageWitness( + s *stagedsync.StageState, + u stagedsync.Unwinder, + ctx context.Context, + tx kv.RwTx, + cfg WitnessCfg, +) error { + logPrefix := s.LogPrefix() + if cfg.zkCfg.WitnessCacheLimit == 0 { + log.Info(fmt.Sprintf("[%s] Skipping witness cache stage. 
Cache not set or limit is set to 0", logPrefix)) + return nil + } + log.Info(fmt.Sprintf("[%s] Starting witness cache stage", logPrefix)) + if sequencer.IsSequencer() { + log.Info(fmt.Sprintf("[%s] skipping -- sequencer", logPrefix)) + return nil + } + defer log.Info(fmt.Sprintf("[%s] Finished witness cache stage", logPrefix)) + + freshTx := false + if tx == nil { + freshTx = true + log.Debug(fmt.Sprintf("[%s] no tx provided, creating a new one", logPrefix)) + var err error + tx, err = cfg.db.BeginRw(ctx) + if err != nil { + return fmt.Errorf("cfg.db.BeginRw, %w", err) + } + defer tx.Rollback() + } + + stageWitnessProgressBlockNo, err := stages.GetStageProgress(tx, stages.Witness) + if err != nil { + return fmt.Errorf("GetStageProgress: %w", err) + } + + stageInterhashesProgressBlockNo, err := stages.GetStageProgress(tx, stages.IntermediateHashes) + if err != nil { + return fmt.Errorf("GetStageProgress: %w", err) + } + + if stageInterhashesProgressBlockNo <= stageWitnessProgressBlockNo { + log.Info(fmt.Sprintf("[%s] Skipping stage, no new blocks", logPrefix)) + return nil + } + + unwindPoint := stageWitnessProgressBlockNo + if stageInterhashesProgressBlockNo-cfg.zkCfg.WitnessCacheLimit > unwindPoint { + unwindPoint = stageInterhashesProgressBlockNo - cfg.zkCfg.WitnessCacheLimit + } + + //get unwind point to be end of previous batch + hermezDb := hermez_db.NewHermezDb(tx) + blocks, err := getBlocks(tx, unwindPoint, stageInterhashesProgressBlockNo) + if err != nil { + return fmt.Errorf("getBlocks: %w", err) + } + + // generator := witness.NewGenerator(cfg.dirs, cfg.historyV3, cfg.agg, cfg.blockReader, cfg.chainConfig, cfg.zkCfg, cfg.engine) + memTx := membatchwithdb.NewMemoryBatchWithSize(tx, cfg.dirs.Tmp, cfg.zkCfg.WitnessMemdbSize) + defer memTx.Rollback() + if err := zkUtils.PopulateMemoryMutationTables(memTx); err != nil { + return fmt.Errorf("PopulateMemoryMutationTables: %w", err) + } + memHermezDb := hermez_db.NewHermezDbReader(memTx) + + 
log.Info(fmt.Sprintf("[%s] Unwinding tree and hashes for witness generation", logPrefix), "from", unwindPoint, "to", stageInterhashesProgressBlockNo) + if err := witness.UnwindForWitness(ctx, memTx, unwindPoint, stageInterhashesProgressBlockNo, cfg.dirs, cfg.historyV3, cfg.agg); err != nil { + return fmt.Errorf("UnwindForWitness: %w", err) + } + log.Info(fmt.Sprintf("[%s] Unwind done", logPrefix)) + startBlock := blocks[0].NumberU64() + + prevHeader, err := cfg.blockReader.HeaderByNumber(ctx, tx, startBlock-1) + if err != nil { + return fmt.Errorf("blockReader.HeaderByNumber: %w", err) + } + + getHeader := func(hash common.Hash, number uint64) *eritypes.Header { + h, e := cfg.blockReader.Header(ctx, tx, hash, number) + if e != nil { + log.Error("getHeader error", "number", number, "hash", hash, "err", e) + } + return h + } + + reader := state.NewPlainState(tx, blocks[0].NumberU64(), systemcontracts.SystemContractCodeLookup[cfg.chainConfig.ChainName]) + defer reader.Close() + prevStateRoot := prevHeader.Root + + log.Info(fmt.Sprintf("[%s] Executing blocks and collecting witnesses", logPrefix), "from", startBlock, "to", stageInterhashesProgressBlockNo) + + now := time.Now() + for _, block := range blocks { + reader.SetBlockNr(block.NumberU64()) + tds := state.NewTrieDbState(prevHeader.Root, tx, startBlock-1, nil) + tds.SetResolveReads(true) + tds.StartNewBuffer() + tds.SetStateReader(reader) + + trieStateWriter := tds.NewTrieStateWriter() + if err := witness.PrepareGersForWitness(block, memHermezDb, tds, trieStateWriter); err != nil { + return fmt.Errorf("PrepareGersForWitness: %w", err) + } + + getHashFn := core.GetHashFn(block.Header(), getHeader) + + chainReader := stagedsync.NewChainReaderImpl(cfg.chainConfig, tx, nil, log.New()) + + vmConfig := vm.Config{} + if _, err = core.ExecuteBlockEphemerallyZk(cfg.chainConfig, &vmConfig, getHashFn, cfg.engine, block, tds, trieStateWriter, chainReader, nil, hermezDb, &prevStateRoot); err != nil { + return 
fmt.Errorf("ExecuteBlockEphemerallyZk: %w", err) + } + + prevStateRoot = block.Root() + + w, err := witness.BuildWitnessFromTrieDbState(ctx, memTx, tds, reader, cfg.forcedContracs, false) + if err != nil { + return fmt.Errorf("BuildWitnessFromTrieDbState: %w", err) + } + + bytes, err := witness.GetWitnessBytes(w, false) + if err != nil { + return fmt.Errorf("GetWitnessBytes: %w", err) + } + + if err := hermezDb.WriteWitnessCache(block.NumberU64(), bytes); err != nil { + return fmt.Errorf("WriteWitnessCache: %w", err) + } + if time.Since(now) > 10*time.Second { + log.Info(fmt.Sprintf("[%s] Executing blocks and collecting witnesses", logPrefix), "block", block.NumberU64()) + now = time.Now() + } + } + log.Info(fmt.Sprintf("[%s] Witnesses collected", logPrefix)) + + // delete cache for blocks lower than the limit + log.Info(fmt.Sprintf("[%s] Deleting old witness caches", logPrefix)) + if err := hermezDb.DeleteWitnessCaches(0, stageInterhashesProgressBlockNo-cfg.zkCfg.WitnessCacheLimit); err != nil { + return fmt.Errorf("DeleteWitnessCache: %w", err) + } + + if err := stages.SaveStageProgress(tx, stages.Witness, stageInterhashesProgressBlockNo); err != nil { + return fmt.Errorf("SaveStageProgress: %w", err) + } + + log.Info(fmt.Sprintf("[%s] Saving stage progress", logPrefix), "lastBlockNumber", stageInterhashesProgressBlockNo) + + if freshTx { + if err := tx.Commit(); err != nil { + return fmt.Errorf("tx.Commit: %w", err) + } + } + + return nil +} + +func getBlocks(tx kv.Tx, startBlock, endBlock uint64) (blocks []*eritypes.Block, err error) { + idx := 0 + blocks = make([]*eritypes.Block, endBlock-startBlock+1) + for blockNum := startBlock; blockNum <= endBlock; blockNum++ { + block, err := rawdb.ReadBlockByNumber(tx, blockNum) + if err != nil { + return nil, fmt.Errorf("ReadBlockByNumber: %w", err) + } + blocks[idx] = block + idx++ + } + + return blocks, nil +} + +func UnwindWitnessStage(u *stagedsync.UnwindState, tx kv.RwTx, cfg WitnessCfg, ctx context.Context) (err error) 
{ + logPrefix := u.LogPrefix() + if cfg.zkCfg.WitnessCacheLimit == 0 { + log.Info(fmt.Sprintf("[%s] Skipping witness cache stage. Cache not set or limit is set to 0", logPrefix)) + return nil + } + useExternalTx := tx != nil + if !useExternalTx { + if tx, err = cfg.db.BeginRw(ctx); err != nil { + return fmt.Errorf("cfg.db.BeginRw: %w", err) + } + defer tx.Rollback() + } + + if cfg.zkCfg.WitnessCacheLimit == 0 { + log.Info(fmt.Sprintf("[%s] Skipping witness cache stage. Cache not set or limit is set to 0", logPrefix)) + return nil + } + + fromBlock := u.UnwindPoint + 1 + toBlock := u.CurrentBlockNumber + log.Info(fmt.Sprintf("[%s] Unwinding witness cache stage from block number", logPrefix), "fromBlock", fromBlock, "toBlock", toBlock) + defer log.Info(fmt.Sprintf("[%s] Unwinding witness cache complete", logPrefix)) + + hermezDb := hermez_db.NewHermezDb(tx) + if err := hermezDb.DeleteWitnessCaches(fromBlock, toBlock); err != nil { + return fmt.Errorf("DeleteWitnessCache: %w", err) + } + + if err := stages.SaveStageProgress(tx, stages.Witness, fromBlock); err != nil { + return fmt.Errorf("SaveStageProgress: %w", err) + } + + if err := u.Done(tx); err != nil { + return fmt.Errorf("u.Done: %w", err) + } + if !useExternalTx { + if err := tx.Commit(); err != nil { + return fmt.Errorf("tx.Commit: %w", err) + } + } + return nil +} + +func PruneWitnessStage(s *stagedsync.PruneState, tx kv.RwTx, cfg WitnessCfg, ctx context.Context) (err error) { + logPrefix := s.LogPrefix() + if cfg.zkCfg.WitnessCacheLimit == 0 { + log.Info(fmt.Sprintf("[%s] Skipping witness cache stage. 
Cache not set or limit is set to 0", logPrefix)) + return nil + } + useExternalTx := tx != nil + if !useExternalTx { + tx, err = cfg.db.BeginRw(ctx) + if err != nil { + return fmt.Errorf("cfg.db.BeginRw: %w", err) + } + defer tx.Rollback() + } + + log.Info(fmt.Sprintf("[%s] Pruning witness caches...", logPrefix)) + defer log.Info(fmt.Sprintf("[%s] Pruning witness caches complete", logPrefix)) + + hermezDb := hermez_db.NewHermezDb(tx) + + toBlock, err := stages.GetStageProgress(tx, stages.Witness) + if err != nil { + return fmt.Errorf("GetStageProgress: %w", err) + } + + if err := hermezDb.DeleteWitnessCaches(0, toBlock); err != nil { + return fmt.Errorf("DeleteWitnessCache: %w", err) + } + + log.Info(fmt.Sprintf("[%s] Saving stage progress", logPrefix), "stageProgress", 0) + if err := stages.SaveStageProgress(tx, stages.Witness, 0); err != nil { + return fmt.Errorf("SaveStageProgress: %w", err) + } + + if !useExternalTx { + if err := tx.Commit(); err != nil { + return fmt.Errorf("tx.Commit: %w", err) + } + } + return nil +} diff --git a/zk/stages/stages.go b/zk/stages/stages.go index 4ada15e99ec..ef7d76a435b 100644 --- a/zk/stages/stages.go +++ b/zk/stages/stages.go @@ -98,7 +98,7 @@ func SequencerZkStages( if sequencerErr != nil || u.IsUnwindSet() { exec.legacyVerifier.CancelAllRequests() // on the begining of next iteration the EXECUTION will be aligned to DS - shouldCheckForExecutionAndDataStreamAlighment = true + shouldCheckForExecutionAndDataStreamAlignment = true } return sequencerErr }, @@ -233,6 +233,7 @@ func DefaultZkStages( exec stages.ExecuteBlockCfg, hashState stages.HashStateCfg, zkInterHashesCfg ZkInterHashesCfg, + stageWitnessCfg WitnessCfg, history stages.HistoryCfg, logIndex stages.LogIndexCfg, callTraces stages.CallTracesCfg, @@ -439,6 +440,20 @@ func DefaultZkStages( return nil }, }, + { + ID: stages2.Witness, + Description: "Generate witness caches for each block", + Disabled: false, + Forward: func(firstCycle bool, badBlockUnwind bool, s 
*stages.StageState, u stages.Unwinder, txc wrap.TxContainer, logger log.Logger) error { + return SpawnStageWitness(s, u, ctx, txc.Tx, stageWitnessCfg) + }, + Unwind: func(firstCycle bool, u *stages.UnwindState, s *stages.StageState, txc wrap.TxContainer, logger log.Logger) error { + return UnwindWitnessStage(u, txc.Tx, stageWitnessCfg, ctx) + }, + Prune: func(firstCycle bool, p *stages.PruneState, tx kv.RwTx, logger log.Logger) error { + return PruneWitnessStage(p, tx, stageWitnessCfg, ctx) + }, + }, { ID: stages2.Finish, Description: "Final: update current block for the RPC API", diff --git a/zk/syncer/l1_syncer.go b/zk/syncer/l1_syncer.go index 2d5f3984917..0ee815d9e7a 100644 --- a/zk/syncer/l1_syncer.go +++ b/zk/syncer/l1_syncer.go @@ -35,6 +35,8 @@ const ( sequencedBatchesMapSignature = "0xb4d63f58" ) +//go:generate mockgen -typed=true -destination=./mocks/etherman_mock.go -package=mocks . IEtherman + type IEtherman interface { HeaderByNumber(ctx context.Context, blockNumber *big.Int) (*ethTypes.Header, error) BlockByNumber(ctx context.Context, blockNumber *big.Int) (*ethTypes.Block, error) @@ -205,17 +207,17 @@ func (s *L1Syncer) RunQueryBlocks(lastCheckedBlock uint64) { func (s *L1Syncer) GetHeader(number uint64) (*ethTypes.Header, error) { em := s.getNextEtherman() - return em.HeaderByNumber(context.Background(), new(big.Int).SetUint64(number)) + return em.HeaderByNumber(s.ctx, new(big.Int).SetUint64(number)) } func (s *L1Syncer) GetBlock(number uint64) (*ethTypes.Block, error) { em := s.getNextEtherman() - return em.BlockByNumber(context.Background(), new(big.Int).SetUint64(number)) + return em.BlockByNumber(s.ctx, new(big.Int).SetUint64(number)) } func (s *L1Syncer) GetTransaction(hash common.Hash) (ethTypes.Transaction, bool, error) { em := s.getNextEtherman() - return em.TransactionByHash(context.Background(), hash) + return em.TransactionByHash(s.ctx, hash) } func (s *L1Syncer) GetPreElderberryAccInputHash(ctx context.Context, addr *common.Address, 
batchNum uint64) (common.Hash, error) { diff --git a/zk/syncer/mocks/etherman_mock.go b/zk/syncer/mocks/etherman_mock.go new file mode 100644 index 00000000000..0d55810bb51 --- /dev/null +++ b/zk/syncer/mocks/etherman_mock.go @@ -0,0 +1,318 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/ledgerwatch/erigon/zk/syncer (interfaces: IEtherman) +// +// Generated by this command: +// +// mockgen -typed=true -destination=./mocks/etherman_mock.go -package=mocks . IEtherman +// + +// Package mocks is a generated GoMock package. +package mocks + +import ( + context "context" + big "math/big" + reflect "reflect" + + ethereum "github.com/ledgerwatch/erigon" + common "github.com/ledgerwatch/erigon-lib/common" + types "github.com/ledgerwatch/erigon/core/types" + gomock "go.uber.org/mock/gomock" +) + +// MockIEtherman is a mock of IEtherman interface. +type MockIEtherman struct { + ctrl *gomock.Controller + recorder *MockIEthermanMockRecorder +} + +// MockIEthermanMockRecorder is the mock recorder for MockIEtherman. +type MockIEthermanMockRecorder struct { + mock *MockIEtherman +} + +// NewMockIEtherman creates a new mock instance. +func NewMockIEtherman(ctrl *gomock.Controller) *MockIEtherman { + mock := &MockIEtherman{ctrl: ctrl} + mock.recorder = &MockIEthermanMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockIEtherman) EXPECT() *MockIEthermanMockRecorder { + return m.recorder +} + +// BlockByNumber mocks base method. +func (m *MockIEtherman) BlockByNumber(arg0 context.Context, arg1 *big.Int) (*types.Block, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "BlockByNumber", arg0, arg1) + ret0, _ := ret[0].(*types.Block) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// BlockByNumber indicates an expected call of BlockByNumber. 
+func (mr *MockIEthermanMockRecorder) BlockByNumber(arg0, arg1 any) *MockIEthermanBlockByNumberCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BlockByNumber", reflect.TypeOf((*MockIEtherman)(nil).BlockByNumber), arg0, arg1) + return &MockIEthermanBlockByNumberCall{Call: call} +} + +// MockIEthermanBlockByNumberCall wrap *gomock.Call +type MockIEthermanBlockByNumberCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockIEthermanBlockByNumberCall) Return(arg0 *types.Block, arg1 error) *MockIEthermanBlockByNumberCall { + c.Call = c.Call.Return(arg0, arg1) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockIEthermanBlockByNumberCall) Do(f func(context.Context, *big.Int) (*types.Block, error)) *MockIEthermanBlockByNumberCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockIEthermanBlockByNumberCall) DoAndReturn(f func(context.Context, *big.Int) (*types.Block, error)) *MockIEthermanBlockByNumberCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// CallContract mocks base method. +func (m *MockIEtherman) CallContract(arg0 context.Context, arg1 ethereum.CallMsg, arg2 *big.Int) ([]byte, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CallContract", arg0, arg1, arg2) + ret0, _ := ret[0].([]byte) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CallContract indicates an expected call of CallContract. 
+func (mr *MockIEthermanMockRecorder) CallContract(arg0, arg1, arg2 any) *MockIEthermanCallContractCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CallContract", reflect.TypeOf((*MockIEtherman)(nil).CallContract), arg0, arg1, arg2) + return &MockIEthermanCallContractCall{Call: call} +} + +// MockIEthermanCallContractCall wrap *gomock.Call +type MockIEthermanCallContractCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockIEthermanCallContractCall) Return(arg0 []byte, arg1 error) *MockIEthermanCallContractCall { + c.Call = c.Call.Return(arg0, arg1) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockIEthermanCallContractCall) Do(f func(context.Context, ethereum.CallMsg, *big.Int) ([]byte, error)) *MockIEthermanCallContractCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockIEthermanCallContractCall) DoAndReturn(f func(context.Context, ethereum.CallMsg, *big.Int) ([]byte, error)) *MockIEthermanCallContractCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// FilterLogs mocks base method. +func (m *MockIEtherman) FilterLogs(arg0 context.Context, arg1 ethereum.FilterQuery) ([]types.Log, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "FilterLogs", arg0, arg1) + ret0, _ := ret[0].([]types.Log) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// FilterLogs indicates an expected call of FilterLogs. 
+func (mr *MockIEthermanMockRecorder) FilterLogs(arg0, arg1 any) *MockIEthermanFilterLogsCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FilterLogs", reflect.TypeOf((*MockIEtherman)(nil).FilterLogs), arg0, arg1) + return &MockIEthermanFilterLogsCall{Call: call} +} + +// MockIEthermanFilterLogsCall wrap *gomock.Call +type MockIEthermanFilterLogsCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockIEthermanFilterLogsCall) Return(arg0 []types.Log, arg1 error) *MockIEthermanFilterLogsCall { + c.Call = c.Call.Return(arg0, arg1) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockIEthermanFilterLogsCall) Do(f func(context.Context, ethereum.FilterQuery) ([]types.Log, error)) *MockIEthermanFilterLogsCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockIEthermanFilterLogsCall) DoAndReturn(f func(context.Context, ethereum.FilterQuery) ([]types.Log, error)) *MockIEthermanFilterLogsCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// HeaderByNumber mocks base method. +func (m *MockIEtherman) HeaderByNumber(arg0 context.Context, arg1 *big.Int) (*types.Header, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "HeaderByNumber", arg0, arg1) + ret0, _ := ret[0].(*types.Header) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// HeaderByNumber indicates an expected call of HeaderByNumber. 
+func (mr *MockIEthermanMockRecorder) HeaderByNumber(arg0, arg1 any) *MockIEthermanHeaderByNumberCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HeaderByNumber", reflect.TypeOf((*MockIEtherman)(nil).HeaderByNumber), arg0, arg1) + return &MockIEthermanHeaderByNumberCall{Call: call} +} + +// MockIEthermanHeaderByNumberCall wrap *gomock.Call +type MockIEthermanHeaderByNumberCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockIEthermanHeaderByNumberCall) Return(arg0 *types.Header, arg1 error) *MockIEthermanHeaderByNumberCall { + c.Call = c.Call.Return(arg0, arg1) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockIEthermanHeaderByNumberCall) Do(f func(context.Context, *big.Int) (*types.Header, error)) *MockIEthermanHeaderByNumberCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockIEthermanHeaderByNumberCall) DoAndReturn(f func(context.Context, *big.Int) (*types.Header, error)) *MockIEthermanHeaderByNumberCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// StorageAt mocks base method. +func (m *MockIEtherman) StorageAt(arg0 context.Context, arg1 common.Address, arg2 common.Hash, arg3 *big.Int) ([]byte, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StorageAt", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].([]byte) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StorageAt indicates an expected call of StorageAt. 
+func (mr *MockIEthermanMockRecorder) StorageAt(arg0, arg1, arg2, arg3 any) *MockIEthermanStorageAtCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StorageAt", reflect.TypeOf((*MockIEtherman)(nil).StorageAt), arg0, arg1, arg2, arg3) + return &MockIEthermanStorageAtCall{Call: call} +} + +// MockIEthermanStorageAtCall wrap *gomock.Call +type MockIEthermanStorageAtCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockIEthermanStorageAtCall) Return(arg0 []byte, arg1 error) *MockIEthermanStorageAtCall { + c.Call = c.Call.Return(arg0, arg1) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockIEthermanStorageAtCall) Do(f func(context.Context, common.Address, common.Hash, *big.Int) ([]byte, error)) *MockIEthermanStorageAtCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockIEthermanStorageAtCall) DoAndReturn(f func(context.Context, common.Address, common.Hash, *big.Int) ([]byte, error)) *MockIEthermanStorageAtCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// TransactionByHash mocks base method. +func (m *MockIEtherman) TransactionByHash(arg0 context.Context, arg1 common.Hash) (types.Transaction, bool, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "TransactionByHash", arg0, arg1) + ret0, _ := ret[0].(types.Transaction) + ret1, _ := ret[1].(bool) + ret2, _ := ret[2].(error) + return ret0, ret1, ret2 +} + +// TransactionByHash indicates an expected call of TransactionByHash. 
+func (mr *MockIEthermanMockRecorder) TransactionByHash(arg0, arg1 any) *MockIEthermanTransactionByHashCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TransactionByHash", reflect.TypeOf((*MockIEtherman)(nil).TransactionByHash), arg0, arg1) + return &MockIEthermanTransactionByHashCall{Call: call} +} + +// MockIEthermanTransactionByHashCall wrap *gomock.Call +type MockIEthermanTransactionByHashCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockIEthermanTransactionByHashCall) Return(arg0 types.Transaction, arg1 bool, arg2 error) *MockIEthermanTransactionByHashCall { + c.Call = c.Call.Return(arg0, arg1, arg2) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockIEthermanTransactionByHashCall) Do(f func(context.Context, common.Hash) (types.Transaction, bool, error)) *MockIEthermanTransactionByHashCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockIEthermanTransactionByHashCall) DoAndReturn(f func(context.Context, common.Hash) (types.Transaction, bool, error)) *MockIEthermanTransactionByHashCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// TransactionReceipt mocks base method. +func (m *MockIEtherman) TransactionReceipt(arg0 context.Context, arg1 common.Hash) (*types.Receipt, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "TransactionReceipt", arg0, arg1) + ret0, _ := ret[0].(*types.Receipt) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// TransactionReceipt indicates an expected call of TransactionReceipt. 
+func (mr *MockIEthermanMockRecorder) TransactionReceipt(arg0, arg1 any) *MockIEthermanTransactionReceiptCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TransactionReceipt", reflect.TypeOf((*MockIEtherman)(nil).TransactionReceipt), arg0, arg1) + return &MockIEthermanTransactionReceiptCall{Call: call} +} + +// MockIEthermanTransactionReceiptCall wrap *gomock.Call +type MockIEthermanTransactionReceiptCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockIEthermanTransactionReceiptCall) Return(arg0 *types.Receipt, arg1 error) *MockIEthermanTransactionReceiptCall { + c.Call = c.Call.Return(arg0, arg1) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockIEthermanTransactionReceiptCall) Do(f func(context.Context, common.Hash) (*types.Receipt, error)) *MockIEthermanTransactionReceiptCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockIEthermanTransactionReceiptCall) DoAndReturn(f func(context.Context, common.Hash) (*types.Receipt, error)) *MockIEthermanTransactionReceiptCall { + c.Call = c.Call.DoAndReturn(f) + return c +} diff --git a/zk/syncer/utils.go b/zk/syncer/utils.go index a6ad885af74..a8e63273c0a 100644 --- a/zk/syncer/utils.go +++ b/zk/syncer/utils.go @@ -108,7 +108,9 @@ func DecodeSequenceBatchesCalldata(data []byte) (calldata interface{}, err error case contracts.SequenceBatchesIdv6_6: if method.Name == sequenceBatchesMethodName { return decodeElderberryBatchesCallData(unpackedCalldata), nil - } else { + } + case contracts.SequenceBatchesValidiumElderBerry: + if method.Name == sequenceBatchesValidiumMethodName { return decodeElderberryBatchesValidiumCallData(unpackedCalldata), nil } case contracts.SequenceBatchesBanana: @@ -120,6 +122,8 @@ func DecodeSequenceBatchesCalldata(data []byte) (calldata interface{}, err error default: return nil, fmt.Errorf("no decoder found for method signature: %s", methodSig) } + + return 
nil, fmt.Errorf("no decoder found for method signature: %s", methodSig) } type SequencedBatchBanana struct { diff --git a/zk/tests/unwinds/unwind.sh b/zk/tests/unwinds/unwind.sh index b48f1c15c55..e404be21ae7 100755 --- a/zk/tests/unwinds/unwind.sh +++ b/zk/tests/unwinds/unwind.sh @@ -11,103 +11,179 @@ # 8. dump the data # 9. compare the dumps at the unwind level and tip level +SECONDS=0 + +dspid=$(lsof -i :6900 | awk 'NR==2 {print $2}') +kill -9 "$dspid" + +cleanup() { + echo "killing datastream server" + if [[ -n "$dspid" ]]; then + echo "killing process with PID $dspid on port 6900" + kill -9 "$dspid" + fi + + echo "cleaning data directories" + rm -rf "$dataPath/rpc-datadir" + rm -rf "$dataPath/phase1-dump1" + rm -rf "$dataPath/phase1-dump2" + + rm -rf "$dataPath/phase2-dump1" + rm -rf "$dataPath/phase2-dump2" + + echo "Total execution time: $SECONDS seconds" +} + +trap cleanup EXIT + dataPath="./datadir" -firstStop=11204 +datastreamPath="zk/tests/unwinds/datastream" +datastreamZipFileName="./datastream-net8-upto-11318-101.zip" +firstStop=11203 stopBlock=11315 unwindBatch=70 -firstTimeout=300s -secondTimeout=300s + +pushd "$datastreamPath" || exit + tar -xzf "$datastreamZipFileName" +popd || exit rm -rf "$dataPath/rpc-datadir" rm -rf "$dataPath/phase1-dump1" rm -rf "$dataPath/phase1-dump2" + rm -rf "$dataPath/phase2-dump1" rm -rf "$dataPath/phase2-dump2" -rm -rf "$dataPath/phase1-diffs" -rm -rf "$dataPath/phase2-diffs" + +# rm -rf "$dataPath/phase1-diffs" +# rm -rf "$dataPath/phase2-diffs" # run datastream server +echo -e '\nStarting datastream server \n' go run ./zk/debug_tools/datastream-host --file="$(pwd)/zk/tests/unwinds/datastream/hermez-dynamic-integration8-datastream/data-stream.bin" & -# in order to start the datastream server -sleep 10 +dspid=$! # get the id of the DS process + +echo "Waiting for datastream server to become available on port 6900..." +while ! bash -c "/dev/null; do + sleep 1 +done +echo "Datastream server is now available." 
-# run erigon for a while to sync to the unwind point to capture the dump -timeout $firstTimeout ./build/bin/cdk-erigon \ +# try with 1 and check on time +echo -e '\nRun Erigon to BlockHeight: ' "${firstStop}" '\n' +./build/bin/cdk-erigon \ --datadir="$dataPath/rpc-datadir" \ - --config=./dynamic-integration8.yaml \ - --zkevm.sync-limit=${firstStop} + --config="zk/tests/unwinds/config/dynamic-integration8.yaml" \ + --debug.limit="${firstStop}" + +echo -e '\nDumping data \n' # now get a dump of the datadir at this point go run ./cmd/hack --action=dumpAll --chaindata="$dataPath/rpc-datadir/chaindata" --output="$dataPath/phase1-dump1" +echo -e '\nRun Erigon to Block Height: ' "${stopBlock}" '\n' # now run to the final stop block -timeout $secondTimeout ./build/bin/cdk-erigon \ +./build/bin/cdk-erigon \ --datadir="$dataPath/rpc-datadir" \ - --config=./dynamic-integration8.yaml \ - --zkevm.sync-limit=${stopBlock} + --config="zk/tests/unwinds/config/dynamic-integration8.yaml" \ + --debug.limit="${stopBlock}" +echo -e '\nDumping data phase 2 \n' # now get a dump of the datadir at this point go run ./cmd/hack --action=dumpAll --chaindata="$dataPath/rpc-datadir/chaindata" --output="$dataPath/phase2-dump1" # now run the unwind +echo -e '\nUnwinding to batch: ' "${unwindBatch}" '\n' go run ./cmd/integration state_stages_zkevm \ --datadir="$dataPath/rpc-datadir" \ - --config=./dynamic-integration8.yaml \ + --config="zk/tests/unwinds/config/dynamic-integration8.yaml" \ --chain=dynamic-integration \ - --unwind-batch-no=${unwindBatch} + --unwind-batch-no="${unwindBatch}" +echo -e '\nDumping data after unwind \n' # now get a dump of the datadir at this point go run ./cmd/hack --action=dumpAll --chaindata="$dataPath/rpc-datadir/chaindata" --output="$dataPath/phase1-dump2" - -mkdir -p "$dataPath/phase1-diffs/pre" -mkdir -p "$dataPath/phase1-diffs/post" +# mkdir -p "$dataPath/phase1-diffs/pre" +# mkdir -p "$dataPath/phase1-diffs/post" + +different_files=( + "Code.txt" + 
"HashedCodeHash.txt" + "hermez_l1Sequences.txt" + "hermez_l1Verifications.txt" + "HermezSmt.txt" + "PlainCodeHash.txt" + "SyncStage.txt" + "BadHeaderNumber.txt" + "CallToIndex.txt" + "InnerTx.txt" +) + +is_in_array() { + local element + for element in "${different_files[@]}"; do + if [[ "$element" == "$filename" ]]; then + return 0 + fi + done + return 1 +} # iterate over the files in the pre-dump folder -for file in $(ls $dataPath/phase1-dump1); do +# we are going to check if unwind worked +for file in "$dataPath/phase1-dump1"/*; do # get the filename - filename=$(basename $file) + filename=$(basename "$file") # diff the files and if there is a difference found copy the pre and post files into the diffs folder - if cmp -s $dataPath/phase1-dump1/$filename $dataPath/phase1-dump2/$filename; then + if cmp -s "$dataPath/phase1-dump1/$filename" "$dataPath/phase1-dump2/$filename"; then echo "No difference found in $filename" else - if [ "$filename" = "Code.txt" ] || [ "$filename" = "HashedCodeHash.txt" ] || [ "$filename" = "hermez_l1Sequences.txt" ] || [ "$filename" = "hermez_l1Verifications.txt" ] || [ "$filename" = "HermezSmt.txt" ] || [ "$filename" = "PlainCodeHash.txt" ] || [ "$filename" = "SyncStage.txt" ] || [ "$filename" = "BadHeaderNumber.txt" ]; then - echo "Phase 1 Expected differences in $filename" + # this is a list of files where we expect differences. + if is_in_array; then + echo "Phase 1 - Expected differences in $filename" else - echo "Phase 1 Unexpected differences in $filename" + # unwind tests failed + echo "Phase 1 - Error unexpected differences in $filename" + echo "Unwind failed" exit 1 fi fi done # now sync again -timeout $secondTimeout ./build/bin/cdk-erigon \ +# the data must match, if it doesn't match something is wrong, because if we unwinded returning to it should be the same. 
+echo -e '\nRunning erigon to the same stopBlock again \n' +./build/bin/cdk-erigon \ --datadir="$dataPath/rpc-datadir" \ - --config=./dynamic-integration8.yaml \ - --zkevm.sync-limit=${stopBlock} + --config="zk/tests/unwinds/config/dynamic-integration8.yaml" \ + --debug.limit="${stopBlock}" +echo -e '\nDumping data after unwind \n' # dump the data again into the post folder go run ./cmd/hack --action=dumpAll --chaindata="$dataPath/rpc-datadir/chaindata" --output="$dataPath/phase2-dump2" -mkdir -p "$dataPath/phase2-diffs/pre" -mkdir -p "$dataPath/phase2-diffs/post" +# mkdir -p "$dataPath/phase2-diffs/pre" +# mkdir -p "$dataPath/phase2-diffs/post" # iterate over the files in the pre-dump folder -for file in $(ls $dataPath/phase2-dump1); do +for file in "$dataPath/phase2-dump1"/*; do # get the filename - filename=$(basename $file) + filename=$(basename "$file") # diff the files and if there is a difference found copy the pre and post files into the diffs folder - if cmp -s $dataPath/phase2-dump1/$filename $dataPath/phase2-dump2/$filename; then + if cmp -s "$dataPath/phase2-dump1/$filename" "$dataPath/phase2-dump2/$filename"; then echo "Phase 2 No difference found in $filename" else - if [ "$filename" = "BadHeaderNumber.txt" ]; then - echo "Phase 2 Expected differences in $filename" + # file where it should be different + if [ "$filename" = "BadHeaderNumber.txt" ] || [ "$filename" = "InnerTx.txt" ]; then + echo "Phase 2 - Expected differences in $filename" else - echo "Phase 2 Unexpected differences in $filename" + echo "Phase 2 - Error unexpected differences in $filename" exit 2 fi fi done + +echo "No error" diff --git a/zk/txpool/pool.go b/zk/txpool/pool.go index 023212b7942..0e4d065d93c 100644 --- a/zk/txpool/pool.go +++ b/zk/txpool/pool.go @@ -40,8 +40,8 @@ import ( "github.com/holiman/uint256" "github.com/ledgerwatch/erigon-lib/txpool/txpoolcfg" "github.com/ledgerwatch/erigon/eth/ethconfig" - "github.com/ledgerwatch/log/v3" 
"github.com/ledgerwatch/erigon/eth/gasprice/gaspricecfg" + "github.com/ledgerwatch/log/v3" "github.com/status-im/keycard-go/hexutils" "github.com/ledgerwatch/erigon-lib/chain" @@ -149,10 +149,12 @@ const ( SenderDisallowedDeploy DiscardReason = 26 // sender is not allowed to deploy contracts by ACL policy DiscardByLimbo DiscardReason = 27 SmartContractDeploymentDisabled DiscardReason = 28 // to == null not allowed, config set to block smart contract deployment + GasLimitTooHigh DiscardReason = 29 // gas limit is too high + Expired DiscardReason = 30 // used when a transaction is purged from the pool + // For X Layer ReceiverDisallowedReceiveTx DiscardReason = 127 // receiver is not allowed to receive transactions NoWhiteListedSender DiscardReason = 128 // the transaction is sent by a non-whitelisted account - GasLimitTooHigh DiscardReason = 29 // gas limit is too high ) func (r DiscardReason) String() string { @@ -236,13 +238,14 @@ type metaTx struct { bestIndex int worstIndex int timestamp uint64 // when it was added to pool + created uint64 // unix timestamp of creation subPool SubPoolMarker currentSubPool SubPoolType alreadyYielded bool } func newMetaTx(slot *types.TxSlot, isLocal bool, timestmap uint64) *metaTx { - mt := &metaTx{Tx: slot, worstIndex: -1, bestIndex: -1, timestamp: timestmap} + mt := &metaTx{Tx: slot, worstIndex: -1, bestIndex: -1, timestamp: timestmap, created: uint64(time.Now().Unix())} if isLocal { mt.subPool = IsLocal } @@ -1248,9 +1251,18 @@ func (p *TxPool) addLocked(mt *metaTx, announcements *types.Announcements) Disca if bytes.Equal(found.Tx.IDHash[:], mt.Tx.IDHash[:]) { return NotSet } + log.Info(fmt.Sprintf("Transaction %s was attempted to be replaced.", hex.EncodeToString(mt.Tx.IDHash[:]))) return NotReplaced } + // Log nonce issue + log.Info("Transaction is to be replaced", + "account", p.senders.senderID2Addr[mt.Tx.SenderID], + "oldTxHash", hex.EncodeToString(found.Tx.IDHash[:]), + "newTxHash", hex.EncodeToString(mt.Tx.IDHash[:]), + 
"nonce", mt.Tx.Nonce, + ) + switch found.currentSubPool { case PendingSubPool: p.pending.Remove(found) @@ -1445,6 +1457,8 @@ func MainLoop(ctx context.Context, db kv.RwDB, coreDB kv.RoDB, p *TxPool, newTxs defer commitEvery.Stop() logEvery := time.NewTicker(p.cfg.LogEvery) defer logEvery.Stop() + purgeEvery := time.NewTicker(p.cfg.PurgeEvery) + defer purgeEvery.Stop() for { select { @@ -1574,6 +1588,8 @@ func MainLoop(ctx context.Context, db kv.RwDB, coreDB kv.RoDB, p *TxPool, newTxs types, sizes, hashes = p.AppendAllAnnouncements(types, sizes, hashes[:0]) go send.PropagatePooledTxsToPeersList(newPeers, types, sizes, hashes) propagateToNewPeerTimer.UpdateDuration(t) + case <-purgeEvery.C: + p.purge() } } } @@ -1889,6 +1905,57 @@ func (p *TxPool) deprecatedForEach(_ context.Context, f func(rlp []byte, sender }) } +func (p *TxPool) purge() { + p.lock.Lock() + defer p.lock.Unlock() + + // go through all transactions and remove the ones that have a timestamp older than the purge time in config + cutOff := uint64(time.Now().Add(-p.cfg.PurgeDistance).Unix()) + log.Debug("[txpool] purging", "cutOff", cutOff) + + toDelete := make([]*metaTx, 0) + + p.all.ascendAll(func(mt *metaTx) bool { + // don't purge from pending + if mt.currentSubPool == PendingSubPool { + return true + } + if mt.created < cutOff { + toDelete = append(toDelete, mt) + } + return true + }) + + for _, mt := range toDelete { + switch mt.currentSubPool { + case PendingSubPool: + p.pending.Remove(mt) + case BaseFeeSubPool: + p.baseFee.Remove(mt) + case QueuedSubPool: + p.queued.Remove(mt) + default: + //already removed + } + + p.discardLocked(mt, Expired) + + // do not hold on to the discard reason as we're purging it completely from the pool and an end user + // may wish to resubmit it and we should allow this + p.discardReasonsLRU.Remove(string(mt.Tx.IDHash[:])) + + // get the address of the sender + addr := common.Address{} + if checkAddr, ok := p.senders.senderID2Addr[mt.Tx.SenderID]; ok { + addr = 
checkAddr + } + log.Debug("[txpool] purge", + "sender", addr, + "hash", hex.EncodeToString(mt.Tx.IDHash[:]), + "ts", mt.created) + } +} + // CalcIntrinsicGas computes the 'intrinsic gas' for a message with the given data. func CalcIntrinsicGas(dataLen, dataNonZeroLen uint64, accessList types.AccessList, isContractCreation, isHomestead, isEIP2028, isShanghai bool) (uint64, DiscardReason) { // Set the starting gas for the raw transaction diff --git a/zk/txpool/pool_zk.go b/zk/txpool/pool_zk.go index d0ab2e6d84a..8a425cae5e2 100644 --- a/zk/txpool/pool_zk.go +++ b/zk/txpool/pool_zk.go @@ -206,12 +206,14 @@ func (p *TxPool) best(n uint16, txs *types.TxsRlp, tx kv.Tx, onTopOf, availableG defer p.lock.Unlock() if p.isDeniedYieldingTransactions() { + log.Trace("Denied yielding transactions, cannot proceed") return false, 0, nil } // First wait for the corresponding block to arrive if p.lastSeenBlock.Load() < onTopOf { - return false, 0, nil // Too early + log.Trace("Block not yet arrived, too early to process", "lastSeenBlock", p.lastSeenBlock.Load(), "requiredBlock", onTopOf) + return false, 0, nil } isShanghai := p.isShanghai() @@ -232,8 +234,10 @@ func (p *TxPool) best(n uint16, txs *types.TxsRlp, tx kv.Tx, onTopOf, availableG } mt := best.ms[i] + log.Trace("Processing transaction", "txID", mt.Tx.IDHash) if toSkip.Contains(mt.Tx.IDHash) { + log.Trace("Skipping transaction, already in toSkip", "txID", mt.Tx.IDHash) continue } @@ -241,26 +245,31 @@ func (p *TxPool) best(n uint16, txs *types.TxsRlp, tx kv.Tx, onTopOf, availableG // remove ldn txs when not in london toRemove = append(toRemove, mt) toSkip.Add(mt.Tx.IDHash) + log.Trace("Removing London transaction in non-London environment", "txID", mt.Tx.IDHash) continue } if mt.Tx.Gas > transactionGasLimit { // Skip transactions with very large gas limit, these shouldn't enter the pool at all log.Debug("found a transaction in the pending pool with too high gas for tx - clear the tx pool") + log.Trace("Skipping transaction 
with too high gas", "txID", mt.Tx.IDHash, "gas", mt.Tx.Gas) continue } rlpTx, sender, isLocal, err := p.getRlpLocked(tx, mt.Tx.IDHash[:]) if err != nil { + log.Trace("Error getting RLP of transaction", "txID", mt.Tx.IDHash, "error", err) return false, count, err } if len(rlpTx) == 0 { toRemove = append(toRemove, mt) + log.Trace("Removing transaction with empty RLP", "txID", mt.Tx.IDHash) continue } // Skip transactions that require more blob gas than is available blobCount := uint64(len(mt.Tx.BlobHashes)) if blobCount*fixedgas.BlobGasPerBlob > availableBlobGas { + log.Trace("Skipping transaction due to insufficient blob gas", "txID", mt.Tx.IDHash, "requiredBlobGas", blobCount*fixedgas.BlobGasPerBlob, "availableBlobGas", availableBlobGas) continue } availableBlobGas -= blobCount * fixedgas.BlobGasPerBlob @@ -271,6 +280,7 @@ func (p *TxPool) best(n uint16, txs *types.TxsRlp, tx kv.Tx, onTopOf, availableG intrinsicGas, _ := CalcIntrinsicGas(uint64(mt.Tx.DataLen), uint64(mt.Tx.DataNonZeroLen), nil, mt.Tx.Creation, true, true, isShanghai) if intrinsicGas > availableGas { // we might find another TX with a low enough intrinsic gas to include so carry on + log.Trace("Skipping transaction due to insufficient gas", "txID", mt.Tx.IDHash, "intrinsicGas", intrinsicGas, "availableGas", availableGas) continue } @@ -278,6 +288,7 @@ func (p *TxPool) best(n uint16, txs *types.TxsRlp, tx kv.Tx, onTopOf, availableG availableGas -= intrinsicGas } + log.Trace("Including transaction", "txID", mt.Tx.IDHash) txs.Txs[count] = rlpTx txs.TxIds[count] = mt.Tx.IDHash copy(txs.Senders.At(count), sender.Bytes()) @@ -290,6 +301,7 @@ func (p *TxPool) best(n uint16, txs *types.TxsRlp, tx kv.Tx, onTopOf, availableG if len(toRemove) > 0 { for _, mt := range toRemove { p.pending.Remove(mt) + log.Trace("Removed transaction from pending pool", "txID", mt.Tx.IDHash) } } return true, count, nil @@ -319,6 +331,57 @@ func (p *TxPool) MarkForDiscardFromPendingBest(txHash common.Hash) { } } +func (p *TxPool) 
RemoveMinedTransactions(ctx context.Context, tx kv.Tx, blockGasLimit uint64, ids []common.Hash) error { + cache := p.cache() + + p.lock.Lock() + defer p.lock.Unlock() + + toDelete := make([]*metaTx, 0) + + p.all.ascendAll(func(mt *metaTx) bool { + for _, id := range ids { + if bytes.Equal(mt.Tx.IDHash[:], id[:]) { + toDelete = append(toDelete, mt) + switch mt.currentSubPool { + case PendingSubPool: + p.pending.Remove(mt) + case BaseFeeSubPool: + p.baseFee.Remove(mt) + case QueuedSubPool: + p.queued.Remove(mt) + default: + //already removed + } + } + } + return true + }) + + sendersWithChangedState := make(map[uint64]struct{}) + for _, mt := range toDelete { + p.discardLocked(mt, Mined) + sendersWithChangedState[mt.Tx.SenderID] = struct{}{} + } + + baseFee := p.pendingBaseFee.Load() + + cacheView, err := cache.View(ctx, tx) + if err != nil { + return err + } + for senderID := range sendersWithChangedState { + nonce, balance, err := p.senders.info(cacheView, senderID) + if err != nil { + return err + } + p.onSenderStateChange(senderID, nonce, balance, p.all, + baseFee, blockGasLimit, p.pending, p.baseFee, p.queued, p.discardLocked) + + } + return nil +} + // discards the transactions that are in overflowZkCoutners from pending // executes the discard function on them // deletes the tx from the sendersWithChangedState map diff --git a/zk/witness/witness.go b/zk/witness/witness.go index 5ae7ac04bcf..66346367db4 100644 --- a/zk/witness/witness.go +++ b/zk/witness/witness.go @@ -1,15 +1,15 @@ package witness import ( - "bytes" "context" "errors" "fmt" "math/big" "time" + "github.com/ledgerwatch/erigon-lib/common" + "github.com/ledgerwatch/erigon-lib/chain" - libcommon "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/common/datadir" "github.com/ledgerwatch/erigon-lib/kv" libstate "github.com/ledgerwatch/erigon-lib/state" @@ -23,20 +23,14 @@ import ( "github.com/ledgerwatch/erigon/eth/ethconfig" "github.com/ledgerwatch/erigon/eth/stagedsync" 
"github.com/ledgerwatch/erigon/eth/stagedsync/stages" - db2 "github.com/ledgerwatch/erigon/smt/pkg/db" - "github.com/ledgerwatch/erigon/smt/pkg/smt" "github.com/ledgerwatch/erigon/turbo/services" "github.com/ledgerwatch/erigon/turbo/trie" - dstypes "github.com/ledgerwatch/erigon/zk/datastream/types" "github.com/ledgerwatch/erigon/zk/hermez_db" "github.com/ledgerwatch/erigon/zk/l1_data" - zkStages "github.com/ledgerwatch/erigon/zk/stages" zkUtils "github.com/ledgerwatch/erigon/zk/utils" "github.com/ledgerwatch/log/v3" "github.com/ledgerwatch/erigon-lib/kv/membatchwithdb" - "github.com/holiman/uint256" - "math" ) var ( @@ -54,7 +48,7 @@ type Generator struct { chainCfg *chain.Config zkConfig *ethconfig.Zk engine consensus.EngineReader - forcedContracts []libcommon.Address + forcedContracts []common.Address } func NewGenerator( @@ -65,7 +59,7 @@ func NewGenerator( chainCfg *chain.Config, zkConfig *ethconfig.Zk, engine consensus.EngineReader, - forcedContracs []libcommon.Address, + forcedContracs []common.Address, ) *Generator { return &Generator{ dirs: dirs, @@ -79,80 +73,55 @@ func NewGenerator( } } -func (g *Generator) GetWitnessByBatch(tx kv.Tx, ctx context.Context, batchNum uint64, debug, witnessFull bool) (witness []byte, err error) { - t := zkUtils.StartTimer("witness", "getwitnessbybatch") +func (g *Generator) GetWitnessByBadBatch(tx kv.Tx, ctx context.Context, batchNum uint64, debug, witnessFull bool) (witness []byte, err error) { + t := zkUtils.StartTimer("witness", "getwitnessbybadbatch") defer t.LogTimer() reader := hermez_db.NewHermezDbReader(tx) - badBatch, err := reader.GetInvalidBatch(batchNum) + // we need the header of the block prior to this batch to build up the blocks + previousHeight, _, err := reader.GetHighestBlockInBatch(batchNum - 1) if err != nil { return nil, err } - if badBatch { - // we need the header of the block prior to this batch to build up the blocks - previousHeight, _, err := reader.GetHighestBlockInBatch(batchNum - 1) - if err != 
nil { - return nil, err - } - previousHeader := rawdb.ReadHeaderByNumber(tx, previousHeight) - if previousHeader == nil { - return nil, fmt.Errorf("failed to get header for block %d", previousHeight) - } + previousHeader := rawdb.ReadHeaderByNumber(tx, previousHeight) + if previousHeader == nil { + return nil, fmt.Errorf("failed to get header for block %d", previousHeight) + } - // 1. get l1 batch data for the bad batch - fork, err := reader.GetForkId(batchNum) - if err != nil { - return nil, err - } + // 1. get l1 batch data for the bad batch + fork, err := reader.GetForkId(batchNum) + if err != nil { + return nil, err + } - decoded, err := l1_data.BreakDownL1DataByBatch(batchNum, fork, reader) - if err != nil { - return nil, err - } + decoded, err := l1_data.BreakDownL1DataByBatch(batchNum, fork, reader) + if err != nil { + return nil, err + } - nextNum := previousHeader.Number.Uint64() - parentHash := previousHeader.Hash() - timestamp := previousHeader.Time - blocks := make([]*eritypes.Block, len(decoded.DecodedData)) - for i, d := range decoded.DecodedData { - timestamp += uint64(d.DeltaTimestamp) - nextNum++ - newHeader := &eritypes.Header{ - ParentHash: parentHash, - Coinbase: decoded.Coinbase, - Difficulty: new(big.Int).SetUint64(0), - Number: new(big.Int).SetUint64(nextNum), - GasLimit: zkUtils.GetBlockGasLimitForFork(fork), - Time: timestamp, - } - - parentHash = newHeader.Hash() - transactions := d.Transactions - block := eritypes.NewBlock(newHeader, transactions, nil, nil, nil) - blocks[i] = block + nextNum := previousHeader.Number.Uint64() + parentHash := previousHeader.Hash() + timestamp := previousHeader.Time + blocks := make([]*eritypes.Block, len(decoded.DecodedData)) + for i, d := range decoded.DecodedData { + timestamp += uint64(d.DeltaTimestamp) + nextNum++ + newHeader := &eritypes.Header{ + ParentHash: parentHash, + Coinbase: decoded.Coinbase, + Difficulty: new(big.Int).SetUint64(0), + Number: new(big.Int).SetUint64(nextNum), + GasLimit: 
zkUtils.GetBlockGasLimitForFork(fork), + Time: timestamp, } - return g.generateWitness(tx, ctx, batchNum, blocks, debug, witnessFull) - } else { - blockNumbers, err := reader.GetL2BlockNosByBatch(batchNum) - if err != nil { - return nil, err - } - if len(blockNumbers) == 0 { - return nil, fmt.Errorf("no blocks found for batch %d", batchNum) - } - blocks := make([]*eritypes.Block, len(blockNumbers)) - idx := 0 - for _, blockNum := range blockNumbers { - block, err := rawdb.ReadBlockByNumber(tx, blockNum) - if err != nil { - return nil, err - } - blocks[idx] = block - idx++ - } - return g.generateWitness(tx, ctx, batchNum, blocks, debug, witnessFull) + parentHash = newHeader.Hash() + transactions := d.Transactions + block := eritypes.NewBlock(newHeader, transactions, nil, nil, nil) + blocks[i] = block } + + return g.generateWitness(tx, ctx, batchNum, blocks, debug, witnessFull) } func (g *Generator) GetWitnessByBlockRange(tx kv.Tx, ctx context.Context, startBlock, endBlock uint64, debug, witnessFull bool) ([]byte, error) { @@ -164,9 +133,10 @@ func (g *Generator) GetWitnessByBlockRange(tx kv.Tx, ctx context.Context, startB } if endBlock == 0 { witness := trie.NewWitness([]trie.WitnessOperator{}) - return getWitnessBytes(witness, debug) + return GetWitnessBytes(witness, debug) } hermezDb := hermez_db.NewHermezDbReader(tx) + idx := 0 blocks := make([]*eritypes.Block, endBlock-startBlock+1) var firstBatch uint64 = 0 @@ -214,9 +184,9 @@ func (g *Generator) generateWitness(tx kv.Tx, ctx context.Context, batchNum uint return nil, fmt.Errorf("block number is in the future latest=%d requested=%d", latestBlock, endBlock) } - batch := membatchwithdb.NewMemoryBatchWithSize(tx, g.dirs.Tmp, g.zkConfig.WitnessMemdbSize) - defer batch.Rollback() - if err = zkUtils.PopulateMemoryMutationTables(batch); err != nil { + rwtx := membatchwithdb.NewMemoryBatchWithSize(tx, g.dirs.Tmp, g.zkConfig.WitnessMemdbSize) + defer rwtx.Rollback() + if err = zkUtils.PopulateMemoryMutationTables(rwtx); 
err != nil { return nil, err } @@ -230,21 +200,11 @@ func (g *Generator) generateWitness(tx kv.Tx, ctx context.Context, batchNum uint return nil, fmt.Errorf("requested block is too old, block must be within %d blocks of the head block number (currently %d)", maxGetProofRewindBlockCount, latestBlock) } - unwindState := &stagedsync.UnwindState{UnwindPoint: startBlock - 1} - stageState := &stagedsync.StageState{BlockNumber: latestBlock} - - hashStageCfg := stagedsync.StageHashStateCfg(nil, g.dirs, g.historyV3, g.agg) - if err := stagedsync.UnwindHashStateStage(unwindState, stageState, batch, hashStageCfg, ctx, log.New(), true); err != nil { - return nil, fmt.Errorf("unwind hash state: %w", err) + if err := UnwindForWitness(ctx, rwtx, startBlock, latestBlock, g.dirs, g.historyV3, g.agg); err != nil { + return nil, fmt.Errorf("UnwindForWitness: %w", err) } - interHashStageCfg := zkStages.StageZkInterHashesCfg(nil, true, true, false, g.dirs.Tmp, g.blockReader, nil, g.historyV3, g.agg, nil) - - if err = zkStages.UnwindZkIntermediateHashesStage(unwindState, stageState, batch, interHashStageCfg, ctx, true); err != nil { - return nil, fmt.Errorf("unwind intermediate hashes: %w", err) - } - - tx = batch + tx = rwtx } prevHeader, err := g.blockReader.HeaderByNumber(ctx, tx, startBlock-1) @@ -255,9 +215,9 @@ func (g *Generator) generateWitness(tx kv.Tx, ctx context.Context, batchNum uint tds := state.NewTrieDbState(prevHeader.Root, tx, startBlock-1, nil) tds.SetResolveReads(true) tds.StartNewBuffer() - trieStateWriter := tds.TrieStateWriter() + trieStateWriter := tds.NewTrieStateWriter() - getHeader := func(hash libcommon.Hash, number uint64) *eritypes.Header { + getHeader := func(hash common.Hash, number uint64) *eritypes.Header { h, e := g.blockReader.Header(ctx, tx, hash, number) if e != nil { log.Error("getHeader error", "number", number, "hash", hash, "err", e) @@ -278,48 +238,8 @@ func (g *Generator) generateWitness(tx kv.Tx, ctx context.Context, batchNum uint hermezDb := 
hermez_db.NewHermezDbReader(tx) - //[zkevm] get batches between last block and this one - // plus this blocks ger - lastBatchInserted, err := hermezDb.GetBatchNoByL2Block(blockNum - 1) - if err != nil { - return nil, fmt.Errorf("failed to get batch for block %d: %v", blockNum-1, err) - } - - currentBatch, err := hermezDb.GetBatchNoByL2Block(blockNum) - if err != nil { - return nil, fmt.Errorf("failed to get batch for block %d: %v", blockNum, err) - } - - gersInBetween, err := hermezDb.GetBatchGlobalExitRoots(lastBatchInserted, currentBatch) - if err != nil { - return nil, err - } - - var globalExitRoots []dstypes.GerUpdate - - if gersInBetween != nil { - globalExitRoots = append(globalExitRoots, *gersInBetween...) - } - - blockGer, err := hermezDb.GetBlockGlobalExitRoot(blockNum) - if err != nil { - return nil, err - } - emptyHash := libcommon.Hash{} - - if blockGer != emptyHash { - blockGerUpdate := dstypes.GerUpdate{ - GlobalExitRoot: blockGer, - Timestamp: block.Header().Time, - } - globalExitRoots = append(globalExitRoots, blockGerUpdate) - } - - for _, ger := range globalExitRoots { - // [zkevm] - add GER if there is one for this batch - if err := zkUtils.WriteGlobalExitRoot(tds, trieStateWriter, ger.GlobalExitRoot, ger.Timestamp); err != nil { - return nil, err - } + if err := PrepareGersForWitness(block, hermezDb, tds, trieStateWriter); err != nil { + return nil, fmt.Errorf("PrepareGersForWitness: %w", err) } engine, ok := g.engine.(consensus.Engine) @@ -328,60 +248,24 @@ func (g *Generator) generateWitness(tx kv.Tx, ctx context.Context, batchNum uint return nil, fmt.Errorf("engine is not consensus.Engine") } - vmConfig := vm.Config{} - getHashFn := core.GetHashFn(block.Header(), getHeader) chainReader := stagedsync.NewChainReaderImpl(g.chainCfg, tx, nil, log.New()) - _, err = core.ExecuteBlockEphemerallyZk(g.chainCfg, &vmConfig, getHashFn, engine, block, tds, trieStateWriter, chainReader, nil, hermezDb, &prevStateRoot) - if err != nil { - return nil, err + 
vmConfig := vm.Config{} + if _, err = core.ExecuteBlockEphemerallyZk(g.chainCfg, &vmConfig, getHashFn, engine, block, tds, trieStateWriter, chainReader, nil, hermezDb, &prevStateRoot); err != nil { + return nil, fmt.Errorf("ExecuteBlockEphemerallyZk: %w", err) } prevStateRoot = block.Root() } - inclusion := make(map[libcommon.Address][]libcommon.Hash) - for _, contract := range g.forcedContracts { - err = reader.ForEachStorage(contract, libcommon.Hash{}, func(key, secKey libcommon.Hash, value uint256.Int) bool { - inclusion[contract] = append(inclusion[contract], key) - return false - }, math.MaxInt64) - if err != nil { - return nil, err - } - } - - var rl trie.RetainDecider - // if full is true, we will send all the nodes to the witness - rl = &trie.AlwaysTrueRetainDecider{} - - if !witnessFull { - rl, err = tds.ResolveSMTRetainList(inclusion) - if err != nil { - return nil, err - } - } - - eridb := db2.NewEriDb(batch) - smtTrie := smt.NewSMT(eridb, false) - - witness, err := smt.BuildWitness(smtTrie, rl, ctx) + witness, err := BuildWitnessFromTrieDbState(ctx, rwtx, tds, reader, g.forcedContracts, witnessFull) if err != nil { - return nil, fmt.Errorf("build witness: %v", err) + return nil, fmt.Errorf("BuildWitnessFromTrieDbState: %w", err) } - return getWitnessBytes(witness, debug) -} - -func getWitnessBytes(witness *trie.Witness, debug bool) ([]byte, error) { - var buf bytes.Buffer - _, err := witness.WriteInto(&buf, debug) - if err != nil { - return nil, err - } - return buf.Bytes(), nil + return GetWitnessBytes(witness, debug) } func (g *Generator) generateMockWitness(batchNum uint64, blocks []*eritypes.Block, debug bool) ([]byte, error) { diff --git a/zk/witness/witness_merge_test_data.go b/zk/witness/witness_merge_test_data.go new file mode 100644 index 00000000000..1bfe7b9cd14 --- /dev/null +++ b/zk/witness/witness_merge_test_data.go @@ -0,0 +1,8 @@ +package witness + +var ( + witness1 = 
"01020302030203020302030203034b4c181607792b3c46ea253af79666ab9bbfa3d29e8855be6c4e045b3424f6a503fdb52981685167cdab219ae57b3c5869e539e89eb29845d6406b3229247e982e020302030203020302030203020303dc378377acad40e16af2de6482d7a60c1e5f087d067fc716c2485742ac2e29330339535728bf0c5d72ec789110ff3691dfb9cf434399ad849a86ca6725977d3e4f0203020303481a1fc812bcc98ce37225fff9f28a6d8d0ea5c63aeda93b031e8e4603cc8e7c032952530fef71561f9028c37b944df439c0d2968c4f7e247a2ad12dd4969ffc8302030203031ce6733d3a496a34cb114cad924070b0dfad8ff6891f629ed2ae31326540fe120345057d6cbecce08aeecc475c91403549f4fe82bdb953895bdeded2fae6f8688a020302030203020303c4ac3ac799860160a30a3304b765c2c90bc414edc3739a5d098bb7e18009548a039042e98ef239f418f2bf7ad10868e1fa7d0f644458488adf684313dc3f683a5202030203020303949f805ade2be05694c8011fa17fab3646a43f38f96d868386f0ba9558ba5f960302aabd9fbeceb9711f46d634513830181412c8405aea579f470a19b477d090140203020303db978a462b93b2efa3aa3da09e03370b570db692c6d361d52ae1051bdb26a3a903916d67432c505e1dc33f3617e0743d761aba44785726309191e79cb18b666e7402030203033edca13bcadc1db9305f3b15322cc6d774682fffdfe2b509f81d00b16ce2dcd003dc94780e238944094e7856154e6d3e54fec28293a9a70eaf1cc2a81e874e22170203020302010203070354000000000000000000000000000000005ca1ab1e5820e72de8a1b9696dd30f7886b15c4cc9234d52c6b41b9c33e2baaf8d88fc5b7c9f5820f8fb80310ac041e7a5e79c138d7261cda5d8a988dc9268b5a8dc5318fb610a90070354000000000000000000000000000000005ca1ab1e5820000000000000000000000000000000000000000000000000000000000000000358206e82d18bde430935057c321f6c30812e0eae2122da6af753e25974c92f0d7b50020303c6cbb686c9d7a94f49dfbee076ae1b87f1c9bb5f33b7c98a71816c33b4731a3b037514c2021b2bb805e2a6060b265dd53c069a4587e19cd7d1af99d0e9c3d0e550020303784570292bffbc3ee00153e5806a317459fed4c1d84de0515dcefc177404865003f08c92f6786e67148e8fb2bcd4eb813a665e16a270b475605d1d84b587450ff102030344c5e2a1775873020ab4e5a588e95d6702878cd46012682196dc39738fd8780703a6d5ee17fe3be7e20050e4e66c54b5188febbdd3615f832c35b073078258b214020303a42b38dcef18f890c02cdb90473211c9558
2727b83af287cbfc8a3f10e29649103380623684a9b3b341e01ee65908a6aac96fdf1444ca255b9dd5193537d58709b020303476c478891a8f8d905ebf9e5c031ba1020ca1436538bf9b97c6eaa1b9512da97038c77314895fccd4edafbfd73b531f4dec6f4671b6acde83926907ab376982f310203036416706411fa678c78f77dbfb609d65f63d6b04a8aae3fae4cad23419f6e738b03b6ec59ff099f23c5a528e805fbd9457736b100ea0e96390eb536046b88da3db102030334468b79fd36c8bc812c6613d176983aa4be53642e7e56421faa4ef25031fc73032869ca46586018725007aac483055d85131fcc4432c9a72175a8c6263b65c1ed020303676f9f98ef2cdc44ec8d98d0153be2aeb90b08386286887c94567950df1216440385bdebccb7559d68f55e26ba0980bcf7120609c7bb43cfc1f701e92f670ac1280203031117969a5ad58cb9a441ddd498cf3bebc13ab5aea1ceb29ddb1a226c5343c6e703425597c542fab13f686a7053f6c1e2635a729f8d9da4c01d763ffe9965ddd63402030345f2b9e446c9e743f6899409a4567a9b7f8770f711d39e39773d8173c4ea3a0c03cbc17bc3c54426fc8cf2b13b1ddb800509579856ce251beae01d924a92a8edb8020302030203030560b956a67a313d6d8939eed4cd80cc385eb49f7b6dd269ccde33a145f1216e037b3e0569695b777df45db97a41b025b57c680ad61231b61225fc7825824c4c0502030203033f1ce9dde58980c5bc34a88467f3b8cfd334dab19f28050acc53f33aab0b366f036092ba2243e1d8e20c2aa4ba0aee9ca063e8e8e6da493269065c227232020a590203020303921f9061d1b4082d20b7f9df21566609ca6dc64cd0ffac2625e9ff3090ac73570371757934d2c7d4dfe9b7b1e5c71fe18b66cf56c540c3d04310873976f79ef9f602030203020303e8e045e00ad70f879f31e498fe49aa2fd4c81849b6c55dd98391681351aac7df036e509788bd99ed4034d5fa3901bbda4cb6f94d709b2d54eca545336569791f36020302030310125b6177d5086fcdca8e0607e49e6cb21bebc95404329c9769a7d3ed59e2c4034e9acfa4214d459d1c81a304d862b2dbd4d832e71ab851656bfcc0e9c5b3e6f60203020303ad656bdacec77a2e7e591bddde7b2c7ab9b928945ee65898ff35900e25f0c21f03e47d9766945c4649bd44422f5fa779e92267d76ce44f396ef0b672215e43ce7802030203020303d3858acf0781afe0adae49a25d1724b36c9989179cc884b9a9c6481f89e57706031da6fb50879ede58d816d1624292f0c60a8133bcbc671dd92d0f9cb8d50fc803020302030317221db9e049aebabc83cefc3ebe7040ec1e82022d104d2c78f796753f76f0120352124f3ffee53f7e0f9a0
068d5c0b0abfca5aaa148371c91e2e81df5fba6f8bf0203020303e00ce2232f3e208dcf050f887d02c7b91170c4b98e1d098ec5238bb3d387a41e038a0379dab30ce84865bb4f0834a9c3fd7bb2da17994abf03e89fd8d754bf7aab0203020302030333f5853903cb36caedffa12d0aa0a01c39e91309629a97dddafa8da4f738fb3e038e1dc6012aecd5998053b5878c6e3a398c8a286c7ad4dc0b55f043e4b4210950020302030317e041d91830fe8051fc73631cfd56519fc5bb88b5369298da9807b40d93733703dc7745bf8dfa058bcf1328f79efc9441cf5ad5fb5763c75bdebf9492f88a6c8302030203020303bf9a2780e63ac26ffca6df07f182db72e3e65802b277ea6d40057c383d5a37e3036c1548eb1c956ece0a876fff4463cc3f2e9c3b6eef88379f07e6d71013eb4aad020302030202020103c2eebac9e260dad1f051fa75846558dcc0ed120d1a7699fd1d6b739a3419015b020103b9d64db96b69b035a74a90069691afa2b34a705f3ad3aa82f3757c7a6f2a9f17070354000000000000000000000000000000005ca1ab1e58207af492bfc857210d76ff8398a35a942af892e866b1f4c241746b3ee89ca002595820f889596e5b3d4dbe5bcab5cde2af26d3ad8d88bc086e8b4f929885f33f6eec77020303cd3edcf79c7e0e0d6b87ae523729611faeda92e77bbb59f739e9a6127d890cdf03009a5c01c3cb27d2f1ffbd3fd77ff38f66991f64174f5df1411ea21ae2e22f250203035334694d8b56a2f5e0d0ded81283e7f36b3da6fbf2e2d1b469c008d7414296be03953388b0cacc587c5ca452ba8e96a9071958ef94439690bc14866f556a35ebc1020303ac2aae05bc7a68d238e9a9bbd2d5d07a001f8f3651bb25f5a6d6dcbb155569090335b6f55bf3d56419cbc3a45d4fa6bed330d9e0391f8806c97a7aa4149d06725b0203033dfe4a2f0555ff318ad12e49515e712f339134af0237edaef08553d9d67e260b039cd50a46feb34ab47c24391a2579e601956897ad6299bd14a4c8d9628a37d46e02030348f01fedf98979a5fb3df07daded956331fa6a02f697dfe29dd26e71111de5540387829f9a96ed82303e86550747e311d5dbfe94cc71113600595360abb512cb7b02030203020302030203020303eac48d9dbf7d162797293e0acd54382d4fd53e80e29c9c43c51dafb05c0880060306b13c75c66a6e267236b6579bcca576ff889e323ac6ffd0ee317e07623a3866020302030203020302030352e23af8570aeca858a6aa5bd20d2c63a92eb08529a9e6e5fb245aa72c5b72ce0334d7cfef6cb28d62f63cf907e3273d76a8bb858423c6ef446b056fb4f06210e002030203020302030315bf4cd3a7f33296bb4778e216bd18adacf25c97f8f4df9e1
052dcba7b6edf2203b3637d0b1cf58c5272f28f8354a764d3cd48ff7c04f807237da8f4a1e2ef5db5020302030203020302030203020303c018dfe606efd5d17f3d45e91d33d3d7ef57d92a1d509291b1556bbb7e78dd0803b08ff5bb304aa8741af608415c541440edcd98bbc0fc849fe2a89c2c783341d502030203031e0eb0ac5664b1d0267d3c51dd66d1828c0d01a0903d599a317e4578926d4e3503330e2ccc546a3db52e796943aa8960d6d483a3b941ae0caa21cc7b9f7a3c2bbc070354a40d5f56745a118d0906a34e69aec8c0db1cb8fa582000000000000000000000000000000000000000000000000000000000000000015820421c2cc0dce9b0fbdb85cbe43bd6c2a1af5a6f5da756cdb8b6f2bb948e3a90da020303ac874a6acbf6de628134cd74ad9f336206e7aadb1ef09456b6267a770612485703ef323528b761720ce04927a54b81025a935f070420d655217e40eb2e084bd170020303a48199a63a429cf44fb39fdbb73098a49dc171e03d32800f483780adb9aa06580388796e8ab2076fc77f00a5096317ceff8a54da310b014a0310504bcd76f8b8da020303f8698a6f24140e0e37f49032fb2da6db2c8bcaea8961a6e1976baded0d9a8bd80371b277886f0d14b6f82cfd063ecddab10fb5da5e0666e040992469d09a6bc8b0020303dee2b54587eeb7db2fe9ef0dc262b6ae679a5bfff89c8f403d181a1d79107b1d032aff27f522ef5fd88213c3865e01c7b4c1720d56778d1bd0e48e6a86fb3b07970203037d0d29240ad72800831a91d8e54019da745c6c6a630a625167723ace857bbb810305edd49a8cfb1eea157734968e95e8b8620c474c3cfc6f3285d3dad36893114302030349b1bd34664838889a2133d716143cb8707a15745738917bfbbeecbe871e6e90035ba74ef0008ce80ac6d199cc4d217aaa9b8a5fd58f2d329aba4e061c16d99b620203030d600bfcd6581d405aaed26aa7cee976fbb2bb9c1c1390bd3eb14cf5f661022303acf3369b84f876dc556ed93718d616864020b1969d24170f4970ddbd944e1bd9020303d5bb847f016f33d8cac460756aad70173d8d6e37c37d1b69e1b1b45c52e5996103c3777105bd00820b49c89486e7589f0ccd0244ab6fd4b1409ba86dece7506f9102030356b2929dbde358b52b652bc842c7a42aea162f0d79bd7d653b5cfee34e9f0e6c03656a686adb3bff7a9d8841d3e296b0dc61c389b399677222ebbd70cf0c19e70a020303e5bf4a0779ccfa6d42a01e532bb6120b168699bfd3f4f44a62780481d5f86588036efb82ef530fb604bdff43bf1ad1a7dde41522bf8a7f5e724dd3074562b0c0ef020303036a50ac7a6e425842820d2a4e07a80f416706903e9d88b5824559515a901aa80303e3c
58c1dfb4f5a5a5d1180dd010ceb33a42a0ff7cab200ced5562202261aa0020302030385d60697f5b4482fcbecfaf5f53111681c9b48ed7bbd2cdb1a257bb7f26db9d103ae21f016eadf6448b913ba498fe3d678b8bcdf9569b026053de69bd24563ef0202030203032fd50f1a5b8eddbd5ccb90e37d9c190092927af9b26a1cf8b4576d7982476fb603436882f441f09768b000722da7ec7c74b6f0252c24e16b9e6461ce4f4eeb791d02030203034eb00b994d3a8d439f47981f68baf7fb0f0e88e2167243c6b005de3c48b5c3ec03ac5626fd3f4030d088d0f41834de11510b59739353238241138d70bd8e05c22e02030203030a51165872abbe7260a6777cbbd2f6d81dfcd07c1b7c0783659bf8e8ca8e77b9032f78c81c54fd31d1a25214fa464424ae6e6399f15c1bd8987825f1f0d0dfccde020302030203020303215472473dede3eebcfdd93b1ee898e4d6cf33261a1fba12ff77dff2fb8a0f27037938ac733af661730414e88da9633c04a8914c9ae4263a4f8cea7066e6cefb840203020302030203034d6713bc006056011f31ac6f935e71e33ab8045353e9e138ec9743e8574a8d2f03fcaee2f22e1561702d029c465b755ff5491e4114264dfdf16fe9efd34864a83802030203037cc278f9b41fd17fb5eb3c839a725fcd1ef6000189fcebcb4214303f45dcd2d60386c3bc64da300f1a87efa2eb2724553e41348057fc99d5c23b5b20e216fde46d020302030376bf2ddfddca9910df80bb0785add76937d1e90e029c02b04c0cf421622a232803ba2219bc37e93a89b0effdfc3601f58645c1cb7e818f2254c8fd16fea4ba84440203020303fdf1c4799edef5fe2960f9148627fff521e591247a224eb8d05eae3f51675b560372adafa8e298a14d0da0a71e645a12a23def78db8e81f7a68ef92aac7d5700b40203020303f5a794d38718b283b47993f3bbcd67c76f84c47fcf2d35373fcb7f8a0f43a06b03a14b12d1f03790ac75797e463a8a7edcfb2bc80b65a7dc8d1b15d00cefb315d5020302010312f8462573dc83d436d2498e68019babdcc911f482e1e00c1b3fda70e1a166e40203020303adcfa2154e38e2cdbafd5d56bdaa5dca90a5bfb9c36bfbe140bb31ec0e66716503b5aaf1a6fa2e80ad8f4e49c51808d2898fd74f539ec5de974b57c27466f5e7490203070354000000000000000000000000000000005ca1ab1e58205956a0b12f607189a063054545ab26ce76ea5eb4c9bc1e8d8161646c93ac66515820da6aba51eaf87e14a7585e52e23cc0b789c61b3e808d2aef704ae932bb2ab49d070354ee5a4826068c5326a7f06fd6c7cbf816f096846c5820701c251f0448beefca8b47dce2e42f136d224b8e89e4900d24681e46a70e7448510237426c0
3237429400000000067445fb8020303dd24d6adc0d7b321eb19905b22f1780707b0d7e30026716c3b0d7ea311cbfeab03e498dca26358e5fd56e5464288da82073a17cbbd112e322488c12bff1661b49b020303413600069144f3379227184b3d365f22778695ad2b812ffe56bdec80df882877033b1e22049401430c9208943101b5ef3e70d99c9853e08591c4729e0f31f4bf56020303ffce2337b88b26e7b0582d1679484fa995b68c0418d72f650531db342e25f12e03493c1bb3f993e9aa63e2736b9e0826f1309ed298bd95bfc169f89b6a62cbed420203031bacbf380b1eafbec9c534577f8972d28087bc6e94bc276ec91e66a11396f07903bb137addf6042ee1a1eb0170ac09f0a092b2f7682f718d5986152d56d192b347020303b89984a9ec10a5bc5835efef55fbf26f3477d21372a55ae4abd26c55ee5e323d035ab47c29775484efde5ad8cfb1a399e9008bcb66f6cd77f28c255980633aeb5d0203037902d8528b89dce0e6a41ff89888121f42520936f3684bdc8481094f1e046b4f03cedf898a501b7bc036d92797f971bf9caa1028994a8d6b15ceb79e4ca532e7cc02030203020303a366f69c8b19d47be34a2a6333298d705692f65daf3fba95d6f48b9676b6cd3b0351f190ff80b28f339034b6be161060cbe4837cf22e0c01b3d5a77b8f349c4f1d02030203038d8eae2b45a21838dbf9f517dae99ff0bac7a25d4756a7a3315c43cfa7dbfb9803785e2e17b8cdb9628ca4c2f963eb5722918462cf75f91dd6fd00ae84d17ba2a90203020302030312a3a949b95a27ae6f73e9d879bc9c9c6eb6757f1c20ee76d1f52e1d4c9ec4eb03d38f8911a661255b0ebcabbadd44e38903841386863c97499f3e57a06bc5c3e702030203020303763e3e4c8cc4a4b30afaaae229ff20ac282d74c923a88be140293d62b2d812bb03b4b4e3386c676de1012a2bdced3714094e57803a98920b0eefe63e186abdd4d902030203032ee550fc2b119e46e3338c971e6f44ea838020e442fce0c4a34b359306a00379038c72343f5e2ac968c7f1edfd71f18128db6b52aa476fbec372eaa58a2acf45220203020303221a1371f01a251478f2a6673db891a3c412d954dc9e741ea2bfd249abf428bf0325059126652b0c2c46d78a02eba6c4df473b674ed378b17827c634bd119f5422020302030203020303313abcaaf43f5d42589a57c6fc0bec04526b43a3dc139415af1de50f8846c004037ee72e1eb97ffd7dfe0c7d40b575103edd3e62c030b86362c41630c6e97bf6bf020302030203020303508d990b34daf5a3f925c435daac3d293f6d861094cc2d343a92c62428fa66da032f8b40a9211667e9c44328d6440091ecb3a46bc15832f7d7cdfa8ec130b527fc0203020303f
993f7eae6e45a6f8557c6c5d0e912cb41b71d2bf37f38affc0b2d8e054193220315eeb3ab754628ce727cd0b7028ff8ed3291de7566b99066e127185d043f595702030203032f2c132f32f21e267ab64271e8f2c0c39fedbcc509c4589616cffec21d7332eb03839857347599c19c43a0acfe53e1bb5bbe0d68ddb49cee05f1b24c5acac24a150203020303dcd0869ad1107856680f6bf164623fc709d26d1a0842bb9c60a383f255c0ec2403c92cb1692742c4e2b6a91d13c3b371a9dccd29f898d8f6457ad052b1da9efcf6020302030203031408a1feb1c4cefd2e71c1d7ce58e6b4c2d139d48c67037d40dc0d60390af539039c51675ab13cc260ab6875b12824ed60903c1755add14024e27508ac0a3b9d81020102030376fdbe16ba7e2f048d9c311cb1f99291b4f624717ddd7e9f2aa653099d19314f032ebe85ea3fef7c7033338d1ed98e187eddf75dff4772a23e19392ce61690f77f020303f901f2ba5a7a95db9ea7106268f17f341206944377d1f006921211069cf8a0a103f43daf24401f9ed2d0691570a8ccdcd016c90b722786ff590276f7cd5933ff3d02030378ab72606d2d32782ceccc9c11af9496f599dec259281c01f0c18a3b875518ed0355b4984426bd4db31ca5d70798a18280a4d319786bd897a29365d2db7489b32d02030335706adc0febe81255c960be521ae4c7a6201b2db502fb7016a5d4d9ba36c58803ef3e9f16053b7f799f207451eb3403eb95301e9c9e721dfde0c41ebd8362485c0203033b59831b753c1ca3ed58d3293aab0099027f87ff97f3f7e92d9dfb095839497a03821fc506f41f2a0bcce20367ebd6ae4b461e110e1788d190416c8345ec72c364020303cf6d91b6b57705a8f02a367997e807f49dba00a5bb3e8d0de25eacad5486b88f03abc64c2300b90b30ae3b11fb71095675d1a62860a6471a1a2defcf624b8bb4d4020303be890b95c3a4c5c381f1a00d6d98da4cd8467e002746a8c52f2564e41319d3780394b620da3f2c277f0d4a70c7a54a7245503ed2e808bf722cce0b503e242ae7d10203039f6bac7e82bf632c8b003eed17f050a49d2ea83b6a93e09295b3b3c51c55ada6038d01937127f83a85e3e655363f467385226f7e406409528791f6e2375184ef5e02030203020303e2ba22bcf2fd6923a2ffd1ae073bcffad33e81f4a7cb9cab82e130c63a213b6e031dd2e6a82a0638b027a1f15eac2bceca26ef1519de70dc99bd5275791bab4bb0020302030203031d0be4b4d178c76d39a7689aaa3a9866e63b999a2d11dbec2f04787c714dabbe03e5880788e24aeb6314512538d4cf7382b37132d4d2870122f47de8ac0d09eb020203020303b9af076d8b0e683e730de94273fbcdb5d2ac9f29273a9ffb388758927
22f439903e22b2cbffaa7b1ed370a3d8b87199e1f1485703145dd3de0945cede9629702600203020303a019468f5d28919dfcc2d7bfd844492f2ab1df6400a17627b31c29ea02d583f5038dd13cd4ecf8c4151cebaf6e2637913a2310a81d4ecbd5f5fd2f4a4c315558ac0203020303167bb488d1aff473f1027bdeadb8e0e7a439f6a589c78caae1a3d045e78da60303ddda65ddb3f7e0fe430faaeb49419075391fd2559659f2ba88d3655454e079e802030203020302030203020302030203037a46bc17ebfbc47f6d99661de00074c9958e0f7fd66df7c77c236b89b165472e034b58bfe7c7506d2891367c270ca350269dfc0a08b7466ec2496c6330dd602bb302030203039b58b0df7fae59a4cef25184d849214bc145cda115b2c0dfd85fd470ecdea70f0330923d4d299efbc138d4442519d30cd33a7827557231388b81a6ea9c65eabe6f0203020303af3fee608c2e8e5a30ffc6345d86ec1b2d55f10e518b4da5e8eb59abde07b59803c2016682405d3a953eba254601d5fc0b8966a33efaa51918a4a41b8e0acbeb4602030203034b1387aa6d0ab944e2ec65ce38c8643a0ddfca5c3059718f398dee501291569603528cbab25216c4397a402fcb572f0b512a773dfeafa59e401989a4da13406bfe02030203070354000000000000000000000000000000005ca1ab1e582054b6c4d9862a1658dedebe99a0f61d94c5d1515fd031d0dfe9ebce6a1454f5c658203f14693500ccd0260659fd9eaf69570edc0504867134ac88f871d91d388b63690203070354914e7547b9051ea6226c30495190a2efa15930c95820ffffffffffffffffffffffffffffffffffffffffffffffffffffffff74873927548382be7cc5c2cd8b14f44108444ced6745c5fecb02030311ab6695ec969171698c1f56d4c05373d8505a2c7299fb05cda1d4351e22bfa403478b94ae515fbd01728835b532c7c45ccc78d200d3d004da6917337e139eb729020303ffd14369e7c7f7aec3a890a20234885f2c9fb9802ec318d8434ebcd58a696153030ddae742090ea458c3f232dc894bd8cd3378b4b4590a0523e09a44e0439fe0db020303b1688d8c7806365d931579ccac8dbf7a8d7705ac393159dfd9c0395ab7b5ca5b036a6c978a565b15267de4330de7b6166014082043c5cc80370953767ac501ccf2020303f181e47adf88e965d55e1153d76b731c261ad7d7720823919fc11d98bc144d2a03c480f344ef22a4532900fb9d7cb9d8b5ce1e4f11a231e682142f9ffe1962807d0203032db40fdeb2c5256d5a237b6134f844646b325bfc12c687916327e21a65b1ae6a03c73ddb4116e07a00066b925a207dda51fbbfadce21a7459c6c2ae7f598721089020303e81fa28f73bf124de71b54c6733
4292e397e000428de699c89947d793cacb9da03173e567d72ac2c265860d9103e791fdfe3cad72a9a1dae15d9bec6687eb506d702030305a683365eb32bb92967becff0dba79d1c23ff75b2fc3d40f9a1573b993747b703b8b1075b12927a8f483dc7b802c96483206f98c640e49e22d4b426f9a9eb750f0203031276db0802c8235f9f248bbafaa6cbabb75baead95ede989894ea6d8585c3c8703527ea0179a8814d423775e1f381cc8eee0797216d71c79729ab186714e4daf3702030330b1e1f7a1f7dcbf5cd00932de20748e546bc1a8da9381fa3d5066f3c02b61da033f7308aca0fa70a938e45539d5dcd1864bc233ef232c6d38fa1dd331e536a400020303ad8fe61eca50a88286f382461ecaa93dc71e9aed12e91a2e9930325e5ffd1d7903fd046a02679f734a91031aacb4194ada537220167cfa68306b651433026e6478020302030203020303b7e72973952f51f913dc6818649ddb3c5619982f21e56347003ebe3b3788eadb0384757ebf158021f4bfc0d9a1bf844d13747328fd367727cb0a2d9b7c91926c400203020303593dd6ef2d4c6f8ab3253bec454072a6cf779b5acd194d43cf4d30191d4b24fe03d80a7ee4528b16cb482fd73c259b2e6e4fde5d5d31be6b97703fbbb17c3e61d20203020303992d90fe15b918f58e8dac35e96d0ebf33834ccacc8a69b6a075b263d0df655e0301b8df4b987fcf3a98000ca00d3191fd2292dc9210d7f1ab382035b2e2d02be9020302030328797f5226ad9a63c859dc61073e8ef33fe15094e61db64bcde0379f055f733403b50fe3e685c2e442a3a81715f64a840afaf1f81b49ed21b3fc2ead0620f6caae020302030203020303189a1bc58c5621e4845025a9c534fb9ad2bb2f5be276faee403d59266561d652038325fb098a4b3a402690994212511e710d20cb7966fb26b3687fea719eca217a0203020303ca11813aa459d051b0411eeddd18070506e8fe2054a2e22a763b05454e87cefd03b2cb46d28f3bcf15305b0654ca442442420ccc1b28e44e2e2c84498571b5375a02030203039385ca432e99a05cca8aa7fe5868222cdb6c928c8bbdd7eb13c22c5abe1b11cd03e8cb7cbe434eae4b8b7910183b3b006a1b3df70ae7b30248fef24d64a004c3c90203020302030203035fb731b403c8979aa552e74b3534a247c638547dc7c957467a4b08855b29b74703d49a5d90635d403354f849daf9976a4f4dfd7dab5517b254638eb893511ebcaa02030203032fddd404fe9317d561378c78f3afbe75e18c27face10d4e6ea03fc2888b22e33033c8c390d481f51cf6b43c22677a971beae0e62e8b2ecfdaaed05b48ac0f60294020302030203020302030203070054000000000000000000000000000000005ca1ab1
e45e8d4a5100002010341305ecddd1b56329ac9f09a1235eec6ce6be69492a9788db13e9187dc21e9dc020303fb1c6d1aa6d3f3bef7a0bf4130218b3b168f9447e69ebcd3b68c2b2f41d9b2ef03652ba6f9b69aee3d28404079416c2f8fba4078d66b558c7a8d9615cfe7d3bd30020303d9f042d0d2f152e24d8cde02d3a7d7a1fa234efc5dc259572d412a2e607215ba03c5b76ff595e1d74a22eb44a5aed94f3225b6126c2c28ef04bb75e1d3804925ad02030314a2b125da4db5ba673cd5c0aaae8c5bf0857fd45728b868cff3f40eaf9f82790393e93c4f4b58f6f9d397d136319a29aa6b691b652651513bfc2297107379ce62020303f00359907dd68b2ae8e2d252d3313f3ba2bba16d21995333b2162b24c9bbeac4036435af585f0f75e60d362629108f6768756f7b39f1c70ab7f79e6b4e1bd9f08f020303929e2f8eb833089a3773b497247338865ef336de61e7da4a362eb3e5d5601a7203323197b010e3205d910c230463758f39cd6c01258db0a11b9b47f4c278db0494020303ab4bdc2dbea0c00b12cedf9e968135b62101bc1e20e270a1f694ae6a4686627c03140686262c769436fdaece3afe58e8a4423cbf381295a85237e52fac66c57879020303295e1973d07a067f281e3337e756bacf10dcc295f7074564874ea4401eb2a4e503cfec4348d3a697dd4f1835bc31c2615f56f92a02c1935cceec2501c12b8628f10203033892c29a2de6aee7888c2448fdbb3252d32b426bf74edf79223e4ee886fc0f6b03ef287d8ccaa574ebdac646e6d35bfb3ce52b00eda1e671d7d7bbf31bd59ff7ee020303c58f22b2dc782f914b31e3b87185b727a0bd2e2dcc41481e31ab1b26f222fdf703f0dcf8a2ce85de4d96bdc4c1a9c52a7ec54cc771750f0ed7d6c1113b93df65ce02030203039a7c26055306c8884baf96dccb2e3bb3cb30deceafdc73491bbdf0333400efc0036ee70bfe41de62ab49a9a63ca415bb881a92980f87fc044f2f5ae2e84185dfea0203020303c4332d86dee9e03fbda2dc0eb81cb20a6f6a20c7df95090f09e47d8e7efa1d7b03a698f30a106768bc9d451fd96a6808beb2b799deec6423688d02a9ba34b4af280203020302030203020303398dee7348cac5f07e4865c2049207722ec9572e2ae69b21a8cbd1c053c44a0e03612d7861c014aed261de20fd1109fc86ae090eb2c37a02b8a6072bba1c77c8b50203020302030203020302030203031f28ae8c421086878704ec730445ebf2ff23d186ffed24802f0ae24259c8d21403a8e38716cdd8a09095a7036c686009bd8236b1c7eb9507540fb981baa9a8bc4b020302030203030fe638892efa1dbdc2881def87e77dbbba95d91c8debdd9b242bbf0745455a7403e5554fbb47341d48f82f6
4a26d175a7d3559378657e77cf2de2eff917b95be300203020303512c2cf0ab4340a1623bdddc301aa586932f9413ea9bf8c0f1849d2d70d5d0ff0375d0cc499c7f76c70939fd8d633c658747eebf0eb138c15d902c34f0de9098030203020303b78dcbd59a3668396357cbda038d7e5bc77aac4acdb3cd3a75e96eb05079a6bf03ceb3ed2850bca5df0bd69ce2e85e9daff43cdb4c79f58685340f521521a0943f0203020302030201034b33ab5a3b8d3b01c374c1d7fcfc714398b7f0704ba2f1ee757670269fd5a7f7020302020203070354000000000000000000000000000000005ca1ab1e5820000000000000000000000000000000000000000000000000000000000000000043840c77070354000000000000000000000000000000005ca1ab1e582010c18923d58801103b7e76ccd81e81a281713d174575a74b2ef0341f6b9a42fd5820b8b76bb549992d9bfc44e3b36d087a175b2e78b9584fc752eaa3013e0bdd31e8070354000000000000000000000000000000005ca1ab1e58209bd14ac8c1cf553e0ad3a2c109b9871eb74f3c116bf0bf492ef04d2983722555582090629dad0a40430445b7d2b25a8d19c5d7a929608ed7890877a499aaca01ca5002030315edee06840e36ef17d13817ab5475d12f7bd50113984febf31e2cd80c08952c03d360a7d78676862429feb7c95d052c1e63379b8ad3becf085a21baa353ab93d30203037a2fb952c2cf8e85d9706bcbcb5a69b83b13403b58f06b0767f4204acc5917930310de142eb3b2790cf1e3694b72eecc7e8ab3860f543c15cc24274ff69570f009020303879875563fe8a079ef71e84b6840b187c681499095de9d02d8b101c9dfcd111e0395e9fc3b000e49b65678f256d247786f72c91494c960d117b7668045c35502720203033d99bf4088229f273fa4910aad8f0aae6f7de8fd1832ddd14c8fa3d083aac51603b40316099ecb013c6dcc6ac2a3e831521afa35ea0ee52485c2e8cd40bd81fd870203030b576686ff79ae37ff5ae2d4240131369472a24104dcabaaf3c348da66a638bf03dddfa8283748687718b9672b0a69a6b7758ce10eff383d83986c1a2aca2910e002030313ac1b1da5a5a4232e4e2b766aaba01f45f9444b926476f01882e30d4cc6ae1a0323f57d2012e1874436ddc007ea8bc6dcbeae6e0dac6fd044c8375d2fe593904502030381ee4d8ef714022c3c5fad435af845d213cb988ef7561ddf65929553b70dd69a03178f9fbf18b1d12feb522330c82fe96d15bc4964e1c1053093c4903149652e6b02030323cdd6298c89fc39f87595dedfd8bae3ae7a40b66f312333394482169297dc8d033517a6ff26c035b9f822da8d2abd642c858696e0d970b1026cb524cb0844195a02030391d
d21f4c52970493d439537192b245ccd2e4e3e8e16d90dc74e417718b12f9103d56b5ff3ad5ab9205b2d6f9c508e744643224a7ebca8c1a4aea71f01e48b186b02030304375ae3a357e874c2a10fe3596adee75d0ccb96e63838d8db70c9e402663e9903bd8d2e9ed97a66281cbb0733a92dcc92158740088acc7a9c834d8204c0acc1da0203033c9cd711a378c8153572663cfc686ea0324eaabf0feca614928eab900755299f030bdcf7033e475ad4a147377e1bb9ed8619b0c88b728f7935ecbe7bcd2fa82c7c0203020302030313f878d66e026ade0b2e4ffec8b114291f4d832aae729e6da9fe98f316651f67031b597cff5ad0e3ec8b6baa5f15993d6e950c3cf473b0c796f339d7e9e28da24002030203020303ac0a0e8df4bc9026b7b241c34d72dce10c8424eacea17d1670103c8ded2446be03f7a62663d338b5b7e9219d01266b1772ca3720daf925bd302b7dafcf8abebcba0203020302030366c76d1cd3e3136e15f9f29f3dcc4bded2c760f251e06403ea022bf5d67ae2d503c574a334d06a611fa0340a1213e317efb125ace4eda7a487ea00075e9a6b67a902030203020303ed1e8a1502eb462bb7f836f6d72486908e1b2cce7cb00e387cc1aadc827874d103bdbfb8f970bcc72256ac4e1eca0809217c625c6412289a6dc5dff7c91454436602030203020302030203020303e8bb2dae757d043417195292bac773cda990500845f91a94d00179fe89525c3e03e385244b15001ddd43b0180922bbccf4040a86bd116ade66fc3aced03dffecff02030203037d8dcb012bdde19a0dd178c1de91d21cc866a76b9b6315554fec4bc4f5daa79203bc46b61f50585799762df053271c52844c6fe83156fde628c8bc4369c4fed18202030203020303c998c4c602a03cfa0c92a1542165567f11d23f2ae5fb91d04e02292f8e297548039447848097d9500f21ebe819789f98461e01aff7cfcd442c8afe8e07b87a95690203020303f6441de6ba2bc5cc9ead4300519a94a14a80b85f2fb0fa41e2211d7c02af0e6703703a99e4c2133e89a6e892863f14f143cf5f2ad94bd527081c8be6143f14f3db020302030203031da626649ee092857f195eb0059639309d744972389a4e748c471f16b0fc3c2e03f4072cd036d5bbb777ad24fa0b1a235806ef25737404d7ce4f83babb76bf090802030203020303c7f6a9615846a2d627db4c940098479bce3c61c8fc1170d0b7f8c9ac2bec5ea4033664667108158e9377b49cf3632952090b6eab3ba6eaed4f48ca9d5beb273fd002010203070354000000000000000000000000000000005ca1ab1e5820eff9a5f21a1dc5ce907981aedce8e1f0d94116f871970a4c9488b2a6813ffd41582021bb230cc7b5a15416d28b65e85327b
729b384a46e7d1208f17d1d74e498f445020102030203070354000000000000000000000000000000005ca1ab1e5820cfe58c94626d82e54c34444223348c504ae148f1e79868731da9b44fc91ddfd4582040fc722808ecb16a4f1cb2e145abfb2b8eb9d731283dbb46fe013c0e3441dfbc070354000000000000000000000000000000005ca1ab1e58200000000000000000000000000000000000000000000000000000000000000002446746e71f070354000000000000000000000000000000005ca1ab1e5820bc32590bee71a54e9565024aca9df30307a01cf8f23561baed0ef54b30f5be68582007b7aa19b2ab0ca977cf557ea4cec4c0b84b00a9430cfe0323877011fa03269c020203e752c67cd3ffa4dacba7194a973b10998b056b5b936d17da2a8eae591a8e175e020303abdf2e1db4950317aadeff604ae51ac75e8281b1ea22e7f17349757c71dca21d03fd88dafa9a26e9c8f617b5de3e1a6021092dbff5c1fdb5d8efaeecb1f333565c020303b8a363b96519cb0eed132d807f6e42ea35f115584c443a74b14a19bbeac463d7038247bf369e033fcc19a5797960f1387f04f80cf396babac560060887288632db0203032a893ec5bee53177a40777945189164675444d0087d703c8129196df58b4ffd10384203647fe683ea28ce78395875f0bc39f1fe1ce6c9670b8393161514dab47010203039cd9d80315aa2688e25cdcc210d01a64e57404ec24bd81538fcfd3880c7a1485031ced4693c4d71b2e97ea6287a2d22ed1af991abfe52dd764bfcdb56f3084e85e0203039067a4614e2e410a883b1cf0ccfdc298c978614ca1a472330e5d63e1ea9ae095035bfc8cc6e977317e3dea3bdea3406975ae2384e72f6e5e09ebc3ff358e4d9725020303f30c3bcd31fed704d2c67d83ece97bb8fc518746b11b291f9ff5c12ea436f92703800f22b2fc6b77bdb96880866086a8f4d621ef386020c90fe2a678b1bc3a063d02030203035d2afadc42d28ae8d74c7b5f96e56bcaecc01423bc9555ef9d9686271ddd238b033852af41b0f8f922418b3f525cd77f039ce8a0e41034e8a9c51de2daf331a7cc02030203020303dedf2c8185299a3cdcf5805fd538243eafabea31d99f30e0c56119453cbe0595035fd0c51fc95c362deb97f4d34a367c56d9c3bae67f33a75541d47299bf8c85d002030203033a34a2ec21ba01bdffa3e14bdc6234b1177f58fb0f8d20ded1e0d337abc9097303f2a2ca0856cfc4409a556f408436e6112049837ca240449b521ce77ded9bbb4502030203020302030355b79241b57362ed5a29e40e42647066862077123d3363d2776ae9a5235aa625031a0d841893cc3c11eefec6fcff6687e1f2d52c667b72e9896d185cfac2d52f20020302030
3267a5ba100569955e1248dd2758afbe9cabcc9fb5256aeadf2d9de2bd50fa9d3031c3657155c2172893ad0ceacfd6dbaac96e7450dd3572516816664bbad57307e0203020303bfdb95766031cea080daeba2879e20c2c9212e98699aa1a9ddd0f35b3f4f14d1031cb570e01fa4fd83227e9e7423cedcb4d1f2aa49a4b379bfb5532267cb41d1ed0203020303d26a86e0cde80dcb3dddc5689ee7aff7cc4aa63c69a65db071604f2d22821f5003453e11710c67ffb8aee8ecd4e3d9e482a3c3b6473055a8fda9141761be2a2cfd0203020303eed4e48df11288a42497f24b09a60c194737347e0f0324ace547906015c46763030f3541edd630e75e0ecfad8204446c4b04e707f29a911034b0d990df202058b6020302030357f21f30a7d272dc3b763a0ba582826c2888cd791ea5cfebf8c6eeba97688cff03942b80bd4855b40d077eb25e2677767cd9e3e32548b948133c53d5cfd98fb4120201020303039a912ac6df3a5af5d7cdbebd9c86dfc4d667901d38d17f5e265b4ec92851a3039a13ede5f8fe8fc936a9c053045c21b7cfac59232ed14acebe5a1270684c7ba402030366f89b9e4af2d9333431a7252441386158c0cd4f1416c432bbfeddeaf9a94fd303ea0e7f59ba22f8e1c16d8662786956816da4d6c44b3af63dbaeff9fa26ff58a8020303087927425293ead337b03b12cc3be21e324869c328321da791feace40848626c0320fde6ec582d5275f6c1b21b4ad7130f8e54c52d05483ef9effefa3cae9eaf51020303dd266e9e532095a3ef2479e8543f52ee9386405aadc619a2e962ad2f4ca7940003015c36f881ff87d7cdce55b31157699432c553b1c2be328b4b041688853ec960020303d58b82e1f5dc744c3e99a29dae08c0cacdd92b28e0966a5fb3b143479649353e0381584029a53e6c7f0dee68619e681482b9e36e43858e57bacb3554d7af2a8ad1020303f6ca9ca2515d3662f23cde1e54e67e0817607d0b9f501818a528ca1b43ffcce603bd381317706701d336e83e27c1cd699d0b616b349b0e28de4cd010cfec1a2bad0203020303af2d5e74e0ba57395bd0c11be5508a506eee906defac2ac84fba6ce7b577205703dddb21150e7c057c4df77ad73836cefe1e746adc52dfe903bcb543bea8eba9d502030203036cb57c550ffabdb39fe5531fac6c603b79b2551bfac7e208e7a1b1628607ff9303f46bdcac887fc8561c752bc45e1c98389a6a35cc0572575245a8f2ae513bea3f02030203035dff75d9bd1da1247aa0fc644e204d8fe7a916636d465210ba9a828a93bd8fde03f50e7e2741b63ce73e98ef6a769fa9339d941cf993b7e4b26305f22e9f18bc560203020303ec8a5f20ba3d3385c3ce7cd26702f5e40a4432f72ac566a3df649c1af87
741fb036a000d8ceda0fcfe3ba4e6ae633e3abbd3deee0db83107e5ce0e0469b26e7324020302030203036058e9f8cd448caadf126fd3b7d50fbbdd8e2f7a8de9160a484ad79f8829bf5a03be9a1646b44327a504c96d0b2ac009d73adb23ba21ba3df5a5dfff32b74403680203020302030203020303ebee5c234bc2f660a9b3efe1bd2fb7d340182d904429b1f2a4e89bb51b1c47c903e51438724a9cf3725c22e07d59ba15acf0bbf473b37744164f122ac475ec42d20203020303bf9c131a0283cc46ca74d21b68d0b3a62d131dc9f4787ab60772569aaba63fd703f011de292bb236c3b08513f7b82ab7d311d0f80d4d3e173c2f8445028ed1cbf8020302030203020302030203020302030392af697495712b4013883b3f5ad2d370bdb93f0ed60416692b0267f10d9a3caa0386fa8ccd91ab622b232223f9a347f1785ca9c4b7323a2e0d19a7971c3afd63ff0203020303b4f12607fb8df583b854d7b235f4a64ccb2f4bc9819dc50f3a03ed0d4906910e038f64a125d14bb92752d65593faae8e41bb5e80e4f147b20f0c247078f6e7ca77070354000000000000000000000000000000005ca1ab1e58202d11035f2912c26c30c4f8957d3910a20622ea8709c8cd3e0ad87fa0f4460bbb5820c0bf0b2ab68768eaabe5fda7814227beaeaf4c4ee7e67f5d07aefaf5f0410ab80203034d5eb602925f13a2147a2c1439d43faa74e2561bb3d27811f02042466fb2804f035d9458bc537a1957fddbf6c5f13c6bfc9349abf1251df9d6dd48b5b574f6f48f020303bbf6401ad2a6b95a3e749f5b31224fc7fcdd083e7aeac9671ec3bebda312fe5c03393a914dd0b171b4cca2f5cef52cb4ed4b564278c0fb678e5e8f3d911b4addb302030356cdb16849ae7aff540b8724f73974149f71cd3f984360537159a273a5bfcc1d03791ad7bed137c9501bcee55dc6c9b030157a30c37fca14d39c25d9d5137ae88b020303e43916580d350f4da396c5763989f003085f6c468cf815846a95571702f1f53903e88243a0e60743a8285f13df8159992bd95c7f9546a8b5ef0ea2166fa211b8f70203039691d481d60a086435d9f914e8e2b5e5a68abfafb82dcc9d6de2176920c35ded03347f67f0fbbc63fa8a3b826c6491f42b13869a2abd2b6326d75d51cb30ea9cf1020303a06d3787a49c8745205aae2c80c6aed35adaa5a8e829f8ce8c29d55ffe8cadef032b843523c93d41eee41def0561be9ad7414c5bd9591d8e3723fcb0aea6170c72020303e56edd97325fff9e9a09d439d966a37ab63cdb3a3328b157445b60c3b91a86aa0381354b5bad8afeb2c183556c5f20e5d25c565cb8a738add05fc71bfb086737a102030301fa96c592fe444b2504b86acb3efb7befb3e241223f2d697
c162be93668231d037f5346f59d4e0e4737f7b5cdde5494c43dcf2b583098022afa1d40024d434625020303299100220dba6b0afe91d1fb4a5c16f6cdc90da62bd73bd75b66063366a950f90315d7adf6a555d635edb76f96c7aeed7b5e3990ab1d13e0b01acd386ddeb43e0e0203034a527f4391b236f6ed15aeb5eb8839bca31aceadf3b8b5b7f5208d22f6a01b8903ecb9612fb023bcc161bfacadd2003a53d264c5555c4d65107fa01d984fc66017" + witness2 = "01020302030203020302030203034b4c181607792b3c46ea253af79666ab9bbfa3d29e8855be6c4e045b3424f6a503fdb52981685167cdab219ae57b3c5869e539e89eb29845d6406b3229247e982e020302030203020302030203020303dc378377acad40e16af2de6482d7a60c1e5f087d067fc716c2485742ac2e29330339535728bf0c5d72ec789110ff3691dfb9cf434399ad849a86ca6725977d3e4f0203020303481a1fc812bcc98ce37225fff9f28a6d8d0ea5c63aeda93b031e8e4603cc8e7c032952530fef71561f9028c37b944df439c0d2968c4f7e247a2ad12dd4969ffc8302030203031ce6733d3a496a34cb114cad924070b0dfad8ff6891f629ed2ae31326540fe120345057d6cbecce08aeecc475c91403549f4fe82bdb953895bdeded2fae6f8688a020302030203020303c4ac3ac799860160a30a3304b765c2c90bc414edc3739a5d098bb7e18009548a039042e98ef239f418f2bf7ad10868e1fa7d0f644458488adf684313dc3f683a5202030203020303949f805ade2be05694c8011fa17fab3646a43f38f96d868386f0ba9558ba5f960302aabd9fbeceb9711f46d634513830181412c8405aea579f470a19b477d090140203020303db978a462b93b2efa3aa3da09e03370b570db692c6d361d52ae1051bdb26a3a903916d67432c505e1dc33f3617e0743d761aba44785726309191e79cb18b666e7402030203033edca13bcadc1db9305f3b15322cc6d774682fffdfe2b509f81d00b16ce2dcd003dc94780e238944094e7856154e6d3e54fec28293a9a70eaf1cc2a81e874e22170203020302010203070354000000000000000000000000000000005ca1ab1e5820e72de8a1b9696dd30f7886b15c4cc9234d52c6b41b9c33e2baaf8d88fc5b7c9f5820f8fb80310ac041e7a5e79c138d7261cda5d8a988dc9268b5a8dc5318fb610a90070354000000000000000000000000000000005ca1ab1e5820000000000000000000000000000000000000000000000000000000000000000358206e82d18bde430935057c321f6c30812e0eae2122da6af753e25974c92f0d7b50020303c6cbb686c9d7a94f49dfbee076ae1b87f1c9bb5f33b7c98a71816c33b4731a3b037514c2021
b2bb805e2a6060b265dd53c069a4587e19cd7d1af99d0e9c3d0e550020303784570292bffbc3ee00153e5806a317459fed4c1d84de0515dcefc177404865003f08c92f6786e67148e8fb2bcd4eb813a665e16a270b475605d1d84b587450ff102030344c5e2a1775873020ab4e5a588e95d6702878cd46012682196dc39738fd8780703a6d5ee17fe3be7e20050e4e66c54b5188febbdd3615f832c35b073078258b214020303a42b38dcef18f890c02cdb90473211c95582727b83af287cbfc8a3f10e29649103380623684a9b3b341e01ee65908a6aac96fdf1444ca255b9dd5193537d58709b020303476c478891a8f8d905ebf9e5c031ba1020ca1436538bf9b97c6eaa1b9512da97038c77314895fccd4edafbfd73b531f4dec6f4671b6acde83926907ab376982f310203036416706411fa678c78f77dbfb609d65f63d6b04a8aae3fae4cad23419f6e738b03b6ec59ff099f23c5a528e805fbd9457736b100ea0e96390eb536046b88da3db102030334468b79fd36c8bc812c6613d176983aa4be53642e7e56421faa4ef25031fc73032869ca46586018725007aac483055d85131fcc4432c9a72175a8c6263b65c1ed020303676f9f98ef2cdc44ec8d98d0153be2aeb90b08386286887c94567950df1216440385bdebccb7559d68f55e26ba0980bcf7120609c7bb43cfc1f701e92f670ac1280203031117969a5ad58cb9a441ddd498cf3bebc13ab5aea1ceb29ddb1a226c5343c6e703425597c542fab13f686a7053f6c1e2635a729f8d9da4c01d763ffe9965ddd63402030345f2b9e446c9e743f6899409a4567a9b7f8770f711d39e39773d8173c4ea3a0c03cbc17bc3c54426fc8cf2b13b1ddb800509579856ce251beae01d924a92a8edb8020302030203030560b956a67a313d6d8939eed4cd80cc385eb49f7b6dd269ccde33a145f1216e037b3e0569695b777df45db97a41b025b57c680ad61231b61225fc7825824c4c0502030203033f1ce9dde58980c5bc34a88467f3b8cfd334dab19f28050acc53f33aab0b366f036092ba2243e1d8e20c2aa4ba0aee9ca063e8e8e6da493269065c227232020a590203020303921f9061d1b4082d20b7f9df21566609ca6dc64cd0ffac2625e9ff3090ac73570371757934d2c7d4dfe9b7b1e5c71fe18b66cf56c540c3d04310873976f79ef9f602030203020303e8e045e00ad70f879f31e498fe49aa2fd4c81849b6c55dd98391681351aac7df036e509788bd99ed4034d5fa3901bbda4cb6f94d709b2d54eca545336569791f36020302030310125b6177d5086fcdca8e0607e49e6cb21bebc95404329c9769a7d3ed59e2c4034e9acfa4214d459d1c81a304d862b2dbd4d832e71ab851656bfcc0e9c5b3e6f60203020303ad6
56bdacec77a2e7e591bddde7b2c7ab9b928945ee65898ff35900e25f0c21f03e47d9766945c4649bd44422f5fa779e92267d76ce44f396ef0b672215e43ce7802030203020303d3858acf0781afe0adae49a25d1724b36c9989179cc884b9a9c6481f89e57706031da6fb50879ede58d816d1624292f0c60a8133bcbc671dd92d0f9cb8d50fc803020302030317221db9e049aebabc83cefc3ebe7040ec1e82022d104d2c78f796753f76f0120352124f3ffee53f7e0f9a0068d5c0b0abfca5aaa148371c91e2e81df5fba6f8bf0203020303e00ce2232f3e208dcf050f887d02c7b91170c4b98e1d098ec5238bb3d387a41e038a0379dab30ce84865bb4f0834a9c3fd7bb2da17994abf03e89fd8d754bf7aab0203020302030333f5853903cb36caedffa12d0aa0a01c39e91309629a97dddafa8da4f738fb3e038e1dc6012aecd5998053b5878c6e3a398c8a286c7ad4dc0b55f043e4b4210950020302030317e041d91830fe8051fc73631cfd56519fc5bb88b5369298da9807b40d93733703dc7745bf8dfa058bcf1328f79efc9441cf5ad5fb5763c75bdebf9492f88a6c8302030203020303bf9a2780e63ac26ffca6df07f182db72e3e65802b277ea6d40057c383d5a37e3036c1548eb1c956ece0a876fff4463cc3f2e9c3b6eef88379f07e6d71013eb4aad020302030202020103c2eebac9e260dad1f051fa75846558dcc0ed120d1a7699fd1d6b739a3419015b020103b9d64db96b69b035a74a90069691afa2b34a705f3ad3aa82f3757c7a6f2a9f17070354000000000000000000000000000000005ca1ab1e58207af492bfc857210d76ff8398a35a942af892e866b1f4c241746b3ee89ca002595820f889596e5b3d4dbe5bcab5cde2af26d3ad8d88bc086e8b4f929885f33f6eec77020303cd3edcf79c7e0e0d6b87ae523729611faeda92e77bbb59f739e9a6127d890cdf03009a5c01c3cb27d2f1ffbd3fd77ff38f66991f64174f5df1411ea21ae2e22f250203035334694d8b56a2f5e0d0ded81283e7f36b3da6fbf2e2d1b469c008d7414296be03953388b0cacc587c5ca452ba8e96a9071958ef94439690bc14866f556a35ebc1020303ac2aae05bc7a68d238e9a9bbd2d5d07a001f8f3651bb25f5a6d6dcbb155569090335b6f55bf3d56419cbc3a45d4fa6bed330d9e0391f8806c97a7aa4149d06725b0203033dfe4a2f0555ff318ad12e49515e712f339134af0237edaef08553d9d67e260b039cd50a46feb34ab47c24391a2579e601956897ad6299bd14a4c8d9628a37d46e02030348f01fedf98979a5fb3df07daded956331fa6a02f697dfe29dd26e71111de5540387829f9a96ed82303e86550747e311d5dbfe94cc71113600595360abb512cb7b0203020
3020302030203020303eac48d9dbf7d162797293e0acd54382d4fd53e80e29c9c43c51dafb05c0880060306b13c75c66a6e267236b6579bcca576ff889e323ac6ffd0ee317e07623a3866020302030203020302030352e23af8570aeca858a6aa5bd20d2c63a92eb08529a9e6e5fb245aa72c5b72ce0334d7cfef6cb28d62f63cf907e3273d76a8bb858423c6ef446b056fb4f06210e002030203020302030315bf4cd3a7f33296bb4778e216bd18adacf25c97f8f4df9e1052dcba7b6edf2203b3637d0b1cf58c5272f28f8354a764d3cd48ff7c04f807237da8f4a1e2ef5db5020302030203020302030203020303c018dfe606efd5d17f3d45e91d33d3d7ef57d92a1d509291b1556bbb7e78dd0803b08ff5bb304aa8741af608415c541440edcd98bbc0fc849fe2a89c2c783341d502030203031e0eb0ac5664b1d0267d3c51dd66d1828c0d01a0903d599a317e4578926d4e3503330e2ccc546a3db52e796943aa8960d6d483a3b941ae0caa21cc7b9f7a3c2bbc070354a40d5f56745a118d0906a34e69aec8c0db1cb8fa582000000000000000000000000000000000000000000000000000000000000000015820421c2cc0dce9b0fbdb85cbe43bd6c2a1af5a6f5da756cdb8b6f2bb948e3a90da020303ac874a6acbf6de628134cd74ad9f336206e7aadb1ef09456b6267a770612485703ef323528b761720ce04927a54b81025a935f070420d655217e40eb2e084bd170020303a48199a63a429cf44fb39fdbb73098a49dc171e03d32800f483780adb9aa06580388796e8ab2076fc77f00a5096317ceff8a54da310b014a0310504bcd76f8b8da020303f8698a6f24140e0e37f49032fb2da6db2c8bcaea8961a6e1976baded0d9a8bd80371b277886f0d14b6f82cfd063ecddab10fb5da5e0666e040992469d09a6bc8b0020303dee2b54587eeb7db2fe9ef0dc262b6ae679a5bfff89c8f403d181a1d79107b1d032aff27f522ef5fd88213c3865e01c7b4c1720d56778d1bd0e48e6a86fb3b07970203037d0d29240ad72800831a91d8e54019da745c6c6a630a625167723ace857bbb810305edd49a8cfb1eea157734968e95e8b8620c474c3cfc6f3285d3dad36893114302030349b1bd34664838889a2133d716143cb8707a15745738917bfbbeecbe871e6e90035ba74ef0008ce80ac6d199cc4d217aaa9b8a5fd58f2d329aba4e061c16d99b620203030d600bfcd6581d405aaed26aa7cee976fbb2bb9c1c1390bd3eb14cf5f661022303acf3369b84f876dc556ed93718d616864020b1969d24170f4970ddbd944e1bd9020303d5bb847f016f33d8cac460756aad70173d8d6e37c37d1b69e1b1b45c52e5996103c3777105bd00820b49c89486e7589f0ccd0244ab6fd4b
1409ba86dece7506f9102030356b2929dbde358b52b652bc842c7a42aea162f0d79bd7d653b5cfee34e9f0e6c03656a686adb3bff7a9d8841d3e296b0dc61c389b399677222ebbd70cf0c19e70a020303e5bf4a0779ccfa6d42a01e532bb6120b168699bfd3f4f44a62780481d5f86588036efb82ef530fb604bdff43bf1ad1a7dde41522bf8a7f5e724dd3074562b0c0ef020303036a50ac7a6e425842820d2a4e07a80f416706903e9d88b5824559515a901aa80303e3c58c1dfb4f5a5a5d1180dd010ceb33a42a0ff7cab200ced5562202261aa0020302030385d60697f5b4482fcbecfaf5f53111681c9b48ed7bbd2cdb1a257bb7f26db9d103ae21f016eadf6448b913ba498fe3d678b8bcdf9569b026053de69bd24563ef0202030203032fd50f1a5b8eddbd5ccb90e37d9c190092927af9b26a1cf8b4576d7982476fb603436882f441f09768b000722da7ec7c74b6f0252c24e16b9e6461ce4f4eeb791d02030203034eb00b994d3a8d439f47981f68baf7fb0f0e88e2167243c6b005de3c48b5c3ec03ac5626fd3f4030d088d0f41834de11510b59739353238241138d70bd8e05c22e02030203030a51165872abbe7260a6777cbbd2f6d81dfcd07c1b7c0783659bf8e8ca8e77b9032f78c81c54fd31d1a25214fa464424ae6e6399f15c1bd8987825f1f0d0dfccde020302030203020303215472473dede3eebcfdd93b1ee898e4d6cf33261a1fba12ff77dff2fb8a0f27037938ac733af661730414e88da9633c04a8914c9ae4263a4f8cea7066e6cefb840203020302030203034d6713bc006056011f31ac6f935e71e33ab8045353e9e138ec9743e8574a8d2f03fcaee2f22e1561702d029c465b755ff5491e4114264dfdf16fe9efd34864a83802030203037cc278f9b41fd17fb5eb3c839a725fcd1ef6000189fcebcb4214303f45dcd2d60386c3bc64da300f1a87efa2eb2724553e41348057fc99d5c23b5b20e216fde46d020302030376bf2ddfddca9910df80bb0785add76937d1e90e029c02b04c0cf421622a232803ba2219bc37e93a89b0effdfc3601f58645c1cb7e818f2254c8fd16fea4ba84440203020303fdf1c4799edef5fe2960f9148627fff521e591247a224eb8d05eae3f51675b560372adafa8e298a14d0da0a71e645a12a23def78db8e81f7a68ef92aac7d5700b40203020303f5a794d38718b283b47993f3bbcd67c76f84c47fcf2d35373fcb7f8a0f43a06b03a14b12d1f03790ac75797e463a8a7edcfb2bc80b65a7dc8d1b15d00cefb315d5020302010312f8462573dc83d436d2498e68019babdcc911f482e1e00c1b3fda70e1a166e40203020303adcfa2154e38e2cdbafd5d56bdaa5dca90a5bfb9c36bfbe140bb31ec0e66716503b5aaf1a
6fa2e80ad8f4e49c51808d2898fd74f539ec5de974b57c27466f5e7490203070354000000000000000000000000000000005ca1ab1e58205956a0b12f607189a063054545ab26ce76ea5eb4c9bc1e8d8161646c93ac66515820da6aba51eaf87e14a7585e52e23cc0b789c61b3e808d2aef704ae932bb2ab49d070354ee5a4826068c5326a7f06fd6c7cbf816f096846c5820701c251f0448beefca8b47dce2e42f136d224b8e89e4900d24681e46a70e7448510237426c03237429400000000067445fb8020303dd24d6adc0d7b321eb19905b22f1780707b0d7e30026716c3b0d7ea311cbfeab03e498dca26358e5fd56e5464288da82073a17cbbd112e322488c12bff1661b49b020303413600069144f3379227184b3d365f22778695ad2b812ffe56bdec80df882877033b1e22049401430c9208943101b5ef3e70d99c9853e08591c4729e0f31f4bf56020303ffce2337b88b26e7b0582d1679484fa995b68c0418d72f650531db342e25f12e03493c1bb3f993e9aa63e2736b9e0826f1309ed298bd95bfc169f89b6a62cbed420203031bacbf380b1eafbec9c534577f8972d28087bc6e94bc276ec91e66a11396f07903bb137addf6042ee1a1eb0170ac09f0a092b2f7682f718d5986152d56d192b347020303b89984a9ec10a5bc5835efef55fbf26f3477d21372a55ae4abd26c55ee5e323d035ab47c29775484efde5ad8cfb1a399e9008bcb66f6cd77f28c255980633aeb5d0203037902d8528b89dce0e6a41ff89888121f42520936f3684bdc8481094f1e046b4f03cedf898a501b7bc036d92797f971bf9caa1028994a8d6b15ceb79e4ca532e7cc02030203020303a366f69c8b19d47be34a2a6333298d705692f65daf3fba95d6f48b9676b6cd3b0351f190ff80b28f339034b6be161060cbe4837cf22e0c01b3d5a77b8f349c4f1d02030203038d8eae2b45a21838dbf9f517dae99ff0bac7a25d4756a7a3315c43cfa7dbfb9803785e2e17b8cdb9628ca4c2f963eb5722918462cf75f91dd6fd00ae84d17ba2a90203020302030312a3a949b95a27ae6f73e9d879bc9c9c6eb6757f1c20ee76d1f52e1d4c9ec4eb03d38f8911a661255b0ebcabbadd44e38903841386863c97499f3e57a06bc5c3e702030203020303763e3e4c8cc4a4b30afaaae229ff20ac282d74c923a88be140293d62b2d812bb03b4b4e3386c676de1012a2bdced3714094e57803a98920b0eefe63e186abdd4d902030203032ee550fc2b119e46e3338c971e6f44ea838020e442fce0c4a34b359306a00379038c72343f5e2ac968c7f1edfd71f18128db6b52aa476fbec372eaa58a2acf45220203020303221a1371f01a251478f2a6673db891a3c412d954dc9e741ea2bfd249abf428bf03250
59126652b0c2c46d78a02eba6c4df473b674ed378b17827c634bd119f5422020302030203020303313abcaaf43f5d42589a57c6fc0bec04526b43a3dc139415af1de50f8846c004037ee72e1eb97ffd7dfe0c7d40b575103edd3e62c030b86362c41630c6e97bf6bf020302030203020303508d990b34daf5a3f925c435daac3d293f6d861094cc2d343a92c62428fa66da032f8b40a9211667e9c44328d6440091ecb3a46bc15832f7d7cdfa8ec130b527fc0203020303f993f7eae6e45a6f8557c6c5d0e912cb41b71d2bf37f38affc0b2d8e054193220315eeb3ab754628ce727cd0b7028ff8ed3291de7566b99066e127185d043f595702030203032f2c132f32f21e267ab64271e8f2c0c39fedbcc509c4589616cffec21d7332eb03839857347599c19c43a0acfe53e1bb5bbe0d68ddb49cee05f1b24c5acac24a150203020303dcd0869ad1107856680f6bf164623fc709d26d1a0842bb9c60a383f255c0ec2403c92cb1692742c4e2b6a91d13c3b371a9dccd29f898d8f6457ad052b1da9efcf6020302030203031408a1feb1c4cefd2e71c1d7ce58e6b4c2d139d48c67037d40dc0d60390af539039c51675ab13cc260ab6875b12824ed60903c1755add14024e27508ac0a3b9d81020102030376fdbe16ba7e2f048d9c311cb1f99291b4f624717ddd7e9f2aa653099d19314f032ebe85ea3fef7c7033338d1ed98e187eddf75dff4772a23e19392ce61690f77f020303f901f2ba5a7a95db9ea7106268f17f341206944377d1f006921211069cf8a0a103f43daf24401f9ed2d0691570a8ccdcd016c90b722786ff590276f7cd5933ff3d02030378ab72606d2d32782ceccc9c11af9496f599dec259281c01f0c18a3b875518ed0355b4984426bd4db31ca5d70798a18280a4d319786bd897a29365d2db7489b32d02030335706adc0febe81255c960be521ae4c7a6201b2db502fb7016a5d4d9ba36c58803ef3e9f16053b7f799f207451eb3403eb95301e9c9e721dfde0c41ebd8362485c0203033b59831b753c1ca3ed58d3293aab0099027f87ff97f3f7e92d9dfb095839497a03821fc506f41f2a0bcce20367ebd6ae4b461e110e1788d190416c8345ec72c364020303cf6d91b6b57705a8f02a367997e807f49dba00a5bb3e8d0de25eacad5486b88f03abc64c2300b90b30ae3b11fb71095675d1a62860a6471a1a2defcf624b8bb4d4020303be890b95c3a4c5c381f1a00d6d98da4cd8467e002746a8c52f2564e41319d3780394b620da3f2c277f0d4a70c7a54a7245503ed2e808bf722cce0b503e242ae7d10203039f6bac7e82bf632c8b003eed17f050a49d2ea83b6a93e09295b3b3c51c55ada6038d01937127f83a85e3e655363f467385226f7e406409528791f
6e2375184ef5e02030203020303e2ba22bcf2fd6923a2ffd1ae073bcffad33e81f4a7cb9cab82e130c63a213b6e031dd2e6a82a0638b027a1f15eac2bceca26ef1519de70dc99bd5275791bab4bb0020302030203031d0be4b4d178c76d39a7689aaa3a9866e63b999a2d11dbec2f04787c714dabbe03e5880788e24aeb6314512538d4cf7382b37132d4d2870122f47de8ac0d09eb020203020303b9af076d8b0e683e730de94273fbcdb5d2ac9f29273a9ffb38875892722f439903e22b2cbffaa7b1ed370a3d8b87199e1f1485703145dd3de0945cede9629702600203020303a019468f5d28919dfcc2d7bfd844492f2ab1df6400a17627b31c29ea02d583f5038dd13cd4ecf8c4151cebaf6e2637913a2310a81d4ecbd5f5fd2f4a4c315558ac0203020303167bb488d1aff473f1027bdeadb8e0e7a439f6a589c78caae1a3d045e78da60303ddda65ddb3f7e0fe430faaeb49419075391fd2559659f2ba88d3655454e079e802030203020302030203020302030203037a46bc17ebfbc47f6d99661de00074c9958e0f7fd66df7c77c236b89b165472e034b58bfe7c7506d2891367c270ca350269dfc0a08b7466ec2496c6330dd602bb302030203039b58b0df7fae59a4cef25184d849214bc145cda115b2c0dfd85fd470ecdea70f0330923d4d299efbc138d4442519d30cd33a7827557231388b81a6ea9c65eabe6f0203020303af3fee608c2e8e5a30ffc6345d86ec1b2d55f10e518b4da5e8eb59abde07b59803c2016682405d3a953eba254601d5fc0b8966a33efaa51918a4a41b8e0acbeb4602030203034b1387aa6d0ab944e2ec65ce38c8643a0ddfca5c3059718f398dee501291569603528cbab25216c4397a402fcb572f0b512a773dfeafa59e401989a4da13406bfe02030203070354000000000000000000000000000000005ca1ab1e582054b6c4d9862a1658dedebe99a0f61d94c5d1515fd031d0dfe9ebce6a1454f5c658203f14693500ccd0260659fd9eaf69570edc0504867134ac88f871d91d388b63690203070354914e7547b9051ea6226c30495190a2efa15930c95820ffffffffffffffffffffffffffffffffffffffffffffffffffffffff74873927548382be7cc5c2cd8b14f44108444ced6745c5fecb02030311ab6695ec969171698c1f56d4c05373d8505a2c7299fb05cda1d4351e22bfa403478b94ae515fbd01728835b532c7c45ccc78d200d3d004da6917337e139eb729020303ffd14369e7c7f7aec3a890a20234885f2c9fb9802ec318d8434ebcd58a696153030ddae742090ea458c3f232dc894bd8cd3378b4b4590a0523e09a44e0439fe0db020303b1688d8c7806365d931579ccac8dbf7a8d7705ac393159dfd9c0395ab7b5ca5b036
a6c978a565b15267de4330de7b6166014082043c5cc80370953767ac501ccf2020303f181e47adf88e965d55e1153d76b731c261ad7d7720823919fc11d98bc144d2a03c480f344ef22a4532900fb9d7cb9d8b5ce1e4f11a231e682142f9ffe1962807d0203032db40fdeb2c5256d5a237b6134f844646b325bfc12c687916327e21a65b1ae6a03c73ddb4116e07a00066b925a207dda51fbbfadce21a7459c6c2ae7f598721089020303e81fa28f73bf124de71b54c67334292e397e000428de699c89947d793cacb9da03173e567d72ac2c265860d9103e791fdfe3cad72a9a1dae15d9bec6687eb506d702030305a683365eb32bb92967becff0dba79d1c23ff75b2fc3d40f9a1573b993747b703b8b1075b12927a8f483dc7b802c96483206f98c640e49e22d4b426f9a9eb750f0203031276db0802c8235f9f248bbafaa6cbabb75baead95ede989894ea6d8585c3c8703527ea0179a8814d423775e1f381cc8eee0797216d71c79729ab186714e4daf3702030330b1e1f7a1f7dcbf5cd00932de20748e546bc1a8da9381fa3d5066f3c02b61da033f7308aca0fa70a938e45539d5dcd1864bc233ef232c6d38fa1dd331e536a400020303ad8fe61eca50a88286f382461ecaa93dc71e9aed12e91a2e9930325e5ffd1d7903fd046a02679f734a91031aacb4194ada537220167cfa68306b651433026e6478020302030203020303b7e72973952f51f913dc6818649ddb3c5619982f21e56347003ebe3b3788eadb0384757ebf158021f4bfc0d9a1bf844d13747328fd367727cb0a2d9b7c91926c400203020303593dd6ef2d4c6f8ab3253bec454072a6cf779b5acd194d43cf4d30191d4b24fe03d80a7ee4528b16cb482fd73c259b2e6e4fde5d5d31be6b97703fbbb17c3e61d20203020303992d90fe15b918f58e8dac35e96d0ebf33834ccacc8a69b6a075b263d0df655e0301b8df4b987fcf3a98000ca00d3191fd2292dc9210d7f1ab382035b2e2d02be9020302030328797f5226ad9a63c859dc61073e8ef33fe15094e61db64bcde0379f055f733403b50fe3e685c2e442a3a81715f64a840afaf1f81b49ed21b3fc2ead0620f6caae020302030203020303189a1bc58c5621e4845025a9c534fb9ad2bb2f5be276faee403d59266561d652038325fb098a4b3a402690994212511e710d20cb7966fb26b3687fea719eca217a0203020303ca11813aa459d051b0411eeddd18070506e8fe2054a2e22a763b05454e87cefd03b2cb46d28f3bcf15305b0654ca442442420ccc1b28e44e2e2c84498571b5375a02030203039385ca432e99a05cca8aa7fe5868222cdb6c928c8bbdd7eb13c22c5abe1b11cd03e8cb7cbe434eae4b8b7910183b3b006a1b3df70ae7b30248fef24
d64a004c3c90203020302030203035fb731b403c8979aa552e74b3534a247c638547dc7c957467a4b08855b29b74703d49a5d90635d403354f849daf9976a4f4dfd7dab5517b254638eb893511ebcaa02030203032fddd404fe9317d561378c78f3afbe75e18c27face10d4e6ea03fc2888b22e33033c8c390d481f51cf6b43c22677a971beae0e62e8b2ecfdaaed05b48ac0f60294020302030203020302030203070054000000000000000000000000000000005ca1ab1e45e8d4a5100002010341305ecddd1b56329ac9f09a1235eec6ce6be69492a9788db13e9187dc21e9dc020303fb1c6d1aa6d3f3bef7a0bf4130218b3b168f9447e69ebcd3b68c2b2f41d9b2ef03652ba6f9b69aee3d28404079416c2f8fba4078d66b558c7a8d9615cfe7d3bd30020303d9f042d0d2f152e24d8cde02d3a7d7a1fa234efc5dc259572d412a2e607215ba03c5b76ff595e1d74a22eb44a5aed94f3225b6126c2c28ef04bb75e1d3804925ad02030314a2b125da4db5ba673cd5c0aaae8c5bf0857fd45728b868cff3f40eaf9f82790393e93c4f4b58f6f9d397d136319a29aa6b691b652651513bfc2297107379ce62020303f00359907dd68b2ae8e2d252d3313f3ba2bba16d21995333b2162b24c9bbeac4036435af585f0f75e60d362629108f6768756f7b39f1c70ab7f79e6b4e1bd9f08f020303929e2f8eb833089a3773b497247338865ef336de61e7da4a362eb3e5d5601a7203323197b010e3205d910c230463758f39cd6c01258db0a11b9b47f4c278db0494020303ab4bdc2dbea0c00b12cedf9e968135b62101bc1e20e270a1f694ae6a4686627c03140686262c769436fdaece3afe58e8a4423cbf381295a85237e52fac66c57879020303295e1973d07a067f281e3337e756bacf10dcc295f7074564874ea4401eb2a4e503cfec4348d3a697dd4f1835bc31c2615f56f92a02c1935cceec2501c12b8628f10203033892c29a2de6aee7888c2448fdbb3252d32b426bf74edf79223e4ee886fc0f6b03ef287d8ccaa574ebdac646e6d35bfb3ce52b00eda1e671d7d7bbf31bd59ff7ee020303c58f22b2dc782f914b31e3b87185b727a0bd2e2dcc41481e31ab1b26f222fdf703f0dcf8a2ce85de4d96bdc4c1a9c52a7ec54cc771750f0ed7d6c1113b93df65ce02030203039a7c26055306c8884baf96dccb2e3bb3cb30deceafdc73491bbdf0333400efc0036ee70bfe41de62ab49a9a63ca415bb881a92980f87fc044f2f5ae2e84185dfea0203020303c4332d86dee9e03fbda2dc0eb81cb20a6f6a20c7df95090f09e47d8e7efa1d7b03a698f30a106768bc9d451fd96a6808beb2b799deec6423688d02a9ba34b4af280203020302030203020303398dee7348cac5f07e4865c
2049207722ec9572e2ae69b21a8cbd1c053c44a0e03612d7861c014aed261de20fd1109fc86ae090eb2c37a02b8a6072bba1c77c8b50203020302030203020302030203031f28ae8c421086878704ec730445ebf2ff23d186ffed24802f0ae24259c8d21403a8e38716cdd8a09095a7036c686009bd8236b1c7eb9507540fb981baa9a8bc4b020302030203030fe638892efa1dbdc2881def87e77dbbba95d91c8debdd9b242bbf0745455a7403e5554fbb47341d48f82f64a26d175a7d3559378657e77cf2de2eff917b95be300203020303512c2cf0ab4340a1623bdddc301aa586932f9413ea9bf8c0f1849d2d70d5d0ff0375d0cc499c7f76c70939fd8d633c658747eebf0eb138c15d902c34f0de9098030203020303b78dcbd59a3668396357cbda038d7e5bc77aac4acdb3cd3a75e96eb05079a6bf03ceb3ed2850bca5df0bd69ce2e85e9daff43cdb4c79f58685340f521521a0943f0203020302030201034b33ab5a3b8d3b01c374c1d7fcfc714398b7f0704ba2f1ee757670269fd5a7f7020302020203070354000000000000000000000000000000005ca1ab1e5820000000000000000000000000000000000000000000000000000000000000000043840c77070354000000000000000000000000000000005ca1ab1e582010c18923d58801103b7e76ccd81e81a281713d174575a74b2ef0341f6b9a42fd5820b8b76bb549992d9bfc44e3b36d087a175b2e78b9584fc752eaa3013e0bdd31e8070354000000000000000000000000000000005ca1ab1e58209bd14ac8c1cf553e0ad3a2c109b9871eb74f3c116bf0bf492ef04d2983722555582090629dad0a40430445b7d2b25a8d19c5d7a929608ed7890877a499aaca01ca5002030315edee06840e36ef17d13817ab5475d12f7bd50113984febf31e2cd80c08952c03d360a7d78676862429feb7c95d052c1e63379b8ad3becf085a21baa353ab93d30203037a2fb952c2cf8e85d9706bcbcb5a69b83b13403b58f06b0767f4204acc5917930310de142eb3b2790cf1e3694b72eecc7e8ab3860f543c15cc24274ff69570f009020303879875563fe8a079ef71e84b6840b187c681499095de9d02d8b101c9dfcd111e0395e9fc3b000e49b65678f256d247786f72c91494c960d117b7668045c35502720203033d99bf4088229f273fa4910aad8f0aae6f7de8fd1832ddd14c8fa3d083aac51603b40316099ecb013c6dcc6ac2a3e831521afa35ea0ee52485c2e8cd40bd81fd870203030b576686ff79ae37ff5ae2d4240131369472a24104dcabaaf3c348da66a638bf03dddfa8283748687718b9672b0a69a6b7758ce10eff383d83986c1a2aca2910e002030313ac1b1da5a5a4232e4e2b766aaba01f45f9444b926
476f01882e30d4cc6ae1a0323f57d2012e1874436ddc007ea8bc6dcbeae6e0dac6fd044c8375d2fe593904502030381ee4d8ef714022c3c5fad435af845d213cb988ef7561ddf65929553b70dd69a03178f9fbf18b1d12feb522330c82fe96d15bc4964e1c1053093c4903149652e6b02030323cdd6298c89fc39f87595dedfd8bae3ae7a40b66f312333394482169297dc8d033517a6ff26c035b9f822da8d2abd642c858696e0d970b1026cb524cb0844195a02030391dd21f4c52970493d439537192b245ccd2e4e3e8e16d90dc74e417718b12f9103d56b5ff3ad5ab9205b2d6f9c508e744643224a7ebca8c1a4aea71f01e48b186b02030304375ae3a357e874c2a10fe3596adee75d0ccb96e63838d8db70c9e402663e9903bd8d2e9ed97a66281cbb0733a92dcc92158740088acc7a9c834d8204c0acc1da0203033c9cd711a378c8153572663cfc686ea0324eaabf0feca614928eab900755299f030bdcf7033e475ad4a147377e1bb9ed8619b0c88b728f7935ecbe7bcd2fa82c7c02030203020302030344d578674b6be511832af115d2669571dda0918b6cc734aa4acda37260458f3303fa95439e418988542df6cc8a12cd2e59ddd44643f035364c14b146d8665ab538020302030203034f9b9d5ccea861bd0aa15a5fb7fecc1a6d49b66bc7eb1e8905701e3e5728957003a6bfd6ce49840bddcf6502294487dcf1e2b5d6b06100a0b1259dbe8c8bd8e44f0203020303dc08ac42d157ac7d835fabb64048b54993bf6636eff62863d99d2c8905f1e6050362a972a91cfac6bfbaf2c40c724f947a405ce6e647aac0a61ea8f467a49b41cc020302030203020303a4be360a2b33a98faf83a47c1611d2114b590f1a626d48f700e1b0d8361f65f6030e4a6c2e589051b01393778e2bd41764d047b0394238c514e3cff1bcd9f17fde0203020303a19150f49f5fa1e3a3e09c7b8e3a80ad9f901086b2acacc8a5712c945ab79d3903374e7d15b75adda866c38fbbe1cb5bcad247ad095de306706d40855b922df14f020302030203020302030354772bf7e2a00683456b752e674df624b9b8419fd126964d66a82f8ba678977a03dd8f48954ed2bb272c5b94a49d1ef09d545062536065580bbd306776bc135f8e02030203032108ea8ac4227399387099ff7faacb8c1e424f5543edb67d7d8ed0f04a4e0dfb0392659304959ceea896f45666a76214b0f96c0d0ac9ddb78a96f9a0271e7b579a02030203020303870c4f9820964a725c45a91364107661534dff05c30e966b1946f2157844ec0603bf64c46a8bfb74f75acb660d0a43078c21cdab2627c014fd463a56ad85cb7e6a020302030203034b81bf62e5171445bc7bb3e154c4236543feb39907364512e7f8bf3010d0bcd
103c1e217970454c195c8cefeedb6eb556772703cdfcbb9473b1251407e3af45d4d0203020203a8dd420db1a92952522be68028b8762b9c2c45f11efe01d4e2b2a17a8aeca76202020203037c03317c701ee7c858e7c429134f07bc4f3bb40047681a2995924386b065a44003eeb2124d66ad9fe030707b71b337ead87239fbfec018f78a36cf83ffe6c1f3090203034479d72706bfadbfc681e4b1e0c17fd702e94ff5cce085697fa4915b9ddf8e5503978f813e60f47989d365c08ad74b7b5697ac63a4d729225fef5cbbf858dd9e360203031e3fe72c68bad17795f3ab1c89427a9db9297c750e25a03f4d5cc7f4300ccf25033477174075c81e1ea46067ae9766ac42b6e37b0122ca757914f2d38d5a5b0fd90203037c82934570e0e51dadfe294202f68ff1baa30ec7f3d972fd309af51bb73233b003c73c4ff799c5d7f7900bab9bed27acfd777778f080034d266e4b3a8cb275180e0203032ec060cb265b14a46177f0b9263af186c22d8fad7466efd3dda1a76839916f720322d842fbac43297665301e5a01f595b5961a7617045e6f90903794e64ae970f3020303bd26ad01b4a6d5fc9578bb889728e38b0cd1929f289dd0733beea3122035d8050305574e7ff67c46b4d58103152ffd94f950e5bf9a67a405de972849bfaa7a335e0203033c9f565b7511511ebda8b766512d87d572c4958008f933b4e55604a5f3c36e82036a24bb5153ae46e102a28f022b5305705a84d70a4d2d5b399a09ae90bec4c86d020303ca003945b6df159b5c900df34d54d18e81551ef946e6ec76aa5105912bd41228031937941108c7513a3bcf7e078b1b35a9816cf095dc7413079922c0eef235cd950203032c581d00b2b34c68be72f5453d8d67f30797a26d4b0df66f004fc0075cc8eb1003e71d380a7d686d28aca8fa3508c37b30fb5e30bcd348e19dfa6b547f2fda4fb602030203020303ac0a0e8df4bc9026b7b241c34d72dce10c8424eacea17d1670103c8ded2446be03f7a62663d338b5b7e9219d01266b1772ca3720daf925bd302b7dafcf8abebcba0203020302030366c76d1cd3e3136e15f9f29f3dcc4bded2c760f251e06403ea022bf5d67ae2d503c574a334d06a611fa0340a1213e317efb125ace4eda7a487ea00075e9a6b67a902030203020303ed1e8a1502eb462bb7f836f6d72486908e1b2cce7cb00e387cc1aadc827874d103bdbfb8f970bcc72256ac4e1eca0809217c625c6412289a6dc5dff7c91454436602030203020302030203020303e8bb2dae757d043417195292bac773cda990500845f91a94d00179fe89525c3e03e385244b15001ddd43b0180922bbccf4040a86bd116ade66fc3aced03dffecff02030203037d8dcb012bdde19a0dd178c1de91d21cc
866a76b9b6315554fec4bc4f5daa79203bc46b61f50585799762df053271c52844c6fe83156fde628c8bc4369c4fed18202030203020303c998c4c602a03cfa0c92a1542165567f11d23f2ae5fb91d04e02292f8e297548039447848097d9500f21ebe819789f98461e01aff7cfcd442c8afe8e07b87a95690203020303f6441de6ba2bc5cc9ead4300519a94a14a80b85f2fb0fa41e2211d7c02af0e6703703a99e4c2133e89a6e892863f14f143cf5f2ad94bd527081c8be6143f14f3db020302030203031da626649ee092857f195eb0059639309d744972389a4e748c471f16b0fc3c2e03f4072cd036d5bbb777ad24fa0b1a235806ef25737404d7ce4f83babb76bf090802030203020303c7f6a9615846a2d627db4c940098479bce3c61c8fc1170d0b7f8c9ac2bec5ea4033664667108158e9377b49cf3632952090b6eab3ba6eaed4f48ca9d5beb273fd002010203070354000000000000000000000000000000005ca1ab1e5820eff9a5f21a1dc5ce907981aedce8e1f0d94116f871970a4c9488b2a6813ffd41582021bb230cc7b5a15416d28b65e85327b729b384a46e7d1208f17d1d74e498f445020102030203070354000000000000000000000000000000005ca1ab1e5820cfe58c94626d82e54c34444223348c504ae148f1e79868731da9b44fc91ddfd4582040fc722808ecb16a4f1cb2e145abfb2b8eb9d731283dbb46fe013c0e3441dfbc070354000000000000000000000000000000005ca1ab1e58200000000000000000000000000000000000000000000000000000000000000002446746e71f070354000000000000000000000000000000005ca1ab1e5820bc32590bee71a54e9565024aca9df30307a01cf8f23561baed0ef54b30f5be68582007b7aa19b2ab0ca977cf557ea4cec4c0b84b00a9430cfe0323877011fa03269c020203e752c67cd3ffa4dacba7194a973b10998b056b5b936d17da2a8eae591a8e175e020303abdf2e1db4950317aadeff604ae51ac75e8281b1ea22e7f17349757c71dca21d03fd88dafa9a26e9c8f617b5de3e1a6021092dbff5c1fdb5d8efaeecb1f333565c020303b8a363b96519cb0eed132d807f6e42ea35f115584c443a74b14a19bbeac463d7038247bf369e033fcc19a5797960f1387f04f80cf396babac560060887288632db0203032a893ec5bee53177a40777945189164675444d0087d703c8129196df58b4ffd10384203647fe683ea28ce78395875f0bc39f1fe1ce6c9670b8393161514dab47010203039cd9d80315aa2688e25cdcc210d01a64e57404ec24bd81538fcfd3880c7a1485031ced4693c4d71b2e97ea6287a2d22ed1af991abfe52dd764bfcdb56f3084e85e0203039067a4614e2e410a883b1
cf0ccfdc298c978614ca1a472330e5d63e1ea9ae095035bfc8cc6e977317e3dea3bdea3406975ae2384e72f6e5e09ebc3ff358e4d9725020303f30c3bcd31fed704d2c67d83ece97bb8fc518746b11b291f9ff5c12ea436f92703800f22b2fc6b77bdb96880866086a8f4d621ef386020c90fe2a678b1bc3a063d020303fb752c12ae75e534126c45ee4aaa0e80c44afa5f5ac85f491d47c6c232479cb203f4091664c7e58a48ec6c8343fd713184f2195f17153a9b10439f3aa99461a425020303d58b82e1f5dc744c3e99a29dae08c0cacdd92b28e0966a5fb3b143479649353e0381584029a53e6c7f0dee68619e681482b9e36e43858e57bacb3554d7af2a8ad1020303f6ca9ca2515d3662f23cde1e54e67e0817607d0b9f501818a528ca1b43ffcce603bd381317706701d336e83e27c1cd699d0b616b349b0e28de4cd010cfec1a2bad0203020303af2d5e74e0ba57395bd0c11be5508a506eee906defac2ac84fba6ce7b577205703dddb21150e7c057c4df77ad73836cefe1e746adc52dfe903bcb543bea8eba9d502030203036cb57c550ffabdb39fe5531fac6c603b79b2551bfac7e208e7a1b1628607ff9303f46bdcac887fc8561c752bc45e1c98389a6a35cc0572575245a8f2ae513bea3f02030203035dff75d9bd1da1247aa0fc644e204d8fe7a916636d465210ba9a828a93bd8fde03f50e7e2741b63ce73e98ef6a769fa9339d941cf993b7e4b26305f22e9f18bc560203020303ec8a5f20ba3d3385c3ce7cd26702f5e40a4432f72ac566a3df649c1af87741fb036a000d8ceda0fcfe3ba4e6ae633e3abbd3deee0db83107e5ce0e0469b26e7324020302030203036058e9f8cd448caadf126fd3b7d50fbbdd8e2f7a8de9160a484ad79f8829bf5a03be9a1646b44327a504c96d0b2ac009d73adb23ba21ba3df5a5dfff32b74403680203020302030203020303ebee5c234bc2f660a9b3efe1bd2fb7d340182d904429b1f2a4e89bb51b1c47c903e51438724a9cf3725c22e07d59ba15acf0bbf473b37744164f122ac475ec42d20203020303bf9c131a0283cc46ca74d21b68d0b3a62d131dc9f4787ab60772569aaba63fd703f011de292bb236c3b08513f7b82ab7d311d0f80d4d3e173c2f8445028ed1cbf8020302030203020302030203020302030392af697495712b4013883b3f5ad2d370bdb93f0ed60416692b0267f10d9a3caa0386fa8ccd91ab622b232223f9a347f1785ca9c4b7323a2e0d19a7971c3afd63ff0203020303b4f12607fb8df583b854d7b235f4a64ccb2f4bc9819dc50f3a03ed0d4906910e038f64a125d14bb92752d65593faae8e41bb5e80e4f147b20f0c247078f6e7ca77070354000000000000000000000000000000005ca1ab1e5
8202d11035f2912c26c30c4f8957d3910a20622ea8709c8cd3e0ad87fa0f4460bbb5820c0bf0b2ab68768eaabe5fda7814227beaeaf4c4ee7e67f5d07aefaf5f0410ab80203034d5eb602925f13a2147a2c1439d43faa74e2561bb3d27811f02042466fb2804f035d9458bc537a1957fddbf6c5f13c6bfc9349abf1251df9d6dd48b5b574f6f48f020303bbf6401ad2a6b95a3e749f5b31224fc7fcdd083e7aeac9671ec3bebda312fe5c03393a914dd0b171b4cca2f5cef52cb4ed4b564278c0fb678e5e8f3d911b4addb302030356cdb16849ae7aff540b8724f73974149f71cd3f984360537159a273a5bfcc1d03791ad7bed137c9501bcee55dc6c9b030157a30c37fca14d39c25d9d5137ae88b020303e43916580d350f4da396c5763989f003085f6c468cf815846a95571702f1f53903e88243a0e60743a8285f13df8159992bd95c7f9546a8b5ef0ea2166fa211b8f70203039691d481d60a086435d9f914e8e2b5e5a68abfafb82dcc9d6de2176920c35ded03347f67f0fbbc63fa8a3b826c6491f42b13869a2abd2b6326d75d51cb30ea9cf1020303a06d3787a49c8745205aae2c80c6aed35adaa5a8e829f8ce8c29d55ffe8cadef032b843523c93d41eee41def0561be9ad7414c5bd9591d8e3723fcb0aea6170c72020303e56edd97325fff9e9a09d439d966a37ab63cdb3a3328b157445b60c3b91a86aa0381354b5bad8afeb2c183556c5f20e5d25c565cb8a738add05fc71bfb086737a102030301fa96c592fe444b2504b86acb3efb7befb3e241223f2d697c162be93668231d037f5346f59d4e0e4737f7b5cdde5494c43dcf2b583098022afa1d40024d434625020303299100220dba6b0afe91d1fb4a5c16f6cdc90da62bd73bd75b66063366a950f90315d7adf6a555d635edb76f96c7aeed7b5e3990ab1d13e0b01acd386ddeb43e0e0203034a527f4391b236f6ed15aeb5eb8839bca31aceadf3b8b5b7f5208d22f6a01b8903ecb9612fb023bcc161bfacadd2003a53d264c5555c4d65107fa01d984fc66017" + + resultWitness = 
"01020302030203020302030203034b4c181607792b3c46ea253af79666ab9bbfa3d29e8855be6c4e045b3424f6a503fdb52981685167cdab219ae57b3c5869e539e89eb29845d6406b3229247e982e020302030203020302030203020303dc378377acad40e16af2de6482d7a60c1e5f087d067fc716c2485742ac2e29330339535728bf0c5d72ec789110ff3691dfb9cf434399ad849a86ca6725977d3e4f0203020303481a1fc812bcc98ce37225fff9f28a6d8d0ea5c63aeda93b031e8e4603cc8e7c032952530fef71561f9028c37b944df439c0d2968c4f7e247a2ad12dd4969ffc8302030203031ce6733d3a496a34cb114cad924070b0dfad8ff6891f629ed2ae31326540fe120345057d6cbecce08aeecc475c91403549f4fe82bdb953895bdeded2fae6f8688a020302030203020303c4ac3ac799860160a30a3304b765c2c90bc414edc3739a5d098bb7e18009548a039042e98ef239f418f2bf7ad10868e1fa7d0f644458488adf684313dc3f683a5202030203020303949f805ade2be05694c8011fa17fab3646a43f38f96d868386f0ba9558ba5f960302aabd9fbeceb9711f46d634513830181412c8405aea579f470a19b477d090140203020303db978a462b93b2efa3aa3da09e03370b570db692c6d361d52ae1051bdb26a3a903916d67432c505e1dc33f3617e0743d761aba44785726309191e79cb18b666e7402030203033edca13bcadc1db9305f3b15322cc6d774682fffdfe2b509f81d00b16ce2dcd003dc94780e238944094e7856154e6d3e54fec28293a9a70eaf1cc2a81e874e22170203020302010203070354000000000000000000000000000000005ca1ab1e5820e72de8a1b9696dd30f7886b15c4cc9234d52c6b41b9c33e2baaf8d88fc5b7c9f5820f8fb80310ac041e7a5e79c138d7261cda5d8a988dc9268b5a8dc5318fb610a90070354000000000000000000000000000000005ca1ab1e5820000000000000000000000000000000000000000000000000000000000000000358206e82d18bde430935057c321f6c30812e0eae2122da6af753e25974c92f0d7b50020303c6cbb686c9d7a94f49dfbee076ae1b87f1c9bb5f33b7c98a71816c33b4731a3b037514c2021b2bb805e2a6060b265dd53c069a4587e19cd7d1af99d0e9c3d0e550020303784570292bffbc3ee00153e5806a317459fed4c1d84de0515dcefc177404865003f08c92f6786e67148e8fb2bcd4eb813a665e16a270b475605d1d84b587450ff102030344c5e2a1775873020ab4e5a588e95d6702878cd46012682196dc39738fd8780703a6d5ee17fe3be7e20050e4e66c54b5188febbdd3615f832c35b073078258b214020303a42b38dcef18f890c02cdb90473211c9558
2727b83af287cbfc8a3f10e29649103380623684a9b3b341e01ee65908a6aac96fdf1444ca255b9dd5193537d58709b020303476c478891a8f8d905ebf9e5c031ba1020ca1436538bf9b97c6eaa1b9512da97038c77314895fccd4edafbfd73b531f4dec6f4671b6acde83926907ab376982f310203036416706411fa678c78f77dbfb609d65f63d6b04a8aae3fae4cad23419f6e738b03b6ec59ff099f23c5a528e805fbd9457736b100ea0e96390eb536046b88da3db102030334468b79fd36c8bc812c6613d176983aa4be53642e7e56421faa4ef25031fc73032869ca46586018725007aac483055d85131fcc4432c9a72175a8c6263b65c1ed020303676f9f98ef2cdc44ec8d98d0153be2aeb90b08386286887c94567950df1216440385bdebccb7559d68f55e26ba0980bcf7120609c7bb43cfc1f701e92f670ac1280203031117969a5ad58cb9a441ddd498cf3bebc13ab5aea1ceb29ddb1a226c5343c6e703425597c542fab13f686a7053f6c1e2635a729f8d9da4c01d763ffe9965ddd63402030345f2b9e446c9e743f6899409a4567a9b7f8770f711d39e39773d8173c4ea3a0c03cbc17bc3c54426fc8cf2b13b1ddb800509579856ce251beae01d924a92a8edb8020302030203030560b956a67a313d6d8939eed4cd80cc385eb49f7b6dd269ccde33a145f1216e037b3e0569695b777df45db97a41b025b57c680ad61231b61225fc7825824c4c0502030203033f1ce9dde58980c5bc34a88467f3b8cfd334dab19f28050acc53f33aab0b366f036092ba2243e1d8e20c2aa4ba0aee9ca063e8e8e6da493269065c227232020a590203020303921f9061d1b4082d20b7f9df21566609ca6dc64cd0ffac2625e9ff3090ac73570371757934d2c7d4dfe9b7b1e5c71fe18b66cf56c540c3d04310873976f79ef9f602030203020303e8e045e00ad70f879f31e498fe49aa2fd4c81849b6c55dd98391681351aac7df036e509788bd99ed4034d5fa3901bbda4cb6f94d709b2d54eca545336569791f36020302030310125b6177d5086fcdca8e0607e49e6cb21bebc95404329c9769a7d3ed59e2c4034e9acfa4214d459d1c81a304d862b2dbd4d832e71ab851656bfcc0e9c5b3e6f60203020303ad656bdacec77a2e7e591bddde7b2c7ab9b928945ee65898ff35900e25f0c21f03e47d9766945c4649bd44422f5fa779e92267d76ce44f396ef0b672215e43ce7802030203020303d3858acf0781afe0adae49a25d1724b36c9989179cc884b9a9c6481f89e57706031da6fb50879ede58d816d1624292f0c60a8133bcbc671dd92d0f9cb8d50fc803020302030317221db9e049aebabc83cefc3ebe7040ec1e82022d104d2c78f796753f76f0120352124f3ffee53f7e0f9a0
068d5c0b0abfca5aaa148371c91e2e81df5fba6f8bf0203020303e00ce2232f3e208dcf050f887d02c7b91170c4b98e1d098ec5238bb3d387a41e038a0379dab30ce84865bb4f0834a9c3fd7bb2da17994abf03e89fd8d754bf7aab0203020302030333f5853903cb36caedffa12d0aa0a01c39e91309629a97dddafa8da4f738fb3e038e1dc6012aecd5998053b5878c6e3a398c8a286c7ad4dc0b55f043e4b4210950020302030317e041d91830fe8051fc73631cfd56519fc5bb88b5369298da9807b40d93733703dc7745bf8dfa058bcf1328f79efc9441cf5ad5fb5763c75bdebf9492f88a6c8302030203020303bf9a2780e63ac26ffca6df07f182db72e3e65802b277ea6d40057c383d5a37e3036c1548eb1c956ece0a876fff4463cc3f2e9c3b6eef88379f07e6d71013eb4aad020302030202020103c2eebac9e260dad1f051fa75846558dcc0ed120d1a7699fd1d6b739a3419015b020103b9d64db96b69b035a74a90069691afa2b34a705f3ad3aa82f3757c7a6f2a9f17070354000000000000000000000000000000005ca1ab1e58207af492bfc857210d76ff8398a35a942af892e866b1f4c241746b3ee89ca002595820f889596e5b3d4dbe5bcab5cde2af26d3ad8d88bc086e8b4f929885f33f6eec77020303cd3edcf79c7e0e0d6b87ae523729611faeda92e77bbb59f739e9a6127d890cdf03009a5c01c3cb27d2f1ffbd3fd77ff38f66991f64174f5df1411ea21ae2e22f250203035334694d8b56a2f5e0d0ded81283e7f36b3da6fbf2e2d1b469c008d7414296be03953388b0cacc587c5ca452ba8e96a9071958ef94439690bc14866f556a35ebc1020303ac2aae05bc7a68d238e9a9bbd2d5d07a001f8f3651bb25f5a6d6dcbb155569090335b6f55bf3d56419cbc3a45d4fa6bed330d9e0391f8806c97a7aa4149d06725b0203033dfe4a2f0555ff318ad12e49515e712f339134af0237edaef08553d9d67e260b039cd50a46feb34ab47c24391a2579e601956897ad6299bd14a4c8d9628a37d46e02030348f01fedf98979a5fb3df07daded956331fa6a02f697dfe29dd26e71111de5540387829f9a96ed82303e86550747e311d5dbfe94cc71113600595360abb512cb7b02030203020302030203020303eac48d9dbf7d162797293e0acd54382d4fd53e80e29c9c43c51dafb05c0880060306b13c75c66a6e267236b6579bcca576ff889e323ac6ffd0ee317e07623a3866020302030203020302030352e23af8570aeca858a6aa5bd20d2c63a92eb08529a9e6e5fb245aa72c5b72ce0334d7cfef6cb28d62f63cf907e3273d76a8bb858423c6ef446b056fb4f06210e002030203020302030315bf4cd3a7f33296bb4778e216bd18adacf25c97f8f4df9e1
052dcba7b6edf2203b3637d0b1cf58c5272f28f8354a764d3cd48ff7c04f807237da8f4a1e2ef5db5020302030203020302030203020303c018dfe606efd5d17f3d45e91d33d3d7ef57d92a1d509291b1556bbb7e78dd0803b08ff5bb304aa8741af608415c541440edcd98bbc0fc849fe2a89c2c783341d502030203031e0eb0ac5664b1d0267d3c51dd66d1828c0d01a0903d599a317e4578926d4e3503330e2ccc546a3db52e796943aa8960d6d483a3b941ae0caa21cc7b9f7a3c2bbc070354a40d5f56745a118d0906a34e69aec8c0db1cb8fa582000000000000000000000000000000000000000000000000000000000000000015820421c2cc0dce9b0fbdb85cbe43bd6c2a1af5a6f5da756cdb8b6f2bb948e3a90da020303ac874a6acbf6de628134cd74ad9f336206e7aadb1ef09456b6267a770612485703ef323528b761720ce04927a54b81025a935f070420d655217e40eb2e084bd170020303a48199a63a429cf44fb39fdbb73098a49dc171e03d32800f483780adb9aa06580388796e8ab2076fc77f00a5096317ceff8a54da310b014a0310504bcd76f8b8da020303f8698a6f24140e0e37f49032fb2da6db2c8bcaea8961a6e1976baded0d9a8bd80371b277886f0d14b6f82cfd063ecddab10fb5da5e0666e040992469d09a6bc8b0020303dee2b54587eeb7db2fe9ef0dc262b6ae679a5bfff89c8f403d181a1d79107b1d032aff27f522ef5fd88213c3865e01c7b4c1720d56778d1bd0e48e6a86fb3b07970203037d0d29240ad72800831a91d8e54019da745c6c6a630a625167723ace857bbb810305edd49a8cfb1eea157734968e95e8b8620c474c3cfc6f3285d3dad36893114302030349b1bd34664838889a2133d716143cb8707a15745738917bfbbeecbe871e6e90035ba74ef0008ce80ac6d199cc4d217aaa9b8a5fd58f2d329aba4e061c16d99b620203030d600bfcd6581d405aaed26aa7cee976fbb2bb9c1c1390bd3eb14cf5f661022303acf3369b84f876dc556ed93718d616864020b1969d24170f4970ddbd944e1bd9020303d5bb847f016f33d8cac460756aad70173d8d6e37c37d1b69e1b1b45c52e5996103c3777105bd00820b49c89486e7589f0ccd0244ab6fd4b1409ba86dece7506f9102030356b2929dbde358b52b652bc842c7a42aea162f0d79bd7d653b5cfee34e9f0e6c03656a686adb3bff7a9d8841d3e296b0dc61c389b399677222ebbd70cf0c19e70a020303e5bf4a0779ccfa6d42a01e532bb6120b168699bfd3f4f44a62780481d5f86588036efb82ef530fb604bdff43bf1ad1a7dde41522bf8a7f5e724dd3074562b0c0ef020303036a50ac7a6e425842820d2a4e07a80f416706903e9d88b5824559515a901aa80303e3c
58c1dfb4f5a5a5d1180dd010ceb33a42a0ff7cab200ced5562202261aa0020302030385d60697f5b4482fcbecfaf5f53111681c9b48ed7bbd2cdb1a257bb7f26db9d103ae21f016eadf6448b913ba498fe3d678b8bcdf9569b026053de69bd24563ef0202030203032fd50f1a5b8eddbd5ccb90e37d9c190092927af9b26a1cf8b4576d7982476fb603436882f441f09768b000722da7ec7c74b6f0252c24e16b9e6461ce4f4eeb791d02030203034eb00b994d3a8d439f47981f68baf7fb0f0e88e2167243c6b005de3c48b5c3ec03ac5626fd3f4030d088d0f41834de11510b59739353238241138d70bd8e05c22e02030203030a51165872abbe7260a6777cbbd2f6d81dfcd07c1b7c0783659bf8e8ca8e77b9032f78c81c54fd31d1a25214fa464424ae6e6399f15c1bd8987825f1f0d0dfccde020302030203020303215472473dede3eebcfdd93b1ee898e4d6cf33261a1fba12ff77dff2fb8a0f27037938ac733af661730414e88da9633c04a8914c9ae4263a4f8cea7066e6cefb840203020302030203034d6713bc006056011f31ac6f935e71e33ab8045353e9e138ec9743e8574a8d2f03fcaee2f22e1561702d029c465b755ff5491e4114264dfdf16fe9efd34864a83802030203037cc278f9b41fd17fb5eb3c839a725fcd1ef6000189fcebcb4214303f45dcd2d60386c3bc64da300f1a87efa2eb2724553e41348057fc99d5c23b5b20e216fde46d020302030376bf2ddfddca9910df80bb0785add76937d1e90e029c02b04c0cf421622a232803ba2219bc37e93a89b0effdfc3601f58645c1cb7e818f2254c8fd16fea4ba84440203020303fdf1c4799edef5fe2960f9148627fff521e591247a224eb8d05eae3f51675b560372adafa8e298a14d0da0a71e645a12a23def78db8e81f7a68ef92aac7d5700b40203020303f5a794d38718b283b47993f3bbcd67c76f84c47fcf2d35373fcb7f8a0f43a06b03a14b12d1f03790ac75797e463a8a7edcfb2bc80b65a7dc8d1b15d00cefb315d5020302010312f8462573dc83d436d2498e68019babdcc911f482e1e00c1b3fda70e1a166e40203020303adcfa2154e38e2cdbafd5d56bdaa5dca90a5bfb9c36bfbe140bb31ec0e66716503b5aaf1a6fa2e80ad8f4e49c51808d2898fd74f539ec5de974b57c27466f5e7490203070354000000000000000000000000000000005ca1ab1e58205956a0b12f607189a063054545ab26ce76ea5eb4c9bc1e8d8161646c93ac66515820da6aba51eaf87e14a7585e52e23cc0b789c61b3e808d2aef704ae932bb2ab49d070354ee5a4826068c5326a7f06fd6c7cbf816f096846c5820701c251f0448beefca8b47dce2e42f136d224b8e89e4900d24681e46a70e7448510237426c0
3237429400000000067445fb8020303dd24d6adc0d7b321eb19905b22f1780707b0d7e30026716c3b0d7ea311cbfeab03e498dca26358e5fd56e5464288da82073a17cbbd112e322488c12bff1661b49b020303413600069144f3379227184b3d365f22778695ad2b812ffe56bdec80df882877033b1e22049401430c9208943101b5ef3e70d99c9853e08591c4729e0f31f4bf56020303ffce2337b88b26e7b0582d1679484fa995b68c0418d72f650531db342e25f12e03493c1bb3f993e9aa63e2736b9e0826f1309ed298bd95bfc169f89b6a62cbed420203031bacbf380b1eafbec9c534577f8972d28087bc6e94bc276ec91e66a11396f07903bb137addf6042ee1a1eb0170ac09f0a092b2f7682f718d5986152d56d192b347020303b89984a9ec10a5bc5835efef55fbf26f3477d21372a55ae4abd26c55ee5e323d035ab47c29775484efde5ad8cfb1a399e9008bcb66f6cd77f28c255980633aeb5d0203037902d8528b89dce0e6a41ff89888121f42520936f3684bdc8481094f1e046b4f03cedf898a501b7bc036d92797f971bf9caa1028994a8d6b15ceb79e4ca532e7cc02030203020303a366f69c8b19d47be34a2a6333298d705692f65daf3fba95d6f48b9676b6cd3b0351f190ff80b28f339034b6be161060cbe4837cf22e0c01b3d5a77b8f349c4f1d02030203038d8eae2b45a21838dbf9f517dae99ff0bac7a25d4756a7a3315c43cfa7dbfb9803785e2e17b8cdb9628ca4c2f963eb5722918462cf75f91dd6fd00ae84d17ba2a90203020302030312a3a949b95a27ae6f73e9d879bc9c9c6eb6757f1c20ee76d1f52e1d4c9ec4eb03d38f8911a661255b0ebcabbadd44e38903841386863c97499f3e57a06bc5c3e702030203020303763e3e4c8cc4a4b30afaaae229ff20ac282d74c923a88be140293d62b2d812bb03b4b4e3386c676de1012a2bdced3714094e57803a98920b0eefe63e186abdd4d902030203032ee550fc2b119e46e3338c971e6f44ea838020e442fce0c4a34b359306a00379038c72343f5e2ac968c7f1edfd71f18128db6b52aa476fbec372eaa58a2acf45220203020303221a1371f01a251478f2a6673db891a3c412d954dc9e741ea2bfd249abf428bf0325059126652b0c2c46d78a02eba6c4df473b674ed378b17827c634bd119f5422020302030203020303313abcaaf43f5d42589a57c6fc0bec04526b43a3dc139415af1de50f8846c004037ee72e1eb97ffd7dfe0c7d40b575103edd3e62c030b86362c41630c6e97bf6bf020302030203020303508d990b34daf5a3f925c435daac3d293f6d861094cc2d343a92c62428fa66da032f8b40a9211667e9c44328d6440091ecb3a46bc15832f7d7cdfa8ec130b527fc0203020303f
993f7eae6e45a6f8557c6c5d0e912cb41b71d2bf37f38affc0b2d8e054193220315eeb3ab754628ce727cd0b7028ff8ed3291de7566b99066e127185d043f595702030203032f2c132f32f21e267ab64271e8f2c0c39fedbcc509c4589616cffec21d7332eb03839857347599c19c43a0acfe53e1bb5bbe0d68ddb49cee05f1b24c5acac24a150203020303dcd0869ad1107856680f6bf164623fc709d26d1a0842bb9c60a383f255c0ec2403c92cb1692742c4e2b6a91d13c3b371a9dccd29f898d8f6457ad052b1da9efcf6020302030203031408a1feb1c4cefd2e71c1d7ce58e6b4c2d139d48c67037d40dc0d60390af539039c51675ab13cc260ab6875b12824ed60903c1755add14024e27508ac0a3b9d81020102030376fdbe16ba7e2f048d9c311cb1f99291b4f624717ddd7e9f2aa653099d19314f032ebe85ea3fef7c7033338d1ed98e187eddf75dff4772a23e19392ce61690f77f020303f901f2ba5a7a95db9ea7106268f17f341206944377d1f006921211069cf8a0a103f43daf24401f9ed2d0691570a8ccdcd016c90b722786ff590276f7cd5933ff3d02030378ab72606d2d32782ceccc9c11af9496f599dec259281c01f0c18a3b875518ed0355b4984426bd4db31ca5d70798a18280a4d319786bd897a29365d2db7489b32d02030335706adc0febe81255c960be521ae4c7a6201b2db502fb7016a5d4d9ba36c58803ef3e9f16053b7f799f207451eb3403eb95301e9c9e721dfde0c41ebd8362485c0203033b59831b753c1ca3ed58d3293aab0099027f87ff97f3f7e92d9dfb095839497a03821fc506f41f2a0bcce20367ebd6ae4b461e110e1788d190416c8345ec72c364020303cf6d91b6b57705a8f02a367997e807f49dba00a5bb3e8d0de25eacad5486b88f03abc64c2300b90b30ae3b11fb71095675d1a62860a6471a1a2defcf624b8bb4d4020303be890b95c3a4c5c381f1a00d6d98da4cd8467e002746a8c52f2564e41319d3780394b620da3f2c277f0d4a70c7a54a7245503ed2e808bf722cce0b503e242ae7d10203039f6bac7e82bf632c8b003eed17f050a49d2ea83b6a93e09295b3b3c51c55ada6038d01937127f83a85e3e655363f467385226f7e406409528791f6e2375184ef5e02030203020303e2ba22bcf2fd6923a2ffd1ae073bcffad33e81f4a7cb9cab82e130c63a213b6e031dd2e6a82a0638b027a1f15eac2bceca26ef1519de70dc99bd5275791bab4bb0020302030203031d0be4b4d178c76d39a7689aaa3a9866e63b999a2d11dbec2f04787c714dabbe03e5880788e24aeb6314512538d4cf7382b37132d4d2870122f47de8ac0d09eb020203020303b9af076d8b0e683e730de94273fbcdb5d2ac9f29273a9ffb388758927
22f439903e22b2cbffaa7b1ed370a3d8b87199e1f1485703145dd3de0945cede9629702600203020303a019468f5d28919dfcc2d7bfd844492f2ab1df6400a17627b31c29ea02d583f5038dd13cd4ecf8c4151cebaf6e2637913a2310a81d4ecbd5f5fd2f4a4c315558ac0203020303167bb488d1aff473f1027bdeadb8e0e7a439f6a589c78caae1a3d045e78da60303ddda65ddb3f7e0fe430faaeb49419075391fd2559659f2ba88d3655454e079e802030203020302030203020302030203037a46bc17ebfbc47f6d99661de00074c9958e0f7fd66df7c77c236b89b165472e034b58bfe7c7506d2891367c270ca350269dfc0a08b7466ec2496c6330dd602bb302030203039b58b0df7fae59a4cef25184d849214bc145cda115b2c0dfd85fd470ecdea70f0330923d4d299efbc138d4442519d30cd33a7827557231388b81a6ea9c65eabe6f0203020303af3fee608c2e8e5a30ffc6345d86ec1b2d55f10e518b4da5e8eb59abde07b59803c2016682405d3a953eba254601d5fc0b8966a33efaa51918a4a41b8e0acbeb4602030203034b1387aa6d0ab944e2ec65ce38c8643a0ddfca5c3059718f398dee501291569603528cbab25216c4397a402fcb572f0b512a773dfeafa59e401989a4da13406bfe02030203070354000000000000000000000000000000005ca1ab1e582054b6c4d9862a1658dedebe99a0f61d94c5d1515fd031d0dfe9ebce6a1454f5c658203f14693500ccd0260659fd9eaf69570edc0504867134ac88f871d91d388b63690203070354914e7547b9051ea6226c30495190a2efa15930c95820ffffffffffffffffffffffffffffffffffffffffffffffffffffffff74873927548382be7cc5c2cd8b14f44108444ced6745c5fecb02030311ab6695ec969171698c1f56d4c05373d8505a2c7299fb05cda1d4351e22bfa403478b94ae515fbd01728835b532c7c45ccc78d200d3d004da6917337e139eb729020303ffd14369e7c7f7aec3a890a20234885f2c9fb9802ec318d8434ebcd58a696153030ddae742090ea458c3f232dc894bd8cd3378b4b4590a0523e09a44e0439fe0db020303b1688d8c7806365d931579ccac8dbf7a8d7705ac393159dfd9c0395ab7b5ca5b036a6c978a565b15267de4330de7b6166014082043c5cc80370953767ac501ccf2020303f181e47adf88e965d55e1153d76b731c261ad7d7720823919fc11d98bc144d2a03c480f344ef22a4532900fb9d7cb9d8b5ce1e4f11a231e682142f9ffe1962807d0203032db40fdeb2c5256d5a237b6134f844646b325bfc12c687916327e21a65b1ae6a03c73ddb4116e07a00066b925a207dda51fbbfadce21a7459c6c2ae7f598721089020303e81fa28f73bf124de71b54c6733
4292e397e000428de699c89947d793cacb9da03173e567d72ac2c265860d9103e791fdfe3cad72a9a1dae15d9bec6687eb506d702030305a683365eb32bb92967becff0dba79d1c23ff75b2fc3d40f9a1573b993747b703b8b1075b12927a8f483dc7b802c96483206f98c640e49e22d4b426f9a9eb750f0203031276db0802c8235f9f248bbafaa6cbabb75baead95ede989894ea6d8585c3c8703527ea0179a8814d423775e1f381cc8eee0797216d71c79729ab186714e4daf3702030330b1e1f7a1f7dcbf5cd00932de20748e546bc1a8da9381fa3d5066f3c02b61da033f7308aca0fa70a938e45539d5dcd1864bc233ef232c6d38fa1dd331e536a400020303ad8fe61eca50a88286f382461ecaa93dc71e9aed12e91a2e9930325e5ffd1d7903fd046a02679f734a91031aacb4194ada537220167cfa68306b651433026e6478020302030203020303b7e72973952f51f913dc6818649ddb3c5619982f21e56347003ebe3b3788eadb0384757ebf158021f4bfc0d9a1bf844d13747328fd367727cb0a2d9b7c91926c400203020303593dd6ef2d4c6f8ab3253bec454072a6cf779b5acd194d43cf4d30191d4b24fe03d80a7ee4528b16cb482fd73c259b2e6e4fde5d5d31be6b97703fbbb17c3e61d20203020303992d90fe15b918f58e8dac35e96d0ebf33834ccacc8a69b6a075b263d0df655e0301b8df4b987fcf3a98000ca00d3191fd2292dc9210d7f1ab382035b2e2d02be9020302030328797f5226ad9a63c859dc61073e8ef33fe15094e61db64bcde0379f055f733403b50fe3e685c2e442a3a81715f64a840afaf1f81b49ed21b3fc2ead0620f6caae020302030203020303189a1bc58c5621e4845025a9c534fb9ad2bb2f5be276faee403d59266561d652038325fb098a4b3a402690994212511e710d20cb7966fb26b3687fea719eca217a0203020303ca11813aa459d051b0411eeddd18070506e8fe2054a2e22a763b05454e87cefd03b2cb46d28f3bcf15305b0654ca442442420ccc1b28e44e2e2c84498571b5375a02030203039385ca432e99a05cca8aa7fe5868222cdb6c928c8bbdd7eb13c22c5abe1b11cd03e8cb7cbe434eae4b8b7910183b3b006a1b3df70ae7b30248fef24d64a004c3c90203020302030203035fb731b403c8979aa552e74b3534a247c638547dc7c957467a4b08855b29b74703d49a5d90635d403354f849daf9976a4f4dfd7dab5517b254638eb893511ebcaa02030203032fddd404fe9317d561378c78f3afbe75e18c27face10d4e6ea03fc2888b22e33033c8c390d481f51cf6b43c22677a971beae0e62e8b2ecfdaaed05b48ac0f60294020302030203020302030203070054000000000000000000000000000000005ca1ab1
e45e8d4a5100002010341305ecddd1b56329ac9f09a1235eec6ce6be69492a9788db13e9187dc21e9dc020303fb1c6d1aa6d3f3bef7a0bf4130218b3b168f9447e69ebcd3b68c2b2f41d9b2ef03652ba6f9b69aee3d28404079416c2f8fba4078d66b558c7a8d9615cfe7d3bd30020303d9f042d0d2f152e24d8cde02d3a7d7a1fa234efc5dc259572d412a2e607215ba03c5b76ff595e1d74a22eb44a5aed94f3225b6126c2c28ef04bb75e1d3804925ad02030314a2b125da4db5ba673cd5c0aaae8c5bf0857fd45728b868cff3f40eaf9f82790393e93c4f4b58f6f9d397d136319a29aa6b691b652651513bfc2297107379ce62020303f00359907dd68b2ae8e2d252d3313f3ba2bba16d21995333b2162b24c9bbeac4036435af585f0f75e60d362629108f6768756f7b39f1c70ab7f79e6b4e1bd9f08f020303929e2f8eb833089a3773b497247338865ef336de61e7da4a362eb3e5d5601a7203323197b010e3205d910c230463758f39cd6c01258db0a11b9b47f4c278db0494020303ab4bdc2dbea0c00b12cedf9e968135b62101bc1e20e270a1f694ae6a4686627c03140686262c769436fdaece3afe58e8a4423cbf381295a85237e52fac66c57879020303295e1973d07a067f281e3337e756bacf10dcc295f7074564874ea4401eb2a4e503cfec4348d3a697dd4f1835bc31c2615f56f92a02c1935cceec2501c12b8628f10203033892c29a2de6aee7888c2448fdbb3252d32b426bf74edf79223e4ee886fc0f6b03ef287d8ccaa574ebdac646e6d35bfb3ce52b00eda1e671d7d7bbf31bd59ff7ee020303c58f22b2dc782f914b31e3b87185b727a0bd2e2dcc41481e31ab1b26f222fdf703f0dcf8a2ce85de4d96bdc4c1a9c52a7ec54cc771750f0ed7d6c1113b93df65ce02030203039a7c26055306c8884baf96dccb2e3bb3cb30deceafdc73491bbdf0333400efc0036ee70bfe41de62ab49a9a63ca415bb881a92980f87fc044f2f5ae2e84185dfea0203020303c4332d86dee9e03fbda2dc0eb81cb20a6f6a20c7df95090f09e47d8e7efa1d7b03a698f30a106768bc9d451fd96a6808beb2b799deec6423688d02a9ba34b4af280203020302030203020303398dee7348cac5f07e4865c2049207722ec9572e2ae69b21a8cbd1c053c44a0e03612d7861c014aed261de20fd1109fc86ae090eb2c37a02b8a6072bba1c77c8b50203020302030203020302030203031f28ae8c421086878704ec730445ebf2ff23d186ffed24802f0ae24259c8d21403a8e38716cdd8a09095a7036c686009bd8236b1c7eb9507540fb981baa9a8bc4b020302030203030fe638892efa1dbdc2881def87e77dbbba95d91c8debdd9b242bbf0745455a7403e5554fbb47341d48f82f6
4a26d175a7d3559378657e77cf2de2eff917b95be300203020303512c2cf0ab4340a1623bdddc301aa586932f9413ea9bf8c0f1849d2d70d5d0ff0375d0cc499c7f76c70939fd8d633c658747eebf0eb138c15d902c34f0de9098030203020303b78dcbd59a3668396357cbda038d7e5bc77aac4acdb3cd3a75e96eb05079a6bf03ceb3ed2850bca5df0bd69ce2e85e9daff43cdb4c79f58685340f521521a0943f0203020302030201034b33ab5a3b8d3b01c374c1d7fcfc714398b7f0704ba2f1ee757670269fd5a7f7020302020203070354000000000000000000000000000000005ca1ab1e5820000000000000000000000000000000000000000000000000000000000000000043840c77070354000000000000000000000000000000005ca1ab1e582010c18923d58801103b7e76ccd81e81a281713d174575a74b2ef0341f6b9a42fd5820b8b76bb549992d9bfc44e3b36d087a175b2e78b9584fc752eaa3013e0bdd31e8070354000000000000000000000000000000005ca1ab1e58209bd14ac8c1cf553e0ad3a2c109b9871eb74f3c116bf0bf492ef04d2983722555582090629dad0a40430445b7d2b25a8d19c5d7a929608ed7890877a499aaca01ca5002030315edee06840e36ef17d13817ab5475d12f7bd50113984febf31e2cd80c08952c03d360a7d78676862429feb7c95d052c1e63379b8ad3becf085a21baa353ab93d30203037a2fb952c2cf8e85d9706bcbcb5a69b83b13403b58f06b0767f4204acc5917930310de142eb3b2790cf1e3694b72eecc7e8ab3860f543c15cc24274ff69570f009020303879875563fe8a079ef71e84b6840b187c681499095de9d02d8b101c9dfcd111e0395e9fc3b000e49b65678f256d247786f72c91494c960d117b7668045c35502720203033d99bf4088229f273fa4910aad8f0aae6f7de8fd1832ddd14c8fa3d083aac51603b40316099ecb013c6dcc6ac2a3e831521afa35ea0ee52485c2e8cd40bd81fd870203030b576686ff79ae37ff5ae2d4240131369472a24104dcabaaf3c348da66a638bf03dddfa8283748687718b9672b0a69a6b7758ce10eff383d83986c1a2aca2910e002030313ac1b1da5a5a4232e4e2b766aaba01f45f9444b926476f01882e30d4cc6ae1a0323f57d2012e1874436ddc007ea8bc6dcbeae6e0dac6fd044c8375d2fe593904502030381ee4d8ef714022c3c5fad435af845d213cb988ef7561ddf65929553b70dd69a03178f9fbf18b1d12feb522330c82fe96d15bc4964e1c1053093c4903149652e6b02030323cdd6298c89fc39f87595dedfd8bae3ae7a40b66f312333394482169297dc8d033517a6ff26c035b9f822da8d2abd642c858696e0d970b1026cb524cb0844195a02030391d
d21f4c52970493d439537192b245ccd2e4e3e8e16d90dc74e417718b12f9103d56b5ff3ad5ab9205b2d6f9c508e744643224a7ebca8c1a4aea71f01e48b186b02030304375ae3a357e874c2a10fe3596adee75d0ccb96e63838d8db70c9e402663e9903bd8d2e9ed97a66281cbb0733a92dcc92158740088acc7a9c834d8204c0acc1da0203033c9cd711a378c8153572663cfc686ea0324eaabf0feca614928eab900755299f030bdcf7033e475ad4a147377e1bb9ed8619b0c88b728f7935ecbe7bcd2fa82c7c02030203020302030344d578674b6be511832af115d2669571dda0918b6cc734aa4acda37260458f3303fa95439e418988542df6cc8a12cd2e59ddd44643f035364c14b146d8665ab538020302030203034f9b9d5ccea861bd0aa15a5fb7fecc1a6d49b66bc7eb1e8905701e3e5728957003a6bfd6ce49840bddcf6502294487dcf1e2b5d6b06100a0b1259dbe8c8bd8e44f0203020303dc08ac42d157ac7d835fabb64048b54993bf6636eff62863d99d2c8905f1e6050362a972a91cfac6bfbaf2c40c724f947a405ce6e647aac0a61ea8f467a49b41cc020302030203020303a4be360a2b33a98faf83a47c1611d2114b590f1a626d48f700e1b0d8361f65f6030e4a6c2e589051b01393778e2bd41764d047b0394238c514e3cff1bcd9f17fde0203020303a19150f49f5fa1e3a3e09c7b8e3a80ad9f901086b2acacc8a5712c945ab79d3903374e7d15b75adda866c38fbbe1cb5bcad247ad095de306706d40855b922df14f020302030203020302030354772bf7e2a00683456b752e674df624b9b8419fd126964d66a82f8ba678977a03dd8f48954ed2bb272c5b94a49d1ef09d545062536065580bbd306776bc135f8e02030203032108ea8ac4227399387099ff7faacb8c1e424f5543edb67d7d8ed0f04a4e0dfb0392659304959ceea896f45666a76214b0f96c0d0ac9ddb78a96f9a0271e7b579a02030203020303870c4f9820964a725c45a91364107661534dff05c30e966b1946f2157844ec0603bf64c46a8bfb74f75acb660d0a43078c21cdab2627c014fd463a56ad85cb7e6a020302030203034b81bf62e5171445bc7bb3e154c4236543feb39907364512e7f8bf3010d0bcd103c1e217970454c195c8cefeedb6eb556772703cdfcbb9473b1251407e3af45d4d0203020203a8dd420db1a92952522be68028b8762b9c2c45f11efe01d4e2b2a17a8aeca76202020203037c03317c701ee7c858e7c429134f07bc4f3bb40047681a2995924386b065a44003eeb2124d66ad9fe030707b71b337ead87239fbfec018f78a36cf83ffe6c1f3090203034479d72706bfadbfc681e4b1e0c17fd702e94ff5cce085697fa4915b9ddf8e5503978f813e60f4798
9d365c08ad74b7b5697ac63a4d729225fef5cbbf858dd9e360203031e3fe72c68bad17795f3ab1c89427a9db9297c750e25a03f4d5cc7f4300ccf25033477174075c81e1ea46067ae9766ac42b6e37b0122ca757914f2d38d5a5b0fd90203037c82934570e0e51dadfe294202f68ff1baa30ec7f3d972fd309af51bb73233b003c73c4ff799c5d7f7900bab9bed27acfd777778f080034d266e4b3a8cb275180e0203032ec060cb265b14a46177f0b9263af186c22d8fad7466efd3dda1a76839916f720322d842fbac43297665301e5a01f595b5961a7617045e6f90903794e64ae970f3020303bd26ad01b4a6d5fc9578bb889728e38b0cd1929f289dd0733beea3122035d8050305574e7ff67c46b4d58103152ffd94f950e5bf9a67a405de972849bfaa7a335e0203033c9f565b7511511ebda8b766512d87d572c4958008f933b4e55604a5f3c36e82036a24bb5153ae46e102a28f022b5305705a84d70a4d2d5b399a09ae90bec4c86d020303ca003945b6df159b5c900df34d54d18e81551ef946e6ec76aa5105912bd41228031937941108c7513a3bcf7e078b1b35a9816cf095dc7413079922c0eef235cd950203032c581d00b2b34c68be72f5453d8d67f30797a26d4b0df66f004fc0075cc8eb1003e71d380a7d686d28aca8fa3508c37b30fb5e30bcd348e19dfa6b547f2fda4fb602030203020303ac0a0e8df4bc9026b7b241c34d72dce10c8424eacea17d1670103c8ded2446be03f7a62663d338b5b7e9219d01266b1772ca3720daf925bd302b7dafcf8abebcba0203020302030366c76d1cd3e3136e15f9f29f3dcc4bded2c760f251e06403ea022bf5d67ae2d503c574a334d06a611fa0340a1213e317efb125ace4eda7a487ea00075e9a6b67a902030203020303ed1e8a1502eb462bb7f836f6d72486908e1b2cce7cb00e387cc1aadc827874d103bdbfb8f970bcc72256ac4e1eca0809217c625c6412289a6dc5dff7c91454436602030203020302030203020303e8bb2dae757d043417195292bac773cda990500845f91a94d00179fe89525c3e03e385244b15001ddd43b0180922bbccf4040a86bd116ade66fc3aced03dffecff02030203037d8dcb012bdde19a0dd178c1de91d21cc866a76b9b6315554fec4bc4f5daa79203bc46b61f50585799762df053271c52844c6fe83156fde628c8bc4369c4fed18202030203020303c998c4c602a03cfa0c92a1542165567f11d23f2ae5fb91d04e02292f8e297548039447848097d9500f21ebe819789f98461e01aff7cfcd442c8afe8e07b87a95690203020303f6441de6ba2bc5cc9ead4300519a94a14a80b85f2fb0fa41e2211d7c02af0e6703703a99e4c2133e89a6e892863f14f143cf5f2ad94bd527081c8
be6143f14f3db020302030203031da626649ee092857f195eb0059639309d744972389a4e748c471f16b0fc3c2e03f4072cd036d5bbb777ad24fa0b1a235806ef25737404d7ce4f83babb76bf090802030203020303c7f6a9615846a2d627db4c940098479bce3c61c8fc1170d0b7f8c9ac2bec5ea4033664667108158e9377b49cf3632952090b6eab3ba6eaed4f48ca9d5beb273fd002010203070354000000000000000000000000000000005ca1ab1e5820eff9a5f21a1dc5ce907981aedce8e1f0d94116f871970a4c9488b2a6813ffd41582021bb230cc7b5a15416d28b65e85327b729b384a46e7d1208f17d1d74e498f445020102030203070354000000000000000000000000000000005ca1ab1e5820cfe58c94626d82e54c34444223348c504ae148f1e79868731da9b44fc91ddfd4582040fc722808ecb16a4f1cb2e145abfb2b8eb9d731283dbb46fe013c0e3441dfbc070354000000000000000000000000000000005ca1ab1e58200000000000000000000000000000000000000000000000000000000000000002446746e71f070354000000000000000000000000000000005ca1ab1e5820bc32590bee71a54e9565024aca9df30307a01cf8f23561baed0ef54b30f5be68582007b7aa19b2ab0ca977cf557ea4cec4c0b84b00a9430cfe0323877011fa03269c020203e752c67cd3ffa4dacba7194a973b10998b056b5b936d17da2a8eae591a8e175e020303abdf2e1db4950317aadeff604ae51ac75e8281b1ea22e7f17349757c71dca21d03fd88dafa9a26e9c8f617b5de3e1a6021092dbff5c1fdb5d8efaeecb1f333565c020303b8a363b96519cb0eed132d807f6e42ea35f115584c443a74b14a19bbeac463d7038247bf369e033fcc19a5797960f1387f04f80cf396babac560060887288632db0203032a893ec5bee53177a40777945189164675444d0087d703c8129196df58b4ffd10384203647fe683ea28ce78395875f0bc39f1fe1ce6c9670b8393161514dab47010203039cd9d80315aa2688e25cdcc210d01a64e57404ec24bd81538fcfd3880c7a1485031ced4693c4d71b2e97ea6287a2d22ed1af991abfe52dd764bfcdb56f3084e85e0203039067a4614e2e410a883b1cf0ccfdc298c978614ca1a472330e5d63e1ea9ae095035bfc8cc6e977317e3dea3bdea3406975ae2384e72f6e5e09ebc3ff358e4d9725020303f30c3bcd31fed704d2c67d83ece97bb8fc518746b11b291f9ff5c12ea436f92703800f22b2fc6b77bdb96880866086a8f4d621ef386020c90fe2a678b1bc3a063d02030203035d2afadc42d28ae8d74c7b5f96e56bcaecc01423bc9555ef9d9686271ddd238b033852af41b0f8f922418b3f525cd77f039ce8a0e41034e8a
9c51de2daf331a7cc02030203020303dedf2c8185299a3cdcf5805fd538243eafabea31d99f30e0c56119453cbe0595035fd0c51fc95c362deb97f4d34a367c56d9c3bae67f33a75541d47299bf8c85d002030203033a34a2ec21ba01bdffa3e14bdc6234b1177f58fb0f8d20ded1e0d337abc9097303f2a2ca0856cfc4409a556f408436e6112049837ca240449b521ce77ded9bbb4502030203020302030355b79241b57362ed5a29e40e42647066862077123d3363d2776ae9a5235aa625031a0d841893cc3c11eefec6fcff6687e1f2d52c667b72e9896d185cfac2d52f200203020303267a5ba100569955e1248dd2758afbe9cabcc9fb5256aeadf2d9de2bd50fa9d3031c3657155c2172893ad0ceacfd6dbaac96e7450dd3572516816664bbad57307e0203020303bfdb95766031cea080daeba2879e20c2c9212e98699aa1a9ddd0f35b3f4f14d1031cb570e01fa4fd83227e9e7423cedcb4d1f2aa49a4b379bfb5532267cb41d1ed0203020303d26a86e0cde80dcb3dddc5689ee7aff7cc4aa63c69a65db071604f2d22821f5003453e11710c67ffb8aee8ecd4e3d9e482a3c3b6473055a8fda9141761be2a2cfd0203020303eed4e48df11288a42497f24b09a60c194737347e0f0324ace547906015c46763030f3541edd630e75e0ecfad8204446c4b04e707f29a911034b0d990df202058b6020302030357f21f30a7d272dc3b763a0ba582826c2888cd791ea5cfebf8c6eeba97688cff03942b80bd4855b40d077eb25e2677767cd9e3e32548b948133c53d5cfd98fb4120201020303039a912ac6df3a5af5d7cdbebd9c86dfc4d667901d38d17f5e265b4ec92851a3039a13ede5f8fe8fc936a9c053045c21b7cfac59232ed14acebe5a1270684c7ba402030366f89b9e4af2d9333431a7252441386158c0cd4f1416c432bbfeddeaf9a94fd303ea0e7f59ba22f8e1c16d8662786956816da4d6c44b3af63dbaeff9fa26ff58a8020303087927425293ead337b03b12cc3be21e324869c328321da791feace40848626c0320fde6ec582d5275f6c1b21b4ad7130f8e54c52d05483ef9effefa3cae9eaf51020303dd266e9e532095a3ef2479e8543f52ee9386405aadc619a2e962ad2f4ca7940003015c36f881ff87d7cdce55b31157699432c553b1c2be328b4b041688853ec960020303d58b82e1f5dc744c3e99a29dae08c0cacdd92b28e0966a5fb3b143479649353e0381584029a53e6c7f0dee68619e681482b9e36e43858e57bacb3554d7af2a8ad1020303f6ca9ca2515d3662f23cde1e54e67e0817607d0b9f501818a528ca1b43ffcce603bd381317706701d336e83e27c1cd699d0b616b349b0e28de4cd010cfec1a2bad0203020303af2d5e74e0ba57395bd0c
11be5508a506eee906defac2ac84fba6ce7b577205703dddb21150e7c057c4df77ad73836cefe1e746adc52dfe903bcb543bea8eba9d502030203036cb57c550ffabdb39fe5531fac6c603b79b2551bfac7e208e7a1b1628607ff9303f46bdcac887fc8561c752bc45e1c98389a6a35cc0572575245a8f2ae513bea3f02030203035dff75d9bd1da1247aa0fc644e204d8fe7a916636d465210ba9a828a93bd8fde03f50e7e2741b63ce73e98ef6a769fa9339d941cf993b7e4b26305f22e9f18bc560203020303ec8a5f20ba3d3385c3ce7cd26702f5e40a4432f72ac566a3df649c1af87741fb036a000d8ceda0fcfe3ba4e6ae633e3abbd3deee0db83107e5ce0e0469b26e7324020302030203036058e9f8cd448caadf126fd3b7d50fbbdd8e2f7a8de9160a484ad79f8829bf5a03be9a1646b44327a504c96d0b2ac009d73adb23ba21ba3df5a5dfff32b74403680203020302030203020303ebee5c234bc2f660a9b3efe1bd2fb7d340182d904429b1f2a4e89bb51b1c47c903e51438724a9cf3725c22e07d59ba15acf0bbf473b37744164f122ac475ec42d20203020303bf9c131a0283cc46ca74d21b68d0b3a62d131dc9f4787ab60772569aaba63fd703f011de292bb236c3b08513f7b82ab7d311d0f80d4d3e173c2f8445028ed1cbf8020302030203020302030203020302030392af697495712b4013883b3f5ad2d370bdb93f0ed60416692b0267f10d9a3caa0386fa8ccd91ab622b232223f9a347f1785ca9c4b7323a2e0d19a7971c3afd63ff0203020303b4f12607fb8df583b854d7b235f4a64ccb2f4bc9819dc50f3a03ed0d4906910e038f64a125d14bb92752d65593faae8e41bb5e80e4f147b20f0c247078f6e7ca77070354000000000000000000000000000000005ca1ab1e58202d11035f2912c26c30c4f8957d3910a20622ea8709c8cd3e0ad87fa0f4460bbb5820c0bf0b2ab68768eaabe5fda7814227beaeaf4c4ee7e67f5d07aefaf5f0410ab80203034d5eb602925f13a2147a2c1439d43faa74e2561bb3d27811f02042466fb2804f035d9458bc537a1957fddbf6c5f13c6bfc9349abf1251df9d6dd48b5b574f6f48f020303bbf6401ad2a6b95a3e749f5b31224fc7fcdd083e7aeac9671ec3bebda312fe5c03393a914dd0b171b4cca2f5cef52cb4ed4b564278c0fb678e5e8f3d911b4addb302030356cdb16849ae7aff540b8724f73974149f71cd3f984360537159a273a5bfcc1d03791ad7bed137c9501bcee55dc6c9b030157a30c37fca14d39c25d9d5137ae88b020303e43916580d350f4da396c5763989f003085f6c468cf815846a95571702f1f53903e88243a0e60743a8285f13df8159992bd95c7f9546a8b5ef0ea2166fa211b8f702030
39691d481d60a086435d9f914e8e2b5e5a68abfafb82dcc9d6de2176920c35ded03347f67f0fbbc63fa8a3b826c6491f42b13869a2abd2b6326d75d51cb30ea9cf1020303a06d3787a49c8745205aae2c80c6aed35adaa5a8e829f8ce8c29d55ffe8cadef032b843523c93d41eee41def0561be9ad7414c5bd9591d8e3723fcb0aea6170c72020303e56edd97325fff9e9a09d439d966a37ab63cdb3a3328b157445b60c3b91a86aa0381354b5bad8afeb2c183556c5f20e5d25c565cb8a738add05fc71bfb086737a102030301fa96c592fe444b2504b86acb3efb7befb3e241223f2d697c162be93668231d037f5346f59d4e0e4737f7b5cdde5494c43dcf2b583098022afa1d40024d434625020303299100220dba6b0afe91d1fb4a5c16f6cdc90da62bd73bd75b66063366a950f90315d7adf6a555d635edb76f96c7aeed7b5e3990ab1d13e0b01acd386ddeb43e0e0203034a527f4391b236f6ed15aeb5eb8839bca31aceadf3b8b5b7f5208d22f6a01b8903ecb9612fb023bcc161bfacadd2003a53d264c5555c4d65107fa01d984fc66017" +) diff --git a/zk/witness/witness_utils.go b/zk/witness/witness_utils.go new file mode 100644 index 00000000000..ce63342148e --- /dev/null +++ b/zk/witness/witness_utils.go @@ -0,0 +1,199 @@ +package witness + +import ( + "bytes" + "context" + "errors" + "fmt" + "math" + + "github.com/holiman/uint256" + coreState "github.com/ledgerwatch/erigon/core/state" + db2 "github.com/ledgerwatch/erigon/smt/pkg/db" + "github.com/ledgerwatch/erigon/smt/pkg/smt" + "github.com/ledgerwatch/erigon/turbo/trie" + + "github.com/ledgerwatch/erigon-lib/common" + "github.com/ledgerwatch/erigon-lib/common/datadir" + "github.com/ledgerwatch/erigon-lib/kv" + "github.com/ledgerwatch/erigon-lib/state" + corestate "github.com/ledgerwatch/erigon/core/state" + + "github.com/ledgerwatch/erigon/core/rawdb" + eritypes "github.com/ledgerwatch/erigon/core/types" + "github.com/ledgerwatch/erigon/eth/stagedsync" + dstypes "github.com/ledgerwatch/erigon/zk/datastream/types" + zkSmt "github.com/ledgerwatch/erigon/zk/smt" + zkUtils "github.com/ledgerwatch/erigon/zk/utils" + "github.com/ledgerwatch/log/v3" +) + +var ( + ErrNoWitnesses = errors.New("witness count is 0") +) + +func UnwindForWitness(ctx 
context.Context, tx kv.RwTx, startBlock, latestBlock uint64, dirs datadir.Dirs, historyV3 bool, agg *state.Aggregator) (err error) { + unwindState := &stagedsync.UnwindState{UnwindPoint: startBlock - 1} + stageState := &stagedsync.StageState{BlockNumber: latestBlock} + + hashStageCfg := stagedsync.StageHashStateCfg(nil, dirs, historyV3, agg) + if err := stagedsync.UnwindHashStateStage(unwindState, stageState, tx, hashStageCfg, ctx, log.New(), true); err != nil { + return fmt.Errorf("UnwindHashStateStage: %w", err) + } + + var expectedRootHash common.Hash + syncHeadHeader, err := rawdb.ReadHeaderByNumber_zkevm(tx, unwindState.UnwindPoint) + if err != nil { + return fmt.Errorf("ReadHeaderByNumber_zkevm for block %d: %v", unwindState.UnwindPoint, err) + } + + if syncHeadHeader == nil { + log.Warn("header not found for block number", "block", unwindState.UnwindPoint) + } else { + expectedRootHash = syncHeadHeader.Root + } + + if _, err := zkSmt.UnwindZkSMT(ctx, "api.generateWitness", stageState.BlockNumber, unwindState.UnwindPoint, tx, true, &expectedRootHash, true); err != nil { + return fmt.Errorf("UnwindZkSMT: %w", err) + } + + return nil +} + +type gerForWitnessDb interface { + GetBatchNoByL2Block(blockNum uint64) (uint64, error) + GetBatchGlobalExitRoots(lastBatch, currentBatch uint64) (*[]dstypes.GerUpdate, error) + GetBlockGlobalExitRoot(blockNum uint64) (common.Hash, error) +} + +func PrepareGersForWitness(block *eritypes.Block, db gerForWitnessDb, tds *coreState.TrieDbState, trieStateWriter *coreState.TrieStateWriter) error { + blockNum := block.NumberU64() + //[zkevm] get batches between last block and this one + // plus this blocks ger + lastBatchInserted, err := db.GetBatchNoByL2Block(blockNum - 1) + if err != nil { + return fmt.Errorf("GetBatchNoByL2Block for block %d: %w", blockNum-1, err) + } + + currentBatch, err := db.GetBatchNoByL2Block(blockNum) + if err != nil { + return fmt.Errorf("GetBatchNoByL2Block for block %d: %v", blockNum, err) + } + + 
gersInBetween, err := db.GetBatchGlobalExitRoots(lastBatchInserted, currentBatch) + if err != nil { + return fmt.Errorf("GetBatchGlobalExitRoots for block %d: %v", blockNum, err) + } + + var globalExitRoots []dstypes.GerUpdate + + if gersInBetween != nil { + globalExitRoots = append(globalExitRoots, *gersInBetween...) + } + + blockGer, err := db.GetBlockGlobalExitRoot(blockNum) + if err != nil { + return fmt.Errorf("GetBlockGlobalExitRoot for block %d: %v", blockNum, err) + } + emptyHash := common.Hash{} + + if blockGer != emptyHash { + blockGerUpdate := dstypes.GerUpdate{ + GlobalExitRoot: blockGer, + Timestamp: block.Header().Time, + } + globalExitRoots = append(globalExitRoots, blockGerUpdate) + } + + for _, ger := range globalExitRoots { + // [zkevm] - add GER if there is one for this batch + if err := zkUtils.WriteGlobalExitRoot(tds, trieStateWriter, ger.GlobalExitRoot, ger.Timestamp); err != nil { + return fmt.Errorf("WriteGlobalExitRoot: %w", err) + } + } + + return nil +} + +type trieDbState interface { + ResolveSMTRetainList(inclusion map[common.Address][]common.Hash) (*trie.RetainList, error) +} + +func BuildWitnessFromTrieDbState(ctx context.Context, tx kv.Tx, tds trieDbState, reader *corestate.PlainState, forcedContracts []common.Address, witnessFull bool) (witness *trie.Witness, err error) { + var rl trie.RetainDecider + // if full is true, we will send all the nodes to the witness + rl = &trie.AlwaysTrueRetainDecider{} + + if !witnessFull { + inclusion := make(map[common.Address][]common.Hash) + for _, contract := range forcedContracts { + err = reader.ForEachStorage(contract, common.Hash{}, func(key, secKey common.Hash, value uint256.Int) bool { + inclusion[contract] = append(inclusion[contract], key) + return false + }, math.MaxInt64) + if err != nil { + return nil, err + } + } + + rl, err = tds.ResolveSMTRetainList(inclusion) + if err != nil { + return nil, err + } + } + + eridb := db2.NewRoEriDb(tx) + smtTrie := smt.NewRoSMT(eridb) + + if witness, 
err = smtTrie.BuildWitness(rl, ctx); err != nil { + return nil, fmt.Errorf("BuildWitness: %w", err) + } + + return +} + +func GetWitnessBytes(witness *trie.Witness, debug bool) ([]byte, error) { + var buf bytes.Buffer + if _, err := witness.WriteInto(&buf, debug); err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +func ParseWitnessFromBytes(input []byte, trace bool) (*trie.Witness, error) { + return trie.NewWitnessFromReader(bytes.NewReader(input), trace) +} + +// merges witnesses into one +// corresponds to a witness built on a range of blocks +// input witnesses should be ordered by consequent blocks +// it replaces values from 2,3,4 into the first witness +func MergeWitnesses(ctx context.Context, witnesses []*trie.Witness) (*trie.Witness, error) { + if len(witnesses) == 0 { + return nil, ErrNoWitnesses + } + + if len(witnesses) == 1 { + return witnesses[0], nil + } + + baseSmt, err := smt.BuildSMTFromWitness(witnesses[0]) + if err != nil { + return nil, fmt.Errorf("BuildSMTfromWitness: %w", err) + } + for i := 1; i < len(witnesses); i++ { + if err := smt.AddWitnessToSMT(baseSmt, witnesses[i]); err != nil { + return nil, fmt.Errorf("AddWitnessToSMT: %w", err) + } + } + + // if full is true, we will send all the nodes to the witness + rl := &trie.AlwaysTrueRetainDecider{} + + witness, err := baseSmt.BuildWitness(rl, ctx) + if err != nil { + return nil, fmt.Errorf("BuildWitness: %w", err) + } + + return witness, nil +} diff --git a/zk/witness/witness_utils_test.go b/zk/witness/witness_utils_test.go new file mode 100644 index 00000000000..5e14e1abbe7 --- /dev/null +++ b/zk/witness/witness_utils_test.go @@ -0,0 +1,203 @@ +package witness + +import ( + "bytes" + "context" + "encoding/binary" + "encoding/hex" + "fmt" + "math/big" + "math/rand" + "testing" + + "github.com/ledgerwatch/erigon-lib/common" + "github.com/ledgerwatch/erigon/crypto" + "github.com/ledgerwatch/erigon/smt/pkg/smt" + "github.com/ledgerwatch/erigon/turbo/trie" + 
"github.com/status-im/keycard-go/hexutils" + "github.com/stretchr/testify/assert" +) + +func TestMergeWitnesses(t *testing.T) { + smt1 := smt.NewSMT(nil, false) + smt2 := smt.NewSMT(nil, false) + smtFull := smt.NewSMT(nil, false) + + random := rand.New(rand.NewSource(0)) + + numberOfAccounts := 500 + + for i := 0; i < numberOfAccounts; i++ { + a := getAddressForIndex(i) + addressBytes := crypto.Keccak256(a[:]) + address := common.BytesToAddress(addressBytes).String() + balance := new(big.Int).Rand(random, new(big.Int).Exp(common.Big2, common.Big256, nil)) + nonce := new(big.Int).Rand(random, new(big.Int).Exp(common.Big2, common.Big256, nil)) + bytecode := "afafaf" + contractStorage := make(map[string]string) + for j := 0; j < 10; j++ { + storageKey := genRandomByteArrayOfLen(32) + storageValue := genRandomByteArrayOfLen(32) + contractStorage[common.BytesToHash(storageKey).Hex()] = common.BytesToHash(storageValue).Hex() + } + var smtPart *smt.SMT + + if i&1 == 0 { + smtPart = smt1 + } else { + smtPart = smt2 + } + + if _, err := smtPart.SetAccountBalance(address, balance); err != nil { + t.Error(err) + return + } + if _, err := smtPart.SetAccountNonce(address, nonce); err != nil { + t.Error(err) + return + } + if err := smtPart.SetContractBytecode(address, bytecode); err != nil { + t.Error(err) + return + } + if err := smtPart.Db.AddCode(hexutils.HexToBytes(bytecode)); err != nil { + t.Error(err) + return + } + if _, err := smtPart.SetContractStorage(address, contractStorage, nil); err != nil { + t.Error(err) + return + } + + if _, err := smtFull.SetAccountBalance(address, balance); err != nil { + t.Error(err) + return + } + if _, err := smtFull.SetAccountNonce(address, nonce); err != nil { + t.Error(err) + return + } + if err := smtFull.SetContractBytecode(address, bytecode); err != nil { + t.Error(err) + return + } + if err := smtFull.Db.AddCode(hexutils.HexToBytes(bytecode)); err != nil { + t.Error(err) + return + } + if _, err := 
smtFull.SetContractStorage(address, contractStorage, nil); err != nil { + t.Error(err) + return + } + } + + rl1 := &trie.AlwaysTrueRetainDecider{} + rl2 := &trie.AlwaysTrueRetainDecider{} + rlFull := &trie.AlwaysTrueRetainDecider{} + witness1, err := smt1.BuildWitness(rl1, context.Background()) + if err != nil { + t.Error(err) + return + } + + witness2, err := smt2.BuildWitness(rl2, context.Background()) + if err != nil { + t.Error(err) + return + } + + witnessFull, err := smtFull.BuildWitness(rlFull, context.Background()) + if err != nil { + t.Error(err) + return + } + mergedWitness, err := MergeWitnesses(context.Background(), []*trie.Witness{witness1, witness2}) + assert.Nil(t, err, "should successfully merge witnesses") + + //create writer + var buff bytes.Buffer + mergedWitness.WriteDiff(witnessFull, &buff) + diff := buff.String() + assert.Equal(t, 0, len(diff), "witnesses should be equal") + if len(diff) > 0 { + fmt.Println(diff) + } +} + +func getAddressForIndex(index int) [20]byte { + var address [20]byte + binary.BigEndian.PutUint32(address[:], uint32(index)) + return address +} + +func genRandomByteArrayOfLen(length uint) []byte { + array := make([]byte, length) + for i := uint(0); i < length; i++ { + array[i] = byte(rand.Intn(256)) + } + return array +} + +func TestMergeRealWitnesses(t *testing.T) { + witnessBytes1, err := hex.DecodeString(witness1) + assert.NoError(t, err, "error decoding witness1") + witnessBytes2, err := hex.DecodeString(witness2) + assert.NoError(t, err, "error decoding witness2") + expectedWitnessBytes, err := hex.DecodeString(resultWitness) + assert.NoError(t, err, "error decoding expectedWitness") + + blockWitness1, err := ParseWitnessFromBytes(witnessBytes1, false) + assert.NoError(t, err, "error parsing witness1") + blockWitness2, err := ParseWitnessFromBytes(witnessBytes2, false) + assert.NoError(t, err, "error parsing witness2") + expectedWitness, err := ParseWitnessFromBytes(expectedWitnessBytes, false) + assert.NoError(t, 
err, "error parsing expectedWitness") + + mergedWitness, err := MergeWitnesses(context.Background(), []*trie.Witness{blockWitness1, blockWitness2}) + assert.NoError(t, err, "error merging witnesses") + + //create writer + var buff bytes.Buffer + expectedWitness.WriteDiff(mergedWitness, &buff) + diff := buff.String() + if len(diff) > 0 { + fmt.Println(diff) + } + assert.Equal(t, 0, len(diff), "witnesses should be equal") +} + +func TestMergeWitnessesWithHashNodes(t *testing.T) { + smt1 := smt.NewSMT(nil, false) + smt2 := smt.NewSMT(nil, false) + smtFull := smt.NewSMT(nil, false) + + _, err := smt1.InsertHashNode([]int{0, 0, 0}, new(big.Int).SetUint64(1)) + assert.NoError(t, err, "error inserting hash node") + _, err = smt2.InsertHashNode([]int{0, 0}, new(big.Int).SetUint64(2)) + assert.NoError(t, err, "error inserting hash node") + _, err = smtFull.InsertHashNode([]int{0, 0, 0}, new(big.Int).SetUint64(1)) + assert.NoError(t, err, "error inserting hash node") + + // get witnesses + rl1 := &trie.AlwaysTrueRetainDecider{} + rl2 := &trie.AlwaysTrueRetainDecider{} + rlFull := &trie.AlwaysTrueRetainDecider{} + blockWitness1, err := smt1.BuildWitness(rl1, context.Background()) + assert.NoError(t, err, "error building witness") + blockWitness2, err := smt2.BuildWitness(rl2, context.Background()) + assert.NoError(t, err, "error building witness") + expectedWitness, err := smtFull.BuildWitness(rlFull, context.Background()) + assert.NoError(t, err, "error building witness") + + mergedWitness, err := MergeWitnesses(context.Background(), []*trie.Witness{blockWitness1, blockWitness2}) + assert.NoError(t, err, "error merging witnesses") + + //create writer + var buff bytes.Buffer + expectedWitness.WriteDiff(mergedWitness, &buff) + diff := buff.String() + if len(diff) > 0 { + fmt.Println(diff) + } + assert.Equal(t, 0, len(diff), "witnesses should be equal") +}