diff --git a/.github/workflows/ci_benchmarks_macos.yaml b/.github/workflows/ci_benchmarks_macos.yaml
index 6e7103a751..24fc63eb14 100644
--- a/.github/workflows/ci_benchmarks_macos.yaml
+++ b/.github/workflows/ci_benchmarks_macos.yaml
@@ -48,7 +48,7 @@ jobs:
   ci_benchmarks_macos:
     name: ci_benchmarks_macos
     needs: prologue
-    runs-on: macos-11
+    runs-on: macos-12
     steps:
     - uses: actions/checkout@v3
     - run: |
diff --git a/.github/workflows/ci_integration_tests_macos.yaml b/.github/workflows/ci_integration_tests_macos.yaml
index a8d4ba99db..ae0fc371ef 100644
--- a/.github/workflows/ci_integration_tests_macos.yaml
+++ b/.github/workflows/ci_integration_tests_macos.yaml
@@ -51,7 +51,7 @@ jobs:
     name: ci_integration_tests_macos
     needs: prologue
     timeout-minutes: 140
-    runs-on: macos-11
+    runs-on: macos-12
     steps:
     - uses: actions/checkout@v3
     - run: |
diff --git a/.github/workflows/ci_linters_macos.yaml b/.github/workflows/ci_linters_macos.yaml
index 722972b858..5999c4f622 100644
--- a/.github/workflows/ci_linters_macos.yaml
+++ b/.github/workflows/ci_linters_macos.yaml
@@ -48,7 +48,7 @@ jobs:
   ci_linters_macos:
     name: ci_linters_macos
     needs: prologue
-    runs-on: macos-11
+    runs-on: macos-12
     steps:
     - uses: actions/checkout@v3
     - run: |
diff --git a/.github/workflows/ci_quick_checks_macos.yaml b/.github/workflows/ci_quick_checks_macos.yaml
index 91e8668946..4e0faea5ae 100644
--- a/.github/workflows/ci_quick_checks_macos.yaml
+++ b/.github/workflows/ci_quick_checks_macos.yaml
@@ -48,7 +48,7 @@ jobs:
   ci_quick_checks_macos:
     name: ci_quick_checks_macos
     needs: prologue
-    runs-on: macos-11
+    runs-on: macos-12
     steps:
     - uses: actions/checkout@v3
     - run: |
diff --git a/.github/workflows/ci_unit_tests_macos.yaml b/.github/workflows/ci_unit_tests_macos.yaml
index 53063513a0..563d49b0f7 100644
--- a/.github/workflows/ci_unit_tests_macos.yaml
+++ b/.github/workflows/ci_unit_tests_macos.yaml
@@ -48,7 +48,7 @@ jobs:
   ci_unit_tests_macos:
     name: ci_unit_tests_macos
     needs: prologue
-    runs-on: macos-11
+    runs-on: macos-12
     steps:
     - name: Install nextest
       uses: taiki-e/install-action@nextest
diff --git a/.github/workflows/package.yaml b/.github/workflows/package.yaml
index 9132abf38e..c00935e1ff 100644
--- a/.github/workflows/package.yaml
+++ b/.github/workflows/package.yaml
@@ -11,12 +11,12 @@ concurrency:
 on:
   push:
     branches:
-      - 'pkg/*'
+      - "pkg/*"
 
 env:
   CARGO_TERM_COLOR: always
   RUST_BACKTRACE: full
-  CKB_CLI_VERSION: v1.9.0
+  CKB_CLI_VERSION: v1.11.0
 
 jobs:
   create-release:
@@ -52,35 +52,35 @@ jobs:
           - rel_pkg: "x86_64-unknown-linux-gnu-portable.tar.gz"
             build_target: "prod_portable"
     steps:
-    - uses: actions/checkout@v3
-    - name: Set Env
-      run: |
-        export GIT_TAG_NAME=` echo ${{ github.ref }} | awk -F '/' '{print $4}' `
-        echo "GIT_TAG_NAME=$GIT_TAG_NAME" >> $GITHUB_ENV
-    - name: Build CKB and Package CKB
-      env:
-        LARGE_SECRET_PASSPHRASE: ${{ secrets.LARGE_SECRET_PASSPHRASE }}
-        QINIU_ACCESS_KEY: ${{ secrets.QINIU_ACCESS_KEY }}
-        QINIU_SECRET_KEY: ${{ secrets.QINIU_SECRET_KEY }}
-        GPG_SIGNER: ${{ secrets.GPG_SIGNER }}
-      run: |
-        export GIT_TAG_NAME=` echo ${{ github.ref }} | awk -F '/' '{print $4}' `
-        docker run --rm -i -w /ckb -v $(pwd):/ckb $BUILDER_IMAGE make ${{ matrix.build_target }}
-        gpg --quiet --batch --yes --decrypt --passphrase="$LARGE_SECRET_PASSPHRASE" --output devtools/ci/signer.asc devtools/ci/signer.asc.gpg
-        gpg --import devtools/ci/signer.asc
-        devtools/ci/package.sh target/prod/ckb
-        mv ${{ github.workspace }}/releases/ckb_${{env.GIT_TAG_NAME }}_${{env.REL_PKG }} ${{ github.workspace }}
-        mv ${{ github.workspace }}/releases/ckb_${{env.GIT_TAG_NAME }}_${{env.REL_PKG }}.asc ${{ github.workspace }}
-    - name: upload-zip-file
-      uses: actions/upload-artifact@v2
-      with:
-        name: ckb_${{env.GIT_TAG_NAME }}_${{env.REL_PKG }}
-        path: ckb_${{env.GIT_TAG_NAME }}_${{env.REL_PKG }}
-    - name: upload-asc-file
-      uses: actions/upload-artifact@v2
-      with:
-        name: ckb_${{env.GIT_TAG_NAME }}_${{env.REL_PKG }}.asc
-        path: ckb_${{env.GIT_TAG_NAME }}_${{env.REL_PKG }}.asc
+      - uses: actions/checkout@v3
+      - name: Set Env
+        run: |
+          export GIT_TAG_NAME=` echo ${{ github.ref }} | awk -F '/' '{print $4}' `
+          echo "GIT_TAG_NAME=$GIT_TAG_NAME" >> $GITHUB_ENV
+      - name: Build CKB and Package CKB
+        env:
+          LARGE_SECRET_PASSPHRASE: ${{ secrets.LARGE_SECRET_PASSPHRASE }}
+          QINIU_ACCESS_KEY: ${{ secrets.QINIU_ACCESS_KEY }}
+          QINIU_SECRET_KEY: ${{ secrets.QINIU_SECRET_KEY }}
+          GPG_SIGNER: ${{ secrets.GPG_SIGNER }}
+        run: |
+          export GIT_TAG_NAME=` echo ${{ github.ref }} | awk -F '/' '{print $4}' `
+          docker run --rm -i -w /ckb -v $(pwd):/ckb $BUILDER_IMAGE make ${{ matrix.build_target }}
+          gpg --quiet --batch --yes --decrypt --passphrase="$LARGE_SECRET_PASSPHRASE" --output devtools/ci/signer.asc devtools/ci/signer.asc.gpg
+          gpg --import devtools/ci/signer.asc
+          devtools/ci/package.sh target/prod/ckb
+          mv ${{ github.workspace }}/releases/ckb_${{env.GIT_TAG_NAME }}_${{env.REL_PKG }} ${{ github.workspace }}
+          mv ${{ github.workspace }}/releases/ckb_${{env.GIT_TAG_NAME }}_${{env.REL_PKG }}.asc ${{ github.workspace }}
+      - name: upload-zip-file
+        uses: actions/upload-artifact@v2
+        with:
+          name: ckb_${{env.GIT_TAG_NAME }}_${{env.REL_PKG }}
+          path: ckb_${{env.GIT_TAG_NAME }}_${{env.REL_PKG }}
+      - name: upload-asc-file
+        uses: actions/upload-artifact@v2
+        with:
+          name: ckb_${{env.GIT_TAG_NAME }}_${{env.REL_PKG }}.asc
+          path: ckb_${{env.GIT_TAG_NAME }}_${{env.REL_PKG }}.asc
     env:
       BUILDER_IMAGE: nervos/ckb-docker-builder:bionic-rust-1.75.0
       REL_PKG: ${{ matrix.rel_pkg }}
@@ -89,42 +89,42 @@ jobs:
     name: package-for-linux-aarch64
     runs-on: ubuntu-20.04
     steps:
-    - uses: actions/checkout@v3
-    - name: Set Env
-      run: |
-        export GIT_TAG_NAME=` echo ${{ github.ref }} | awk -F '/' '{print $4}' `
-        echo "GIT_TAG_NAME=$GIT_TAG_NAME" >> $GITHUB_ENV
-    - name: Add rust target
-      run: rustup target add aarch64-unknown-linux-gnu
-    - name: Install dependencies
-      run: sudo apt-get update && sudo apt-get install -y gcc-multilib && sudo apt-get install -y build-essential clang gcc-aarch64-linux-gnu g++-aarch64-linux-gnu
-    - name: Build CKB and Package CKB
-      env:
-        LARGE_SECRET_PASSPHRASE: ${{ secrets.LARGE_SECRET_PASSPHRASE }}
-        QINIU_ACCESS_KEY: ${{ secrets.QINIU_ACCESS_KEY }}
-        QINIU_SECRET_KEY: ${{ secrets.QINIU_SECRET_KEY }}
-        GPG_SIGNER: ${{ secrets.GPG_SIGNER }}
-        SKIP_CKB_CLI: true
-      run: |
-        export GIT_TAG_NAME=` echo ${{ github.ref }} | awk -F '/' '{print $4}' `
+      - uses: actions/checkout@v3
+      - name: Set Env
+        run: |
+          export GIT_TAG_NAME=` echo ${{ github.ref }} | awk -F '/' '{print $4}' `
+          echo "GIT_TAG_NAME=$GIT_TAG_NAME" >> $GITHUB_ENV
+      - name: Add rust target
+        run: rustup target add aarch64-unknown-linux-gnu
+      - name: Install dependencies
+        run: sudo apt-get update && sudo apt-get install -y gcc-multilib && sudo apt-get install -y build-essential clang gcc-aarch64-linux-gnu g++-aarch64-linux-gnu
+      - name: Build CKB and Package CKB
+        env:
+          LARGE_SECRET_PASSPHRASE: ${{ secrets.LARGE_SECRET_PASSPHRASE }}
+          QINIU_ACCESS_KEY: ${{ secrets.QINIU_ACCESS_KEY }}
+          QINIU_SECRET_KEY: ${{ secrets.QINIU_SECRET_KEY }}
+          GPG_SIGNER: ${{ secrets.GPG_SIGNER }}
+          SKIP_CKB_CLI: true
+        run: |
+          export GIT_TAG_NAME=` echo ${{ github.ref }} | awk -F '/' '{print $4}' `
 
-        PKG_CONFIG_ALLOW_CROSS=1 CC=gcc CC_aarch64_unknown_linux_gnu=aarch64-linux-gnu-gcc CKB_BUILD_TARGET="--target=aarch64-unknown-linux-gnu" make prod_portable
+          PKG_CONFIG_ALLOW_CROSS=1 CC=gcc CC_aarch64_unknown_linux_gnu=aarch64-linux-gnu-gcc CKB_BUILD_TARGET="--target=aarch64-unknown-linux-gnu" make prod_portable
 
-        gpg --quiet --batch --yes --decrypt --passphrase="$LARGE_SECRET_PASSPHRASE" --output devtools/ci/signer.asc devtools/ci/signer.asc.gpg
-        gpg --import devtools/ci/signer.asc
-        devtools/ci/package.sh target/aarch64-unknown-linux-gnu/prod/ckb
-        mv ${{ github.workspace }}/releases/ckb_${{env.GIT_TAG_NAME }}_${{env.REL_PKG }} ${{ github.workspace }}
-        mv ${{ github.workspace }}/releases/ckb_${{env.GIT_TAG_NAME }}_${{env.REL_PKG }}.asc ${{ github.workspace }}
-    - name: upload-zip-file
-      uses: actions/upload-artifact@v2
-      with:
-        name: ckb_${{env.GIT_TAG_NAME }}_${{env.REL_PKG }}
-        path: ckb_${{env.GIT_TAG_NAME }}_${{env.REL_PKG }}
-    - name: upload-asc-file
-      uses: actions/upload-artifact@v2
-      with:
-        name: ckb_${{env.GIT_TAG_NAME }}_${{env.REL_PKG }}.asc
-        path: ckb_${{env.GIT_TAG_NAME }}_${{env.REL_PKG }}.asc
+          gpg --quiet --batch --yes --decrypt --passphrase="$LARGE_SECRET_PASSPHRASE" --output devtools/ci/signer.asc devtools/ci/signer.asc.gpg
+          gpg --import devtools/ci/signer.asc
+          devtools/ci/package.sh target/aarch64-unknown-linux-gnu/prod/ckb
+          mv ${{ github.workspace }}/releases/ckb_${{env.GIT_TAG_NAME }}_${{env.REL_PKG }} ${{ github.workspace }}
+          mv ${{ github.workspace }}/releases/ckb_${{env.GIT_TAG_NAME }}_${{env.REL_PKG }}.asc ${{ github.workspace }}
+      - name: upload-zip-file
+        uses: actions/upload-artifact@v2
+        with:
+          name: ckb_${{env.GIT_TAG_NAME }}_${{env.REL_PKG }}
+          path: ckb_${{env.GIT_TAG_NAME }}_${{env.REL_PKG }}
+      - name: upload-asc-file
+        uses: actions/upload-artifact@v2
+        with:
+          name: ckb_${{env.GIT_TAG_NAME }}_${{env.REL_PKG }}.asc
+          path: ckb_${{env.GIT_TAG_NAME }}_${{env.REL_PKG }}.asc
     env:
       REL_PKG: aarch64-unknown-linux-gnu.tar.gz
@@ -139,40 +139,40 @@ jobs:
           - rel_pkg: "x86_64-unknown-centos-gnu-portable.tar.gz"
             build_target: "prod_portable"
     steps:
-    - uses: actions/checkout@v3
-    - name: Set Env
-      run: |
-        export GIT_TAG_NAME=` echo ${{ github.ref }} | awk -F '/' '{print $4}' `
-        echo "GIT_TAG_NAME=$GIT_TAG_NAME" >> $GITHUB_ENV
-    - name: Build CKB and Package CKB
-      env:
-        LARGE_SECRET_PASSPHRASE: ${{ secrets.LARGE_SECRET_PASSPHRASE }}
-        GPG_SIGNER: ${{ secrets.GPG_SIGNER }}
-      run: |
-        export GIT_TAG_NAME=` echo ${{ github.ref }} | awk -F '/' '{print $4}' `
-        docker run --rm -i -w /ckb -v $(pwd):/ckb $BUILDER_IMAGE make ${{ matrix.build_target }}
-        gpg --quiet --batch --yes --decrypt --passphrase="$LARGE_SECRET_PASSPHRASE" --output devtools/ci/signer.asc devtools/ci/signer.asc.gpg
-        gpg --import devtools/ci/signer.asc
-        devtools/ci/package.sh target/prod/ckb
-        mv ${{ github.workspace }}/releases/ckb_${{env.GIT_TAG_NAME }}_${{env.REL_PKG }} ${{ github.workspace }}
-        mv ${{ github.workspace }}/releases/ckb_${{env.GIT_TAG_NAME }}_${{env.REL_PKG }}.asc ${{ github.workspace }}
-    - name: upload-zip-file
-      uses: actions/upload-artifact@v2
-      with:
-        name: ckb_${{env.GIT_TAG_NAME }}_${{env.REL_PKG }}
-        path: ckb_${{env.GIT_TAG_NAME }}_${{env.REL_PKG }}
-    - name: upload-asc-file
-      uses: actions/upload-artifact@v2
-      with:
-        name: ckb_${{env.GIT_TAG_NAME }}_${{env.REL_PKG }}.asc
-        path: ckb_${{env.GIT_TAG_NAME }}_${{env.REL_PKG }}.asc
+      - uses: actions/checkout@v3
+      - name: Set Env
+        run: |
+          export GIT_TAG_NAME=` echo ${{ github.ref }} | awk -F '/' '{print $4}' `
+          echo "GIT_TAG_NAME=$GIT_TAG_NAME" >> $GITHUB_ENV
+      - name: Build CKB and Package CKB
+        env:
+          LARGE_SECRET_PASSPHRASE: ${{ secrets.LARGE_SECRET_PASSPHRASE }}
+          GPG_SIGNER: ${{ secrets.GPG_SIGNER }}
+        run: |
+          export GIT_TAG_NAME=` echo ${{ github.ref }} | awk -F '/' '{print $4}' `
+          docker run --rm -i -w /ckb -v $(pwd):/ckb $BUILDER_IMAGE make ${{ matrix.build_target }}
+          gpg --quiet --batch --yes --decrypt --passphrase="$LARGE_SECRET_PASSPHRASE" --output devtools/ci/signer.asc devtools/ci/signer.asc.gpg
+          gpg --import devtools/ci/signer.asc
+          devtools/ci/package.sh target/prod/ckb
+          mv ${{ github.workspace }}/releases/ckb_${{env.GIT_TAG_NAME }}_${{env.REL_PKG }} ${{ github.workspace }}
+          mv ${{ github.workspace }}/releases/ckb_${{env.GIT_TAG_NAME }}_${{env.REL_PKG }}.asc ${{ github.workspace }}
+      - name: upload-zip-file
+        uses: actions/upload-artifact@v2
+        with:
+          name: ckb_${{env.GIT_TAG_NAME }}_${{env.REL_PKG }}
+          path: ckb_${{env.GIT_TAG_NAME }}_${{env.REL_PKG }}
+      - name: upload-asc-file
+        uses: actions/upload-artifact@v2
+        with:
+          name: ckb_${{env.GIT_TAG_NAME }}_${{env.REL_PKG }}.asc
+          path: ckb_${{env.GIT_TAG_NAME }}_${{env.REL_PKG }}.asc
     env:
       BUILDER_IMAGE: nervos/ckb-docker-builder:centos-7-rust-1.75.0
       REL_PKG: ${{ matrix.rel_pkg }}
 
   package-for-mac:
     name: package-for-mac
-    runs-on: macos-11
+    runs-on: macos-12
     strategy:
       matrix:
         include:
@@ -181,35 +181,35 @@ jobs:
           - rel_pkg: "x86_64-apple-darwin-portable.zip"
             build_target: "prod_portable"
     steps:
-    - uses: actions/checkout@v3
-    - name: Set Env
-      run: |
-        export GIT_TAG_NAME=` echo ${{ github.ref }} | awk -F '/' '{print $4}' `
-        echo "GIT_TAG_NAME=$GIT_TAG_NAME" >> $GITHUB_ENV
-    - name: Build CKB and Package CKB
-      env:
-        LARGE_SECRET_PASSPHRASE: ${{ secrets.LARGE_SECRET_PASSPHRASE }}
-        GPG_SIGNER: ${{ secrets.GPG_SIGNER }}
-      run: |
-        export GIT_TAG_NAME=` echo ${{ github.ref }} | awk -F '/' '{print $4}' `
+      - uses: actions/checkout@v3
+      - name: Set Env
+        run: |
+          export GIT_TAG_NAME=` echo ${{ github.ref }} | awk -F '/' '{print $4}' `
+          echo "GIT_TAG_NAME=$GIT_TAG_NAME" >> $GITHUB_ENV
+      - name: Build CKB and Package CKB
+        env:
+          LARGE_SECRET_PASSPHRASE: ${{ secrets.LARGE_SECRET_PASSPHRASE }}
+          GPG_SIGNER: ${{ secrets.GPG_SIGNER }}
+        run: |
+          export GIT_TAG_NAME=` echo ${{ github.ref }} | awk -F '/' '{print $4}' `
 
-        make ${{ matrix.build_target }}
+          make ${{ matrix.build_target }}
 
-        gpg --quiet --batch --yes --decrypt --passphrase="$LARGE_SECRET_PASSPHRASE" --output devtools/ci/signer.asc devtools/ci/signer.asc.gpg
-        gpg --import devtools/ci/signer.asc
-        devtools/ci/package.sh target/prod/ckb
-        mv ${{ github.workspace }}/releases/ckb_${{env.GIT_TAG_NAME }}_${{env.REL_PKG }} ${{ github.workspace }}
-        mv ${{ github.workspace }}/releases/ckb_${{env.GIT_TAG_NAME }}_${{env.REL_PKG }}.asc ${{ github.workspace }}
-    - name: upload-zip-file
-      uses: actions/upload-artifact@v2
-      with:
-        name: ckb_${{env.GIT_TAG_NAME }}_${{env.REL_PKG }}
-        path: ckb_${{env.GIT_TAG_NAME }}_${{env.REL_PKG }}
-    - name: upload-asc-file
-      uses: actions/upload-artifact@v2
-      with:
-        name: ckb_${{env.GIT_TAG_NAME }}_${{env.REL_PKG }}.asc
-        path: ckb_${{env.GIT_TAG_NAME }}_${{env.REL_PKG }}.asc
+          gpg --quiet --batch --yes --decrypt --passphrase="$LARGE_SECRET_PASSPHRASE" --output devtools/ci/signer.asc devtools/ci/signer.asc.gpg
+          gpg --import devtools/ci/signer.asc
+          devtools/ci/package.sh target/prod/ckb
+          mv ${{ github.workspace }}/releases/ckb_${{env.GIT_TAG_NAME }}_${{env.REL_PKG }} ${{ github.workspace }}
+          mv ${{ github.workspace }}/releases/ckb_${{env.GIT_TAG_NAME }}_${{env.REL_PKG }}.asc ${{ github.workspace }}
+      - name: upload-zip-file
+        uses: actions/upload-artifact@v2
+        with:
+          name: ckb_${{env.GIT_TAG_NAME }}_${{env.REL_PKG }}
+          path: ckb_${{env.GIT_TAG_NAME }}_${{env.REL_PKG }}
+      - name: upload-asc-file
+        uses: actions/upload-artifact@v2
+        with:
+          name: ckb_${{env.GIT_TAG_NAME }}_${{env.REL_PKG }}.asc
+          path: ckb_${{env.GIT_TAG_NAME }}_${{env.REL_PKG }}.asc
     env:
       REL_PKG: ${{ matrix.rel_pkg }}
@@ -224,48 +224,48 @@ jobs:
           - rel_pkg: "aarch64-apple-darwin-portable.zip"
             build_target: "prod_portable"
     steps:
-    - name: Setup PATH
-      run: |
-        echo /opt/homebrew/bin >> $GITHUB_PATH
-        echo /opt/homebrew/sbin >> $GITHUB_PATH
-        echo "$HOME/.cargo/bin" >> $GITHUB_PATH
-    - uses: actions/checkout@v3
-    - name: Set Env
-      run: |
-        export GIT_TAG_NAME=` echo ${{ github.ref }} | awk -F '/' '{print $4}' `
-        echo "GIT_TAG_NAME=$GIT_TAG_NAME" >> $GITHUB_ENV
-    - name: Install Dependencies
-      run: |
-        if ! type -f gpg &> /dev/null; then
-          brew install gnupg
-        fi
-        if ! [ -f "$HOME/.cargo/bin/rustup" ]; then
-          curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y
-        fi
-    - name: Build CKB and Package CKB
-      env:
-        LARGE_SECRET_PASSPHRASE: ${{ secrets.LARGE_SECRET_PASSPHRASE }}
-        GPG_SIGNER: ${{ secrets.GPG_SIGNER }}
-      run: |
-        export GIT_TAG_NAME=` echo ${{ github.ref }} | awk -F '/' '{print $4}' `
+      - name: Setup PATH
+        run: |
+          echo /opt/homebrew/bin >> $GITHUB_PATH
+          echo /opt/homebrew/sbin >> $GITHUB_PATH
+          echo "$HOME/.cargo/bin" >> $GITHUB_PATH
+      - uses: actions/checkout@v3
+      - name: Set Env
+        run: |
+          export GIT_TAG_NAME=` echo ${{ github.ref }} | awk -F '/' '{print $4}' `
+          echo "GIT_TAG_NAME=$GIT_TAG_NAME" >> $GITHUB_ENV
+      - name: Install Dependencies
+        run: |
+          if ! type -f gpg &> /dev/null; then
+            brew install gnupg
+          fi
+          if ! [ -f "$HOME/.cargo/bin/rustup" ]; then
+            curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y
+          fi
+      - name: Build CKB and Package CKB
+        env:
+          LARGE_SECRET_PASSPHRASE: ${{ secrets.LARGE_SECRET_PASSPHRASE }}
+          GPG_SIGNER: ${{ secrets.GPG_SIGNER }}
+        run: |
+          export GIT_TAG_NAME=` echo ${{ github.ref }} | awk -F '/' '{print $4}' `
 
-        make ${{ matrix.build_target }}
+          make ${{ matrix.build_target }}
 
-        gpg --quiet --batch --yes --decrypt --passphrase="$LARGE_SECRET_PASSPHRASE" --output devtools/ci/signer.asc devtools/ci/signer.asc.gpg
-        gpg --import devtools/ci/signer.asc
-        devtools/ci/package.sh target/prod/ckb
-        mv ${{ github.workspace }}/releases/ckb_${{env.GIT_TAG_NAME }}_${{env.REL_PKG }} ${{ github.workspace }}
-        mv ${{ github.workspace }}/releases/ckb_${{env.GIT_TAG_NAME }}_${{env.REL_PKG }}.asc ${{ github.workspace }}
-    - name: upload-zip-file
-      uses: actions/upload-artifact@v2
-      with:
-        name: ckb_${{env.GIT_TAG_NAME }}_${{env.REL_PKG }}
-        path: ckb_${{env.GIT_TAG_NAME }}_${{env.REL_PKG }}
-    - name: upload-asc-file
-      uses: actions/upload-artifact@v2
-      with:
-        name: ckb_${{env.GIT_TAG_NAME }}_${{env.REL_PKG }}.asc
-        path: ckb_${{env.GIT_TAG_NAME }}_${{env.REL_PKG }}.asc
+          gpg --quiet --batch --yes --decrypt --passphrase="$LARGE_SECRET_PASSPHRASE" --output devtools/ci/signer.asc devtools/ci/signer.asc.gpg
+          gpg --import devtools/ci/signer.asc
+          devtools/ci/package.sh target/prod/ckb
+          mv ${{ github.workspace }}/releases/ckb_${{env.GIT_TAG_NAME }}_${{env.REL_PKG }} ${{ github.workspace }}
+          mv ${{ github.workspace }}/releases/ckb_${{env.GIT_TAG_NAME }}_${{env.REL_PKG }}.asc ${{ github.workspace }}
+      - name: upload-zip-file
+        uses: actions/upload-artifact@v2
+        with:
+          name: ckb_${{env.GIT_TAG_NAME }}_${{env.REL_PKG }}
+          path: ckb_${{env.GIT_TAG_NAME }}_${{env.REL_PKG }}
+      - name: upload-asc-file
+        uses: actions/upload-artifact@v2
+        with:
+          name: ckb_${{env.GIT_TAG_NAME }}_${{env.REL_PKG }}.asc
+          path: ckb_${{env.GIT_TAG_NAME }}_${{env.REL_PKG }}.asc
     env:
       REL_PKG: ${{ matrix.rel_pkg }}
@@ -273,59 +273,59 @@ jobs:
     name: package-for-windows
     runs-on: windows-2019
     steps:
-    - name: Install Dependencies
-      run: |
-        Set-ExecutionPolicy RemoteSigned -scope CurrentUser
-        iwr -useb get.scoop.sh -outfile 'install-scoop.ps1'
-        .\install-scoop.ps1 -RunAsAdmin
-        scoop install llvm
-        echo ("GIT_TAG_NAME=" + $env:GITHUB_REF.replace('refs/heads/pkg/', '')) >> $env:GITHUB_ENV
-        echo "$env:USERPROFILE\scoop\shims" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append
-        echo "C:\msys64\mingw64\bin" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append
-        echo "LIBCLANG_PATH=$env:USERPROFILE\scoop\apps\llvm\current\bin" >> $env:GITHUB_ENV
-    - uses: actions/checkout@v3
-    - name: Build
-      run: |
-        devtools/windows/make prod
-    - name: Download ckb-cli
-      run: |
-        iwr -useb "https://github.com/nervosnetwork/ckb-cli/releases/download/$($env:CKB_CLI_VERSION)/ckb-cli_$($env:CKB_CLI_VERSION)_x86_64-pc-windows-msvc.zip" -outfile "ckb-cli_$($env:CKB_CLI_VERSION)_x86_64-pc-windows-msvc.zip"
-    - name: Prepare archive
-      run: |
-        $env:GIT_TAG_NAME=($env:GITHUB_REF -split '/')[3]
-        mkdir releases
-        mkdir releases/ckb_$($env:GIT_TAG_NAME)_x86_64-pc-windows-msvc
-        cp -r target/release/ckb.exe,README.md,CHANGELOG.md,COPYING,docs releases/ckb_$($env:GIT_TAG_NAME)_x86_64-pc-windows-msvc
-        cp devtools/windows/ckb-init-mainnet.bat,devtools/windows/ckb-reinit-mainnet.bat,devtools/windows/ckb-run.bat releases/ckb_$($env:GIT_TAG_NAME)_x86_64-pc-windows-msvc
-        cp rpc/README.md releases/ckb_$($env:GIT_TAG_NAME)_x86_64-pc-windows-msvc/docs/rpc.md
-        expand-archive -path ckb-cli_$($env:CKB_CLI_VERSION)_x86_64-pc-windows-msvc.zip -DestinationPath ${{ github.workspace }}
-        mv ckb-cli_$($env:CKB_CLI_VERSION)_x86_64-pc-windows-msvc/ckb-cli.exe releases/ckb_$( $env:GIT_TAG_NAME)_x86_64-pc-windows-msvc/
-    - name: Archive Files
-      run: |
-        $env:GIT_TAG_NAME=($env:GITHUB_REF -split '/')[3]
-        Compress-Archive -Path releases/ckb_$( $env:GIT_TAG_NAME)_x86_64-pc-windows-msvc -DestinationPath releases/ckb_$($env:GIT_TAG_NAME)_$($env:REL_PKG)
-    - name: Sign Archive
-      env:
-        LARGE_SECRET_PASSPHRASE: ${{ secrets.LARGE_SECRET_PASSPHRASE }}
-        GPG_SIGNER: ${{ secrets.GPG_SIGNER }}
-      run: |
-        $CYGPWD = cygpath -u (Get-Location)
-        gpg --quiet --batch --yes --decrypt --passphrase="$env:LARGE_SECRET_PASSPHRASE" --output "$CYGPWD/devtools/ci/signer.asc" "$CYGPWD/devtools/ci/signer.asc.gpg"
-        gpg --import "$CYGPWD/devtools/ci/signer.asc"
-        $env:GIT_TAG_NAME=($env:GITHUB_REF -split '/')[3]
-        gpg -u "$env:GPG_SIGNER" -ab "$CYGPWD/releases/ckb_$($env:GIT_TAG_NAME)_$($env:REL_PKG)"
-        mv ${{ github.workspace }}/releases/ckb_$($env:GIT_TAG_NAME)_$($env:REL_PKG) ${{ github.workspace }}
-        mv ${{ github.workspace }}/releases/ckb_$($env:GIT_TAG_NAME)_$($env:REL_PKG).asc ${{ github.workspace }}
-    - name: upload-artifact
-      uses: actions/upload-artifact@v2
-      with:
-        name: ckb_${{env.GIT_TAG_NAME }}_${{env.REL_PKG }}
-        path: ckb_${{env.GIT_TAG_NAME }}_${{env.REL_PKG }}
-    - name: upload-artifact
-      uses: actions/upload-artifact@v2
-      with:
-        name: ckb_${{env.GIT_TAG_NAME }}_${{env.REL_PKG }}.asc
-        path: ckb_${{env.GIT_TAG_NAME }}_${{env.REL_PKG }}.asc
+      - name: Install Dependencies
+        run: |
+          Set-ExecutionPolicy RemoteSigned -scope CurrentUser
+          iwr -useb get.scoop.sh -outfile 'install-scoop.ps1'
+          .\install-scoop.ps1 -RunAsAdmin
+          scoop install llvm
+          echo ("GIT_TAG_NAME=" + $env:GITHUB_REF.replace('refs/heads/pkg/', '')) >> $env:GITHUB_ENV
+          echo "$env:USERPROFILE\scoop\shims" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append
+          echo "C:\msys64\mingw64\bin" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append
+          echo "LIBCLANG_PATH=$env:USERPROFILE\scoop\apps\llvm\current\bin" >> $env:GITHUB_ENV
+      - uses: actions/checkout@v3
+      - name: Build
+        run: |
+          devtools/windows/make prod
+      - name: Download ckb-cli
+        run: |
+          iwr -useb "https://github.com/nervosnetwork/ckb-cli/releases/download/$($env:CKB_CLI_VERSION)/ckb-cli_$($env:CKB_CLI_VERSION)_x86_64-pc-windows-msvc.zip" -outfile "ckb-cli_$($env:CKB_CLI_VERSION)_x86_64-pc-windows-msvc.zip"
+      - name: Prepare archive
+        run: |
+          $env:GIT_TAG_NAME=($env:GITHUB_REF -split '/')[3]
+          mkdir releases
+          mkdir releases/ckb_$($env:GIT_TAG_NAME)_x86_64-pc-windows-msvc
+          cp -r target/release/ckb.exe,README.md,CHANGELOG.md,COPYING,docs releases/ckb_$($env:GIT_TAG_NAME)_x86_64-pc-windows-msvc
+          cp devtools/windows/ckb-init-mainnet.bat,devtools/windows/ckb-reinit-mainnet.bat,devtools/windows/ckb-run.bat releases/ckb_$($env:GIT_TAG_NAME)_x86_64-pc-windows-msvc
+          cp rpc/README.md releases/ckb_$($env:GIT_TAG_NAME)_x86_64-pc-windows-msvc/docs/rpc.md
+          expand-archive -path ckb-cli_$($env:CKB_CLI_VERSION)_x86_64-pc-windows-msvc.zip -DestinationPath ${{ github.workspace }}
+          mv ckb-cli_$($env:CKB_CLI_VERSION)_x86_64-pc-windows-msvc/ckb-cli.exe releases/ckb_$( $env:GIT_TAG_NAME)_x86_64-pc-windows-msvc/
+      - name: Archive Files
+        run: |
+          $env:GIT_TAG_NAME=($env:GITHUB_REF -split '/')[3]
+          Compress-Archive -Path releases/ckb_$( $env:GIT_TAG_NAME)_x86_64-pc-windows-msvc -DestinationPath releases/ckb_$($env:GIT_TAG_NAME)_$($env:REL_PKG)
+      - name: Sign Archive
+        env:
+          LARGE_SECRET_PASSPHRASE: ${{ secrets.LARGE_SECRET_PASSPHRASE }}
+          GPG_SIGNER: ${{ secrets.GPG_SIGNER }}
+        run: |
+          $CYGPWD = cygpath -u (Get-Location)
+          gpg --quiet --batch --yes --decrypt --passphrase="$env:LARGE_SECRET_PASSPHRASE" --output "$CYGPWD/devtools/ci/signer.asc" "$CYGPWD/devtools/ci/signer.asc.gpg"
+          gpg --import "$CYGPWD/devtools/ci/signer.asc"
+          $env:GIT_TAG_NAME=($env:GITHUB_REF -split '/')[3]
+          gpg -u "$env:GPG_SIGNER" -ab "$CYGPWD/releases/ckb_$($env:GIT_TAG_NAME)_$($env:REL_PKG)"
+          mv ${{ github.workspace }}/releases/ckb_$($env:GIT_TAG_NAME)_$($env:REL_PKG) ${{ github.workspace }}
+          mv ${{ github.workspace }}/releases/ckb_$($env:GIT_TAG_NAME)_$($env:REL_PKG).asc ${{ github.workspace }}
+      - name: upload-artifact
+        uses: actions/upload-artifact@v2
+        with:
+          name: ckb_${{env.GIT_TAG_NAME }}_${{env.REL_PKG }}
+          path: ckb_${{env.GIT_TAG_NAME }}_${{env.REL_PKG }}
+      - name: upload-artifact
+        uses: actions/upload-artifact@v2
+        with:
+          name: ckb_${{env.GIT_TAG_NAME }}_${{env.REL_PKG }}.asc
+          path: ckb_${{env.GIT_TAG_NAME }}_${{env.REL_PKG }}.asc
     env:
       REL_PKG: x86_64-pc-windows-msvc.zip
@@ -354,37 +354,37 @@ jobs:
       - package-for-windows
       - package-for-centos
     steps:
-    - uses: actions/checkout@v3
-    - name: Set tag
-      run: |
-        export GIT_TAG_NAME=` echo ${{ github.ref }} | awk -F '/' '{print $4}' `
-        echo "GIT_TAG_NAME=$GIT_TAG_NAME" >> $GITHUB_ENV
-    - name: Prepare - Download tar
-      uses: actions/download-artifact@v2
-      with:
-        name: ckb_${{env.GIT_TAG_NAME}}_${{ matrix.REL_PKG }}
-    - name: Prepare - Download asc
-      uses: actions/download-artifact@v2
-      with:
-        name: ckb_${{env.GIT_TAG_NAME}}_${{ matrix.REL_PKG }}.asc
-    - name: Upload tar assets
-      uses: actions/upload-release-asset@v1
-      env:
-        GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-      with:
-        upload_url: ${{ needs.create-release.outputs.upload_url }}
-        asset_name: ckb_${{env.GIT_TAG_NAME}}_${{ matrix.REL_PKG }}
-        asset_path: ${{ github.workspace }}/ckb_${{env.GIT_TAG_NAME }}_${{ matrix.REL_PKG }}
-        asset_content_type: application/octet-stream
-    - name: Upload asc assets
-      uses: actions/upload-release-asset@v1
-      env:
-        GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-      with:
-        upload_url: ${{ needs.create-release.outputs.upload_url }}
-        asset_name: ckb_${{env.GIT_TAG_NAME}}_${{ matrix.REL_PKG }}.asc
-        asset_path: ${{ github.workspace }}/ckb_${{env.GIT_TAG_NAME }}_${{ matrix.REL_PKG }}.asc
-        asset_content_type: application/octet-stream
+      - uses: actions/checkout@v3
+      - name: Set tag
+        run: |
+          export GIT_TAG_NAME=` echo ${{ github.ref }} | awk -F '/' '{print $4}' `
+          echo "GIT_TAG_NAME=$GIT_TAG_NAME" >> $GITHUB_ENV
+      - name: Prepare - Download tar
+        uses: actions/download-artifact@v2
+        with:
+          name: ckb_${{env.GIT_TAG_NAME}}_${{ matrix.REL_PKG }}
+      - name: Prepare - Download asc
+        uses: actions/download-artifact@v2
+        with:
+          name: ckb_${{env.GIT_TAG_NAME}}_${{ matrix.REL_PKG }}.asc
+      - name: Upload tar assets
+        uses: actions/upload-release-asset@v1
+        env:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+        with:
+          upload_url: ${{ needs.create-release.outputs.upload_url }}
+          asset_name: ckb_${{env.GIT_TAG_NAME}}_${{ matrix.REL_PKG }}
+          asset_path: ${{ github.workspace }}/ckb_${{env.GIT_TAG_NAME }}_${{ matrix.REL_PKG }}
+          asset_content_type: application/octet-stream
+      - name: Upload asc assets
+        uses: actions/upload-release-asset@v1
+        env:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+        with:
+          upload_url: ${{ needs.create-release.outputs.upload_url }}
+          asset_name: ckb_${{env.GIT_TAG_NAME}}_${{ matrix.REL_PKG }}.asc
+          asset_path: ${{ github.workspace }}/ckb_${{env.GIT_TAG_NAME }}_${{ matrix.REL_PKG }}.asc
+          asset_content_type: application/octet-stream
 
   Trigger_smoking_test:
     name: Trigger_smoking_test
@@ -392,13 +392,13 @@ jobs:
     needs:
       - Upload_File
     steps:
-    - uses: actions/checkout@v3
-    - name: Set tag
-      run: |
-        export GIT_TAG_NAME=` echo ${{ github.ref }} | awk -F '/' '{print $4}' `
-        echo "GIT_TAG_NAME=$GIT_TAG_NAME" >> $GITHUB_ENV
-    - uses: peter-evans/repository-dispatch@v1
-      with:
-        token: ${{ secrets.REPO_ACCESS_TOKEN }}
-        event-type: smoking-test
-        client-payload: '{"CKB_linux_release_package": "${{secrets.QINIU_CDN_SITE}}/ckb_${{env.GIT_TAG_NAME}}_x86_64-unknown-linux-gnu.7z"}'
+      - uses: actions/checkout@v3
+      - name: Set tag
+        run: |
+          export GIT_TAG_NAME=` echo ${{ github.ref }} | awk -F '/' '{print $4}' `
+          echo "GIT_TAG_NAME=$GIT_TAG_NAME" >> $GITHUB_ENV
+      - uses: peter-evans/repository-dispatch@v1
+        with:
+          token: ${{ secrets.REPO_ACCESS_TOKEN }}
+          event-type: smoking-test
+          client-payload: '{"CKB_linux_release_package": "${{secrets.QINIU_CDN_SITE}}/ckb_${{env.GIT_TAG_NAME}}_x86_64-unknown-linux-gnu.7z"}'
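Every packaging job above signs its artifact the same way before upload. A minimal standalone sketch of that decrypt-import-sign sequence (assuming, as in the workflow, a passphrase-encrypted `devtools/ci/signer.asc.gpg` plus the `LARGE_SECRET_PASSPHRASE` and `GPG_SIGNER` secrets; the archive path is a placeholder):

```bash
#!/usr/bin/env bash
# Hedged sketch of the release-signing flow used by the packaging jobs above.
set -euo pipefail

ARCHIVE="$1"  # e.g. a ckb_<tag>_<target> archive produced by devtools/ci/package.sh

# Decrypt and import the release-signing key, which is kept encrypted in the repo.
gpg --quiet --batch --yes --decrypt \
    --passphrase="$LARGE_SECRET_PASSPHRASE" \
    --output devtools/ci/signer.asc devtools/ci/signer.asc.gpg
gpg --import devtools/ci/signer.asc

# Produce a detached ASCII-armored signature next to the archive ("$ARCHIVE.asc").
gpg -u "$GPG_SIGNER" -ab "$ARCHIVE"
```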
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 221d0d744b..6fa79b7f29 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,29 @@
 # CHANGELOG
 
+## [v0.117.0](https://github.com/nervosnetwork/ckb/compare/v0.116.1...v0.117.0) (2024-07-29)
+
+### Features
+
+- #4454: Add `include_tx_pool: Option<bool>` param to `ChainRpcImpl::get_live_cell` (@eval-exec)
+
+  This is a breaking change to the RPC.
+
+- #4486: Add `assume_valid_target_reached: bool` to `NetRpc::sync_state` (@eval-exec)
+
+  This is a breaking change to the RPC.
+
+### Bug Fixes
+
+- #4484: Fix rich indexer `partial` query-by-args performance issue (@EthanYuan)
+- #4505: Fix websocket subscription performance issue (@chenyukang)
+
+### Improvements
+
+- #4487: Tweak `max_ancestors_count` (@zhangsoledad)
+- #4459: Use a standalone runtime for the RPC service (@chenyukang)
+- #4511: Narrow the scope of tx-pool reject records and fix the rejection rule for orphan txs (@zhangsoledad)
+- #4531: Return early from `process_fetch_cmd` if ckb received the exit signal (@eval-exec)
+
 ## [v0.116.1](https://github.com/nervosnetwork/ckb/compare/v0.115.0...v0.116.1) (2024-05-11)
 
 ### Features
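The two breaking RPC changes are easiest to read as requests. A hedged sketch against a local node (assuming the new flag rides as a third positional parameter of `get_live_cell`, per the changelog entry; the `tx_hash` below is a placeholder, and the authoritative schemas live in `rpc/README.md`):

```bash
# get_live_cell with include_tx_pool: Option<bool> — when true, the lookup
# may also reflect tx-pool state rather than only the committed chain.
curl -s http://127.0.0.1:8114 -H 'content-type: application/json' -d '{
  "id": 1,
  "jsonrpc": "2.0",
  "method": "get_live_cell",
  "params": [{"tx_hash": "0x...", "index": "0x0"}, false, true]
}'

# sync_state now additionally reports assume_valid_target_reached: bool,
# i.e. whether sync has passed the assume-valid target block.
curl -s http://127.0.0.1:8114 -H 'content-type: application/json' \
  -d '{"id": 2, "jsonrpc": "2.0", "method": "sync_state", "params": []}'
```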
"ckb-db-schema" -version = "0.117.0-pre" +version = "0.118.0-pre" [[package]] name = "ckb-error" -version = "0.117.0-pre" +version = "0.118.0-pre" dependencies = [ "anyhow", "ckb-occupied-capacity", @@ -875,7 +884,7 @@ dependencies = [ [[package]] name = "ckb-fixed-hash" -version = "0.117.0-pre" +version = "0.118.0-pre" dependencies = [ "ckb-fixed-hash-core", "ckb-fixed-hash-macros", @@ -883,7 +892,7 @@ dependencies = [ [[package]] name = "ckb-fixed-hash-core" -version = "0.117.0-pre" +version = "0.118.0-pre" dependencies = [ "ckb_schemars", "faster-hex", @@ -894,7 +903,7 @@ dependencies = [ [[package]] name = "ckb-fixed-hash-macros" -version = "0.117.0-pre" +version = "0.118.0-pre" dependencies = [ "ckb-fixed-hash-core", "proc-macro2", @@ -904,7 +913,7 @@ dependencies = [ [[package]] name = "ckb-freezer" -version = "0.117.0-pre" +version = "0.118.0-pre" dependencies = [ "ckb-error", "ckb-logger", @@ -920,7 +929,7 @@ dependencies = [ [[package]] name = "ckb-gen-types" -version = "0.117.0-pre" +version = "0.118.0-pre" dependencies = [ "cfg-if", "ckb-error", @@ -933,7 +942,7 @@ dependencies = [ [[package]] name = "ckb-hash" -version = "0.117.0-pre" +version = "0.118.0-pre" dependencies = [ "blake2b-ref", "blake2b-rs", @@ -941,7 +950,7 @@ dependencies = [ [[package]] name = "ckb-indexer" -version = "0.117.0-pre" +version = "0.118.0-pre" dependencies = [ "ckb-app-config", "ckb-async-runtime", @@ -958,7 +967,7 @@ dependencies = [ [[package]] name = "ckb-indexer-sync" -version = "0.117.0-pre" +version = "0.118.0-pre" dependencies = [ "ckb-app-config", "ckb-async-runtime", @@ -978,7 +987,7 @@ dependencies = [ [[package]] name = "ckb-instrument" -version = "0.117.0-pre" +version = "0.118.0-pre" dependencies = [ "ckb-chain", "ckb-chain-iter", @@ -991,7 +1000,7 @@ dependencies = [ [[package]] name = "ckb-jsonrpc-types" -version = "0.117.0-pre" +version = "0.118.0-pre" dependencies = [ "ckb-types", "ckb_schemars", @@ -1005,7 +1014,7 @@ dependencies = [ [[package]] name = "ckb-launcher" -version = "0.117.0-pre" +version = "0.118.0-pre" dependencies = [ "ckb-app-config", "ckb-async-runtime", @@ -1018,7 +1027,6 @@ dependencies = [ "ckb-logger", "ckb-network", "ckb-network-alert", - "ckb-proposal-table", "ckb-resource", "ckb-rpc", "ckb-shared", @@ -1046,7 +1054,7 @@ dependencies = [ [[package]] name = "ckb-light-client-protocol-server" -version = "0.117.0-pre" +version = "0.118.0-pre" dependencies = [ "ckb-app-config", "ckb-chain", @@ -1068,14 +1076,14 @@ dependencies = [ [[package]] name = "ckb-logger" -version = "0.117.0-pre" +version = "0.118.0-pre" dependencies = [ "log", ] [[package]] name = "ckb-logger-config" -version = "0.117.0-pre" +version = "0.118.0-pre" dependencies = [ "serde", "toml", @@ -1083,7 +1091,7 @@ dependencies = [ [[package]] name = "ckb-logger-service" -version = "0.117.0-pre" +version = "0.118.0-pre" dependencies = [ "backtrace", "ckb-channel", @@ -1102,7 +1110,7 @@ dependencies = [ [[package]] name = "ckb-memory-tracker" -version = "0.117.0-pre" +version = "0.118.0-pre" dependencies = [ "ckb-db", "ckb-logger", @@ -1124,7 +1132,7 @@ dependencies = [ [[package]] name = "ckb-metrics" -version = "0.117.0-pre" +version = "0.118.0-pre" dependencies = [ "once_cell", "prometheus", @@ -1133,14 +1141,14 @@ dependencies = [ [[package]] name = "ckb-metrics-config" -version = "0.117.0-pre" +version = "0.118.0-pre" dependencies = [ "serde", ] [[package]] name = "ckb-metrics-service" -version = "0.117.0-pre" +version = "0.118.0-pre" dependencies = [ "ckb-async-runtime", "ckb-logger", @@ 
-1154,7 +1162,7 @@ dependencies = [ [[package]] name = "ckb-migrate" -version = "0.117.0-pre" +version = "0.118.0-pre" dependencies = [ "ckb-app-config", "ckb-chain-spec", @@ -1173,7 +1181,7 @@ dependencies = [ [[package]] name = "ckb-migration-template" -version = "0.117.0-pre" +version = "0.118.0-pre" dependencies = [ "quote", "syn 1.0.109", @@ -1181,7 +1189,7 @@ dependencies = [ [[package]] name = "ckb-miner" -version = "0.117.0-pre" +version = "0.118.0-pre" dependencies = [ "base64 0.21.7", "ckb-app-config", @@ -1210,7 +1218,7 @@ dependencies = [ [[package]] name = "ckb-multisig" -version = "0.117.0-pre" +version = "0.118.0-pre" dependencies = [ "ckb-crypto", "ckb-error", @@ -1220,7 +1228,7 @@ dependencies = [ [[package]] name = "ckb-network" -version = "0.117.0-pre" +version = "0.118.0-pre" dependencies = [ "bitflags 1.3.2", "bloom-filters", @@ -1258,7 +1266,7 @@ dependencies = [ [[package]] name = "ckb-network-alert" -version = "0.117.0-pre" +version = "0.118.0-pre" dependencies = [ "ckb-app-config", "ckb-async-runtime", @@ -1281,7 +1289,7 @@ dependencies = [ [[package]] name = "ckb-notify" -version = "0.117.0-pre" +version = "0.118.0-pre" dependencies = [ "ckb-app-config", "ckb-async-runtime", @@ -1293,7 +1301,7 @@ dependencies = [ [[package]] name = "ckb-occupied-capacity" -version = "0.117.0-pre" +version = "0.118.0-pre" dependencies = [ "ckb-occupied-capacity-core", "ckb-occupied-capacity-macros", @@ -1301,14 +1309,14 @@ dependencies = [ [[package]] name = "ckb-occupied-capacity-core" -version = "0.117.0-pre" +version = "0.118.0-pre" dependencies = [ "serde", ] [[package]] name = "ckb-occupied-capacity-macros" -version = "0.117.0-pre" +version = "0.118.0-pre" dependencies = [ "ckb-occupied-capacity-core", "quote", @@ -1317,7 +1325,7 @@ dependencies = [ [[package]] name = "ckb-pow" -version = "0.117.0-pre" +version = "0.118.0-pre" dependencies = [ "byteorder", "ckb-hash", @@ -1329,7 +1337,7 @@ dependencies = [ [[package]] name = "ckb-proposal-table" -version = "0.117.0-pre" +version = "0.118.0-pre" dependencies = [ "ckb-chain-spec", "ckb-logger", @@ -1338,7 +1346,7 @@ dependencies = [ [[package]] name = "ckb-rational" -version = "0.117.0-pre" +version = "0.118.0-pre" dependencies = [ "numext-fixed-uint", "proptest", @@ -1347,7 +1355,7 @@ dependencies = [ [[package]] name = "ckb-resource" -version = "0.117.0-pre" +version = "0.118.0-pre" dependencies = [ "ckb-system-scripts", "ckb-types", @@ -1361,7 +1369,7 @@ dependencies = [ [[package]] name = "ckb-reward-calculator" -version = "0.117.0-pre" +version = "0.118.0-pre" dependencies = [ "ckb-chain-spec", "ckb-dao", @@ -1377,7 +1385,7 @@ dependencies = [ [[package]] name = "ckb-rich-indexer" -version = "0.117.0-pre" +version = "0.118.0-pre" dependencies = [ "anyhow", "ckb-app-config", @@ -1413,7 +1421,7 @@ dependencies = [ [[package]] name = "ckb-rpc" -version = "0.117.0-pre" +version = "0.118.0-pre" dependencies = [ "async-stream", "async-trait", @@ -1467,7 +1475,7 @@ dependencies = [ [[package]] name = "ckb-rpc-gen" -version = "0.117.0-pre" +version = "0.118.0-pre" dependencies = [ "ckb-rpc", "ckb_schemars", @@ -1480,7 +1488,7 @@ dependencies = [ [[package]] name = "ckb-script" -version = "0.117.0-pre" +version = "0.118.0-pre" dependencies = [ "byteorder", "ckb-chain-spec", @@ -1495,19 +1503,23 @@ dependencies = [ "ckb-traits", "ckb-types", "ckb-vm", + "daggy", "faster-hex", + "molecule", "proptest", "rand 0.8.5", "serde", "tempfile", "tiny-keccak", + "tokio", ] [[package]] name = "ckb-shared" -version = "0.117.0-pre" +version = 
"0.118.0-pre" dependencies = [ "arc-swap", + "bitflags 1.3.2", "ckb-app-config", "ckb-async-runtime", "ckb-chain-spec", @@ -1517,6 +1529,7 @@ dependencies = [ "ckb-db-schema", "ckb-error", "ckb-logger", + "ckb-metrics", "ckb-migrate", "ckb-notify", "ckb-proposal-table", @@ -1526,14 +1539,18 @@ dependencies = [ "ckb-systemtime", "ckb-tx-pool", "ckb-types", + "ckb-util", "ckb-verification", + "dashmap", "once_cell", + "sled", "tempfile", + "tokio", ] [[package]] name = "ckb-snapshot" -version = "0.117.0-pre" +version = "0.118.0-pre" dependencies = [ "arc-swap", "ckb-chain-spec", @@ -1549,11 +1566,11 @@ dependencies = [ [[package]] name = "ckb-spawn" -version = "0.117.0-pre" +version = "0.118.0-pre" [[package]] name = "ckb-stop-handler" -version = "0.117.0-pre" +version = "0.118.0-pre" dependencies = [ "ckb-async-runtime", "ckb-channel", @@ -1569,7 +1586,7 @@ dependencies = [ [[package]] name = "ckb-store" -version = "0.117.0-pre" +version = "0.118.0-pre" dependencies = [ "ckb-app-config", "ckb-chain-spec", @@ -1587,11 +1604,9 @@ dependencies = [ [[package]] name = "ckb-sync" -version = "0.117.0-pre" +version = "0.118.0-pre" dependencies = [ - "bitflags 1.3.2", "ckb-app-config", - "ckb-async-runtime", "ckb-chain", "ckb-chain-spec", "ckb-channel", @@ -1600,6 +1615,7 @@ dependencies = [ "ckb-dao-utils", "ckb-error", "ckb-logger", + "ckb-logger-service", "ckb-metrics", "ckb-network", "ckb-proposal-table", @@ -1625,7 +1641,6 @@ dependencies = [ "once_cell", "rand 0.8.5", "sentry", - "sled", "tempfile", "tokio", ] @@ -1645,11 +1660,11 @@ dependencies = [ [[package]] name = "ckb-systemtime" -version = "0.117.0-pre" +version = "0.118.0-pre" [[package]] name = "ckb-test-chain-utils" -version = "0.117.0-pre" +version = "0.118.0-pre" dependencies = [ "ckb-chain-spec", "ckb-dao", @@ -1668,14 +1683,14 @@ dependencies = [ [[package]] name = "ckb-traits" -version = "0.117.0-pre" +version = "0.118.0-pre" dependencies = [ "ckb-types", ] [[package]] name = "ckb-tx-pool" -version = "0.117.0-pre" +version = "0.118.0-pre" dependencies = [ "ckb-app-config", "ckb-async-runtime", @@ -1690,6 +1705,7 @@ dependencies = [ "ckb-metrics", "ckb-network", "ckb-reward-calculator", + "ckb-script", "ckb-snapshot", "ckb-stop-handler", "ckb-store", @@ -1701,6 +1717,7 @@ dependencies = [ "hyper", "lru", "multi_index_map", + "num_cpus", "rand 0.8.5", "rustc-hash", "sentry", @@ -1713,7 +1730,7 @@ dependencies = [ [[package]] name = "ckb-types" -version = "0.117.0-pre" +version = "0.118.0-pre" dependencies = [ "bit-vec", "bytes", @@ -1738,7 +1755,7 @@ dependencies = [ [[package]] name = "ckb-util" -version = "0.117.0-pre" +version = "0.118.0-pre" dependencies = [ "ckb-fixed-hash", "linked-hash-map", @@ -1749,7 +1766,7 @@ dependencies = [ [[package]] name = "ckb-verification" -version = "0.117.0-pre" +version = "0.118.0-pre" dependencies = [ "ckb-chain-spec", "ckb-dao", @@ -1765,11 +1782,12 @@ dependencies = [ "ckb-verification-traits", "derive_more", "lru", + "tokio", ] [[package]] name = "ckb-verification-contextual" -version = "0.117.0-pre" +version = "0.118.0-pre" dependencies = [ "ckb-async-runtime", "ckb-chain", @@ -1795,7 +1813,7 @@ dependencies = [ [[package]] name = "ckb-verification-traits" -version = "0.117.0-pre" +version = "0.118.0-pre" dependencies = [ "bitflags 1.3.2", "ckb-error", @@ -1803,9 +1821,9 @@ dependencies = [ [[package]] name = "ckb-vm" -version = "0.24.9" +version = "0.24.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"a2c3d68dc7f891e5555c7ebc054722b28ab005e51c5076f54c20d36002dc8e83" +checksum = "ddff96029d3298cb630e95f29d4b9a93384e938a0b75758684aa8794b53bdd1a" dependencies = [ "byteorder", "bytes", @@ -1821,9 +1839,9 @@ dependencies = [ [[package]] name = "ckb-vm-definitions" -version = "0.24.9" +version = "0.24.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2fdf9c8ee14409b2208d23b9ad88828242d7881153ddc04872b66d2e018a52f" +checksum = "c280bf1d589d23ab0358f58601c2187fc6be86a131644583ef72ea96a0a13ddd" dependencies = [ "paste", ] @@ -2077,6 +2095,19 @@ dependencies = [ "itertools 0.10.5", ] +[[package]] +name = "crossbeam" +version = "0.8.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1137cd7e7fc0fb5d3c5a8678be38ec56e819125d8d7907411fe24ccb943faca8" +dependencies = [ + "crossbeam-channel", + "crossbeam-deque", + "crossbeam-epoch", + "crossbeam-queue", + "crossbeam-utils", +] + [[package]] name = "crossbeam-channel" version = "0.5.13" @@ -2137,6 +2168,16 @@ dependencies = [ "typenum", ] +[[package]] +name = "ctor" +version = "0.1.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d2301688392eb071b0bf1a37be05c469d3cc4dbbd95df672fe28ab021e6a096" +dependencies = [ + "quote", + "syn 1.0.109", +] + [[package]] name = "ctrlc" version = "3.4.4" @@ -2188,6 +2229,15 @@ dependencies = [ "libc", ] +[[package]] +name = "daggy" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "91a9304e55e9d601a39ae4deaba85406d5c0980e106f65afcf0460e9af1e7602" +dependencies = [ + "petgraph", +] + [[package]] name = "darling" version = "0.20.8" @@ -2260,15 +2310,15 @@ dependencies = [ [[package]] name = "derive_more" -version = "0.99.17" +version = "0.99.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fb810d30a7c1953f91334de7244731fc3f3c10d7fe163338a35b9f640960321" +checksum = "5f33878137e4dafd7fa914ad4e259e18a4e8e532b9617a2d0150262bf53abfce" dependencies = [ "convert_case 0.4.0", "proc-macro2", "quote", "rustc_version", - "syn 1.0.109", + "syn 2.0.52", ] [[package]] @@ -2988,9 +3038,9 @@ checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" [[package]] name = "hyper" -version = "0.14.28" +version = "0.14.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf96e135eb83a2a8ddf766e426a841d8ddd7449d5f00d34ea02b41d2f19eef80" +checksum = "a152ddd61dfaec7273fe8419ab357f33aee0d914c5f4efbf0d96fa749eea5ec9" dependencies = [ "bytes", "futures-channel", @@ -3563,6 +3613,16 @@ dependencies = [ "adler", ] +[[package]] +name = "minstant" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1fb9b5c752f145ac5046bccc3c4f62892e3c950c1d1eab80c5949cd68a2078db" +dependencies = [ + "ctor", + "web-time", +] + [[package]] name = "mio" version = "0.8.11" @@ -4597,9 +4657,9 @@ dependencies = [ [[package]] name = "rhai" -version = "1.17.1" +version = "1.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6273372244d04a8a4b0bec080ea1e710403e88c5d9d83f9808b2bfa64f0982a" +checksum = "61797318be89b1a268a018a92a7657096d83f3ecb31418b9e9c16dcbb043b702" dependencies = [ "ahash 0.8.11", "bitflags 2.4.2", @@ -4614,9 +4674,9 @@ dependencies = [ [[package]] name = "rhai_codegen" -version = "2.0.0" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9db7f8dc4c9d48183a17ce550574c42995252b82d267eaca3fcd1b979159856c" 
+checksum = "a5a11a05ee1ce44058fa3d5961d05194fdbe3ad6b40f904af764d81b86450e6b" dependencies = [ "proc-macro2", "quote", @@ -6092,9 +6152,9 @@ checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" [[package]] name = "url" -version = "2.5.0" +version = "2.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31e6302e3bb753d46e83516cae55ae196fc0c309407cf11ab35cc51a4c2a4633" +checksum = "22784dbdf76fdde8af1aeda5622b546b422b6fc585325248a2bf9f5e41e94d6c" dependencies = [ "form_urlencoded", "idna 0.5.0", @@ -6264,6 +6324,16 @@ dependencies = [ "wasm-bindgen", ] +[[package]] +name = "web-time" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a6580f308b1fad9207618087a65c04e7a10bc77e02c8e84e9b00dd4b12fa0bb" +dependencies = [ + "js-sys", + "wasm-bindgen", +] + [[package]] name = "webpki" version = "0.22.4" diff --git a/Cargo.toml b/Cargo.toml index 4f04646ca8..146c00dce1 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ckb" -version = "0.117.0-pre" +version = "0.118.0-pre" license = "MIT" authors = ["Nervos Core Dev "] edition = "2021" @@ -11,11 +11,11 @@ repository = "https://github.com/nervosnetwork/ckb" rust-version = "1.75.0" [build-dependencies] -ckb-build-info = { path = "util/build-info", version = "= 0.117.0-pre" } +ckb-build-info = { path = "util/build-info", version = "= 0.118.0-pre" } [dependencies] -ckb-build-info = { path = "util/build-info", version = "= 0.117.0-pre" } -ckb-bin = { path = "ckb-bin", version = "= 0.117.0-pre" } +ckb-build-info = { path = "util/build-info", version = "= 0.118.0-pre" } +ckb-bin = { path = "ckb-bin", version = "= 0.118.0-pre" } console-subscriber = { version = "0.2.0", optional = true } [dev-dependencies] diff --git a/Makefile b/Makefile index 8013f6c8a3..91ef1c2a5d 100644 --- a/Makefile +++ b/Makefile @@ -4,7 +4,7 @@ MOLC := moleculec MOLC_VERSION := 0.7.5 VERBOSE := $(if ${CI},--verbose,) CLIPPY_OPTS := -D warnings -D clippy::clone_on_ref_ptr -D clippy::redundant_clone -D clippy::enum_glob_use -D clippy::fallible_impl_from \ - -A clippy::mutable_key_type -A clippy::upper_case_acronyms + -A clippy::mutable_key_type -A clippy::upper_case_acronyms -A clippy::needless_return CKB_TEST_ARGS := -c 4 ${CKB_TEST_ARGS} CKB_FEATURES ?= deadlock_detection,with_sentry ALL_FEATURES := deadlock_detection,with_sentry,with_dns_seeding,profiling,march-native @@ -125,13 +125,13 @@ check: setup-ckb-test ## Runs all of the compiler's checks. build: ## Build binary with release profile. cargo build ${VERBOSE} --release -.PHONY: build-for-profiling-without-debug-symbols -build-for-profiling-without-debug-symbols: ## Build binary with for profiling without debug symbols. - JEMALLOC_SYS_WITH_MALLOC_CONF="prof:true" cargo build ${VERBOSE} --release --features "profiling" +.PHONY: profiling +profiling: ## Build binary with for profiling without debug symbols. + JEMALLOC_SYS_WITH_MALLOC_CONF="prof:true" cargo build ${VERBOSE} --profile prod --features "with_sentry,with_dns_seeding,profiling" -.PHONY: build-for-profiling +.PHONY: profiling-with-debug-symbols build-for-profiling: ## Build binary with for profiling. - devtools/release/make-with-debug-symbols build-for-profiling-without-debug-symbols + devtools/release/make-with-debug-symbols profiling .PHONY: prod prod: ## Build binary for production release. 
diff --git a/README.md b/README.md
index 986a7f26f5..b737657846 100644
--- a/README.md
+++ b/README.md
@@ -1,6 +1,6 @@
 # [Nervos CKB](https://www.nervos.org/) - The Common Knowledge Base
 
-[![Version](https://img.shields.io/badge/version-0.117.0--pre-orange.svg)](https://github.com/nervosnetwork/ckb/releases)
+[![Version](https://img.shields.io/badge/version-0.118.0--pre-orange.svg)](https://github.com/nervosnetwork/ckb/releases)
 [![Nervos Talk](https://img.shields.io/badge/discuss-on%20Nervos%20Talk-3CC68A.svg)](https://talk.nervos.org/t/where-to-discuss-ckb-and-how-to-ask-for-support/6024)
 
 master | develop
diff --git a/benches/Cargo.toml b/benches/Cargo.toml
index 8d8fa37d6a..3e2dd26aa6 100644
--- a/benches/Cargo.toml
+++ b/benches/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "ckb-benches"
-version = "0.117.0-pre"
+version = "0.118.0-pre"
 license = "MIT"
 authors = ["Nervos Core Dev <dev@nervos.org>"]
 edition = "2021"
@@ -13,25 +13,25 @@ repository = "https://github.com/nervosnetwork/ckb"
 
 [dev-dependencies]
 criterion = "0.5"
-ckb-chain = { path = "../chain", version = "= 0.117.0-pre" }
-ckb-types = { path = "../util/types", version = "= 0.117.0-pre" }
-ckb-shared = { path = "../shared", version = "= 0.117.0-pre" }
-ckb-store = { path = "../store", version = "= 0.117.0-pre" }
-ckb-chain-spec = { path = "../spec", version = "= 0.117.0-pre" }
+ckb-chain = { path = "../chain", version = "= 0.118.0-pre" }
+ckb-types = { path = "../util/types", version = "= 0.118.0-pre" }
+ckb-shared = { path = "../shared", version = "= 0.118.0-pre" }
+ckb-store = { path = "../store", version = "= 0.118.0-pre" }
+ckb-chain-spec = { path = "../spec", version = "= 0.118.0-pre" }
 rand = "0.8"
-ckb-hash = { path = "../util/hash", version = "= 0.117.0-pre" }
-ckb-test-chain-utils = { path = "../util/test-chain-utils", version = "= 0.117.0-pre" }
-ckb-dao-utils = { path = "../util/dao/utils", version = "= 0.117.0-pre" }
-ckb-dao = { path = "../util/dao", version = "= 0.117.0-pre" }
+ckb-hash = { path = "../util/hash", version = "= 0.118.0-pre" }
+ckb-test-chain-utils = { path = "../util/test-chain-utils", version = "= 0.118.0-pre" }
+ckb-dao-utils = { path = "../util/dao/utils", version = "= 0.118.0-pre" }
+ckb-dao = { path = "../util/dao", version = "= 0.118.0-pre" }
 ckb-system-scripts = { version = "= 0.5.4" }
 lazy_static = "1.3.0"
-ckb-crypto = { path = "../util/crypto", version = "= 0.117.0-pre" }
-ckb-jsonrpc-types = { path = "../util/jsonrpc-types", version = "= 0.117.0-pre" }
-ckb-verification = { path = "../verification", version = "= 0.117.0-pre" }
-ckb-verification-traits = { path = "../verification/traits", version = "= 0.117.0-pre" }
-ckb-app-config = { path = "../util/app-config", version = "= 0.117.0-pre" }
-ckb-resource = { path = "../resource", version = "= 0.117.0-pre" }
-ckb-network = { path = "../network", version = "= 0.117.0-pre" }
+ckb-crypto = { path = "../util/crypto", version = "= 0.118.0-pre" }
+ckb-jsonrpc-types = { path = "../util/jsonrpc-types", version = "= 0.118.0-pre" }
+ckb-verification = { path = "../verification", version = "= 0.118.0-pre" }
+ckb-verification-traits = { path = "../verification/traits", version = "= 0.118.0-pre" }
+ckb-app-config = { path = "../util/app-config", version = "= 0.118.0-pre" }
+ckb-resource = { path = "../resource", version = "= 0.118.0-pre" }
+ckb-network = { path = "../network", version = "= 0.118.0-pre" }
 tempfile.workspace = true
 
 [[bench]]
diff --git a/benches/benches/benchmarks/always_success.rs b/benches/benches/benchmarks/always_success.rs
index 111766000f..33ed8bda8e 100644
--- a/benches/benches/benchmarks/always_success.rs
+++ b/benches/benches/benchmarks/always_success.rs
@@ -32,7 +32,7 @@ fn bench(c: &mut Criterion) {
                 (0..20).for_each(|_| {
                     let block = gen_always_success_block(&mut blocks, &parent, shared2);
                     chain2
-                        .internal_process_block(
+                        .blocking_process_block_with_switch(
                             Arc::new(block.clone()),
                             Switch::DISABLE_ALL,
                         )
@@ -44,7 +44,10 @@ fn bench(c: &mut Criterion) {
             |(chain, blocks)| {
                 blocks.into_iter().skip(1).for_each(|block| {
                     chain
-                        .internal_process_block(Arc::new(block), Switch::DISABLE_EXTENSION)
+                        .blocking_process_block_with_switch(
+                            Arc::new(block),
+                            Switch::DISABLE_EXTENSION,
+                        )
                         .expect("process block OK");
                 });
             },
@@ -77,14 +80,14 @@ fn bench(c: &mut Criterion) {
                 (0..5).for_each(|i| {
                     let block = gen_always_success_block(&mut blocks, &parent, shared2);
                     chain2
-                        .internal_process_block(
+                        .blocking_process_block_with_switch(
                             Arc::new(block.clone()),
                             Switch::DISABLE_ALL,
                         )
                         .expect("process block OK");
                     if i < 2 {
                         chain3
-                            .internal_process_block(
+                            .blocking_process_block_with_switch(
                                 Arc::new(block.clone()),
                                 Switch::DISABLE_ALL,
                             )
@@ -96,7 +99,7 @@ fn bench(c: &mut Criterion) {
                 (0..2).for_each(|_| {
                     let block = gen_always_success_block(&mut blocks, &parent, shared3);
                     chain3
-                        .internal_process_block(
+                        .blocking_process_block_with_switch(
                             Arc::new(block.clone()),
                             Switch::DISABLE_ALL,
                         )
@@ -110,7 +113,10 @@ fn bench(c: &mut Criterion) {
                     .take(5)
                     .for_each(|block| {
                         chain1
-                            .internal_process_block(Arc::new(block), Switch::DISABLE_ALL)
+                            .blocking_process_block_with_switch(
+                                Arc::new(block),
+                                Switch::DISABLE_ALL,
+                            )
                             .expect("process block OK");
                     });
                 (chain1.clone(), blocks)
@@ -118,7 +124,7 @@ fn bench(c: &mut Criterion) {
             |(chain, blocks)| {
                 blocks.into_iter().skip(6).for_each(|block| {
                     chain
-                        .process_block(Arc::new(block))
+                        .blocking_process_block(Arc::new(block))
                        .expect("process block OK");
                 });
             },
@@ -152,11 +158,17 @@ fn bench(c: &mut Criterion) {
                     let block = gen_always_success_block(&mut blocks, &parent, shared2);
                     let arc_block = Arc::new(block.clone());
                     chain2
-                        .internal_process_block(Arc::clone(&arc_block), Switch::DISABLE_ALL)
+                        .blocking_process_block_with_switch(
+                            Arc::clone(&arc_block),
+                            Switch::DISABLE_ALL,
+                        )
                         .expect("process block OK");
                     if i < 2 {
                         chain3
-                            .internal_process_block(arc_block, Switch::DISABLE_ALL)
+                            .blocking_process_block_with_switch(
+                                arc_block,
+                                Switch::DISABLE_ALL,
+                            )
                             .expect("process block OK");
                     }
                     parent = block;
@@ -165,7 +177,7 @@ fn bench(c: &mut Criterion) {
                 (0..4).for_each(|_| {
                     let block = gen_always_success_block(&mut blocks, &parent, shared3);
                     chain3
-                        .internal_process_block(
+                        .blocking_process_block_with_switch(
                             Arc::new(block.clone()),
                             Switch::DISABLE_ALL,
                         )
@@ -179,7 +191,10 @@ fn bench(c: &mut Criterion) {
                     .take(7)
                     .for_each(|block| {
                         chain1
-                            .internal_process_block(Arc::new(block), Switch::DISABLE_ALL)
+                            .blocking_process_block_with_switch(
+                                Arc::new(block),
+                                Switch::DISABLE_ALL,
+                            )
                             .expect("process block OK");
                     });
                 (chain1.clone(), blocks)
@@ -187,7 +202,10 @@ fn bench(c: &mut Criterion) {
             |(chain, blocks)| {
                 blocks.into_iter().skip(8).for_each(|block| {
                     chain
-                        .internal_process_block(Arc::new(block), Switch::DISABLE_EXTENSION)
+                        .blocking_process_block_with_switch(
+                            Arc::new(block),
+                            Switch::DISABLE_EXTENSION,
+                        )
                         .expect("process block OK");
                 });
             },
diff --git a/benches/benches/benchmarks/overall.rs b/benches/benches/benchmarks/overall.rs
index 2f966e0318..103cab0893 100644
--- a/benches/benches/benchmarks/overall.rs
+++ b/benches/benches/benchmarks/overall.rs
@@ -1,7 +1,7 @@
 use crate::benchmarks::util::{create_2out_transaction, create_secp_tx, secp_cell};
 use ckb_app_config::NetworkConfig;
 use ckb_app_config::{BlockAssemblerConfig, TxPoolConfig};
-use ckb_chain::chain::{ChainController, ChainService};
+use ckb_chain::{start_chain_services, ChainController};
 use ckb_chain_spec::consensus::{ConsensusBuilder, ProposalWindow};
 use ckb_dao_utils::genesis_dao_data;
 use ckb_jsonrpc_types::JsonBytes;
@@ -133,8 +133,7 @@ pub fn setup_chain(txs_size: usize) -> (Shared, ChainController) {
     let network = dummy_network(&shared);
     pack.take_tx_pool_builder().start(network);
 
-    let chain_service = ChainService::new(shared.clone(), pack.take_proposal_table());
-    let chain_controller = chain_service.start(Some("ChainService"));
+    let chain_controller = start_chain_services(pack.take_chain_services_builder());
 
     (shared, chain_controller)
 }
@@ -219,7 +218,10 @@ fn bench(c: &mut Criterion) {
                     .expect("header verified");
 
                 chain
-                    .internal_process_block(Arc::new(block), Switch::DISABLE_EXTENSION)
+                    .blocking_process_block_with_switch(
+                        Arc::new(block),
+                        Switch::DISABLE_EXTENSION,
+                    )
                     .expect("process_block");
                 i -= 1;
             }
diff --git a/benches/benches/benchmarks/resolve.rs b/benches/benches/benchmarks/resolve.rs
index 29ce56bc8c..37ec9d11c3 100644
--- a/benches/benches/benchmarks/resolve.rs
+++ b/benches/benches/benchmarks/resolve.rs
@@ -1,6 +1,6 @@
 use crate::benchmarks::util::create_2out_transaction;
 use ckb_app_config::{BlockAssemblerConfig, TxPoolConfig};
-use ckb_chain::chain::{ChainController, ChainService};
+use ckb_chain::{start_chain_services, ChainController};
 use ckb_chain_spec::{ChainSpec, IssuedCell};
 use ckb_jsonrpc_types::JsonBytes;
 use ckb_resource::Resource;
@@ -96,8 +96,7 @@ pub fn setup_chain(txs_size: usize) -> (Shared, ChainController) {
         .tx_pool_config(tx_pool_config)
         .build()
         .unwrap();
-    let chain_service = ChainService::new(shared.clone(), pack.take_proposal_table());
-    let chain_controller = chain_service.start(Some("ChainService"));
+    let chain_controller = start_chain_services(pack.take_chain_services_builder());
 
     // FIXME: global cache !!!
     let _ret = setup_system_cell_cache(
diff --git a/benches/benches/benchmarks/secp_2in2out.rs b/benches/benches/benchmarks/secp_2in2out.rs
index 69c0705f4f..03ebab1685 100644
--- a/benches/benches/benchmarks/secp_2in2out.rs
+++ b/benches/benches/benchmarks/secp_2in2out.rs
@@ -32,7 +32,7 @@ fn bench(c: &mut Criterion) {
                 (0..20).for_each(|_| {
                     let block = gen_secp_block(&mut blocks, &parent, shared2);
                     chain2
-                        .internal_process_block(
+                        .blocking_process_block_with_switch(
                             Arc::new(block.clone()),
                             Switch::DISABLE_ALL,
                         )
@@ -44,7 +44,10 @@ fn bench(c: &mut Criterion) {
             |(chain, blocks)| {
                 blocks.into_iter().skip(1).for_each(|block| {
                     chain
-                        .internal_process_block(Arc::new(block), Switch::DISABLE_EXTENSION)
+                        .blocking_process_block_with_switch(
+                            Arc::new(block),
+                            Switch::DISABLE_EXTENSION,
+                        )
                         .expect("process block OK");
                 });
             },
@@ -77,14 +80,14 @@ fn bench(c: &mut Criterion) {
                 (0..5).for_each(|i| {
                     let block = gen_secp_block(&mut blocks, &parent, shared2);
                     chain2
-                        .internal_process_block(
+                        .blocking_process_block_with_switch(
                             Arc::new(block.clone()),
                             Switch::DISABLE_ALL,
                         )
                         .expect("process block OK");
                     if i < 2 {
                         chain3
-                            .internal_process_block(
+                            .blocking_process_block_with_switch(
                                 Arc::new(block.clone()),
                                 Switch::DISABLE_ALL,
                             )
@@ -96,7 +99,7 @@ fn bench(c: &mut Criterion) {
                 (0..2).for_each(|_| {
                     let block = gen_secp_block(&mut blocks, &parent, shared3);
                     chain3
-                        .internal_process_block(
+                        .blocking_process_block_with_switch(
                             Arc::new(block.clone()),
                             Switch::DISABLE_ALL,
                         )
@@ -110,7 +113,10 @@ fn bench(c: &mut Criterion) {
                     .take(5)
                     .for_each(|block| {
                         chain1
-                            .internal_process_block(Arc::new(block), Switch::DISABLE_ALL)
+                            .blocking_process_block_with_switch(
+                                Arc::new(block),
+                                Switch::DISABLE_ALL,
+                            )
                             .expect("process block OK");
                     });
                 (chain1.clone(), blocks)
@@ -118,7 +124,7 @@ fn bench(c: &mut Criterion) {
             |(chain, blocks)| {
                 blocks.into_iter().skip(6).for_each(|block| {
                     chain
-                        .process_block(Arc::new(block))
+                        .blocking_process_block(Arc::new(block))
                        .expect("process block OK");
                 });
             },
@@ -152,11 +158,17 @@ fn bench(c: &mut Criterion) {
                     let block = gen_secp_block(&mut blocks, &parent, shared2);
                     let arc_block = Arc::new(block.clone());
                     chain2
-                        .internal_process_block(Arc::clone(&arc_block), Switch::DISABLE_ALL)
+                        .blocking_process_block_with_switch(
+                            Arc::clone(&arc_block),
+                            Switch::DISABLE_ALL,
+                        )
                         .expect("process block OK");
                     if i < 2 {
                         chain3
-                            .internal_process_block(arc_block, Switch::DISABLE_ALL)
+                            .blocking_process_block_with_switch(
+                                arc_block,
+                                Switch::DISABLE_ALL,
+                            )
                             .expect("process block OK");
                     }
                     parent = block;
@@ -165,7 +177,7 @@ fn bench(c: &mut Criterion) {
                 (0..4).for_each(|_| {
                     let block = gen_secp_block(&mut blocks, &parent, shared3);
                     chain3
-                        .internal_process_block(
+                        .blocking_process_block_with_switch(
                             Arc::new(block.clone()),
                             Switch::DISABLE_ALL,
                         )
@@ -179,7 +191,10 @@ fn bench(c: &mut Criterion) {
                     .take(7)
                     .for_each(|block| {
                         chain1
-                            .internal_process_block(Arc::new(block), Switch::DISABLE_ALL)
+                            .blocking_process_block_with_switch(
+                                Arc::new(block),
+                                Switch::DISABLE_ALL,
+                            )
                             .expect("process block OK");
                     });
                 (chain1.clone(), blocks)
@@ -187,7 +202,10 @@ fn bench(c: &mut Criterion) {
             |(chain, blocks)| {
                 blocks.into_iter().skip(8).for_each(|block| {
                     chain
-                        .internal_process_block(Arc::new(block), Switch::DISABLE_EXTENSION)
+                        .blocking_process_block_with_switch(
+                            Arc::new(block),
+                            Switch::DISABLE_EXTENSION,
+                        )
                         .expect("process block OK");
                 });
             },
diff --git a/benches/benches/benchmarks/util.rs b/benches/benches/benchmarks/util.rs
index 
4b56411697..a9f64d0a50 100644 --- a/benches/benches/benchmarks/util.rs +++ b/benches/benches/benchmarks/util.rs @@ -1,4 +1,4 @@ -use ckb_chain::chain::{ChainController, ChainService}; +use ckb_chain::{start_chain_services, ChainController}; use ckb_chain_spec::consensus::{ConsensusBuilder, ProposalWindow}; use ckb_crypto::secp::Privkey; use ckb_dao::DaoCalculator; @@ -78,9 +78,9 @@ pub fn new_always_success_chain(txs_size: usize, chains_num: usize) -> Chains { .consensus(consensus.clone()) .build() .unwrap(); - let chain_service = ChainService::new(shared.clone(), pack.take_proposal_table()); + let chain_controller = start_chain_services(pack.take_chain_services_builder()); - chains.push((chain_service.start::<&str>(None), shared)); + chains.push((chain_controller, shared)); } chains @@ -296,9 +296,9 @@ pub fn new_secp_chain(txs_size: usize, chains_num: usize) -> Chains { .consensus(consensus.clone()) .build() .unwrap(); - let chain_service = ChainService::new(shared.clone(), pack.take_proposal_table()); + let chain_controller = start_chain_services(pack.take_chain_services_builder()); - chains.push((chain_service.start::<&str>(None), shared)); + chains.push((chain_controller, shared)); } chains diff --git a/block-filter/Cargo.toml b/block-filter/Cargo.toml index 65fe84100e..f74f72af12 100644 --- a/block-filter/Cargo.toml +++ b/block-filter/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ckb-block-filter" -version = "0.117.0-pre" +version = "0.118.0-pre" license = "MIT" authors = ["Nervos Core Dev "] edition = "2021" @@ -11,9 +11,9 @@ repository = "https://github.com/nervosnetwork/ckb" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -ckb-store = { path = "../store", version = "= 0.117.0-pre" } -ckb-shared = { path = "../shared", version = "= 0.117.0-pre" } -ckb-logger = { path = "../util/logger", version = "= 0.117.0-pre" } -ckb-types = { path = "../util/types", version = "= 0.117.0-pre" } -ckb-stop-handler = { path = "../util/stop-handler", version = "= 0.117.0-pre" } -ckb-async-runtime = { path = "../util/runtime", version = "= 0.117.0-pre" } +ckb-store = { path = "../store", version = "= 0.118.0-pre" } +ckb-shared = { path = "../shared", version = "= 0.118.0-pre" } +ckb-logger = { path = "../util/logger", version = "= 0.118.0-pre" } +ckb-types = { path = "../util/types", version = "= 0.118.0-pre" } +ckb-stop-handler = { path = "../util/stop-handler", version = "= 0.118.0-pre" } +ckb-async-runtime = { path = "../util/runtime", version = "= 0.118.0-pre" } diff --git a/chain/Cargo.toml b/chain/Cargo.toml index b52d5e1c88..c5f4d2c722 100644 --- a/chain/Cargo.toml +++ b/chain/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ckb-chain" -version = "0.117.0-pre" +version = "0.118.0-pre" license = "MIT" authors = ["Nervos Core Dev "] edition = "2021" @@ -9,35 +9,45 @@ homepage = "https://github.com/nervosnetwork/ckb" repository = "https://github.com/nervosnetwork/ckb" [dependencies] -ckb-logger = { path = "../util/logger", version = "= 0.117.0-pre" } -ckb-metrics = { path = "../util/metrics", version = "= 0.117.0-pre" } -ckb-types = { path = "../util/types", version = "= 0.117.0-pre" } -ckb-shared = { path = "../shared", version = "= 0.117.0-pre" } -ckb-chain-spec = { path = "../spec", version = "= 0.117.0-pre" } -ckb-store = { path = "../store", version = "= 0.117.0-pre" } -ckb-verification = { path = "../verification", version = "= 0.117.0-pre" } -ckb-verification-contextual = { path = "../verification/contextual", version = "= 
0.117.0-pre" } -ckb-verification-traits = { path = "../verification/traits", version = "= 0.117.0-pre" } -ckb-systemtime = { path = "../util/systemtime", version = "= 0.117.0-pre" } -ckb-stop-handler = { path = "../util/stop-handler", version = "= 0.117.0-pre" } -ckb-proposal-table = { path = "../util/proposal-table", version = "= 0.117.0-pre" } -ckb-error = { path = "../error", version = "= 0.117.0-pre" } -ckb-app-config = { path = "../util/app-config", version = "= 0.117.0-pre" } -ckb-channel = { path = "../util/channel", version = "= 0.117.0-pre" } +ckb-logger = { path = "../util/logger", version = "= 0.118.0-pre" } +ckb-metrics = { path = "../util/metrics", version = "= 0.118.0-pre" } +ckb-types = { path = "../util/types", version = "= 0.118.0-pre" } +ckb-shared = { path = "../shared", version = "= 0.118.0-pre" } +ckb-chain-spec = { path = "../spec", version = "= 0.118.0-pre" } +ckb-store = { path = "../store", version = "= 0.118.0-pre" } +ckb-verification = { path = "../verification", version = "= 0.118.0-pre" } +ckb-verification-contextual = { path = "../verification/contextual", version = "= 0.118.0-pre" } +ckb-verification-traits = { path = "../verification/traits", version = "= 0.118.0-pre" } +ckb-systemtime = { path = "../util/systemtime", version = "= 0.118.0-pre" } +ckb-stop-handler = { path = "../util/stop-handler", version = "= 0.118.0-pre" } +ckb-proposal-table = { path = "../util/proposal-table", version = "= 0.118.0-pre" } +ckb-error = { path = "../error", version = "= 0.118.0-pre" } +ckb-app-config = { path = "../util/app-config", version = "= 0.118.0-pre" } +ckb-channel = { path = "../util/channel", version = "= 0.118.0-pre" } +ckb-db = { path = "../db", version = "= 0.118.0-pre" } +ckb-db-schema = { path = "../db-schema", version = "= 0.118.0-pre" } faux = { version = "^0.1", optional = true } ckb-merkle-mountain-range = "0.5.2" is_sorted = "0.1.1" +ckb-constant = { path = "../util/constant", version = "= 0.118.0-pre" } +ckb-util = { path = "../util", version = "= 0.118.0-pre" } +crossbeam = "0.8.2" +ckb-network = { path = "../network", version = "= 0.118.0-pre" } +ckb-tx-pool = { path = "../tx-pool", version = "= 0.118.0-pre" } +minstant = "0.1.4" +dashmap = "4.0" [dev-dependencies] -ckb-test-chain-utils = { path = "../util/test-chain-utils", version = "= 0.117.0-pre" } -ckb-dao-utils = { path = "../util/dao/utils", version = "= 0.117.0-pre" } -ckb-reward-calculator = { path = "../util/reward-calculator", version = "= 0.117.0-pre" } -ckb-tx-pool = { path = "../tx-pool", version = "= 0.117.0-pre", features = ["internal"] } -ckb-jsonrpc-types = { path = "../util/jsonrpc-types", version = "= 0.117.0-pre" } -ckb-network = { path = "../network", version = "= 0.117.0-pre" } +ckb-test-chain-utils = { path = "../util/test-chain-utils", version = "= 0.118.0-pre" } +ckb-dao-utils = { path = "../util/dao/utils", version = "= 0.118.0-pre" } +ckb-reward-calculator = { path = "../util/reward-calculator", version = "= 0.118.0-pre" } +ckb-tx-pool = { path = "../tx-pool", version = "= 0.118.0-pre", features = ["internal"] } +ckb-jsonrpc-types = { path = "../util/jsonrpc-types", version = "= 0.118.0-pre" } +ckb-network = { path = "../network", version = "= 0.118.0-pre" } lazy_static = "1.4" tempfile.workspace = true -ckb-systemtime = { path = "../util/systemtime", version = "= 0.117.0-pre" ,features = ["enable_faketime"]} +ckb-systemtime = { path = "../util/systemtime", version = "= 0.118.0-pre" ,features = ["enable_faketime"]} +ckb-logger-service = { path = "../util/logger-service", 
version = "= 0.118.0-pre" } [features] default = [] diff --git a/chain/src/chain_controller.rs b/chain/src/chain_controller.rs new file mode 100644 index 0000000000..5660f7934e --- /dev/null +++ b/chain/src/chain_controller.rs @@ -0,0 +1,135 @@ +//! CKB chain controller. +#![allow(missing_docs)] + +use crate::utils::orphan_block_pool::OrphanBlockPool; +use crate::{LonelyBlock, ProcessBlockRequest, RemoteBlock, TruncateRequest, VerifyResult}; +use ckb_channel::Sender; +use ckb_error::{Error, InternalErrorKind}; +use ckb_logger::{self, error}; +use ckb_store::ChainDB; +use ckb_types::{ + core::{service::Request, BlockView}, + packed::Byte32, +}; +use ckb_verification_traits::Switch; +use std::sync::atomic::AtomicBool; +use std::sync::Arc; + +/// Controller to the chain service. +/// +/// The controller is internally reference-counted and can be freely cloned. +/// +/// A controller can invoke ChainService methods. +#[cfg_attr(feature = "mock", faux::create)] +#[derive(Clone)] +pub struct ChainController { + process_block_sender: Sender, + truncate_sender: Sender, + orphan_block_broker: Arc, + + is_verifying_unverified_blocks_on_startup: Arc, +} + +#[cfg_attr(feature = "mock", faux::methods)] +impl ChainController { + pub(crate) fn new( + process_block_sender: Sender, + truncate_sender: Sender, + orphan_block_broker: Arc, + is_verifying_unverified_blocks_on_startup: Arc, + ) -> Self { + ChainController { + process_block_sender, + truncate_sender, + orphan_block_broker, + is_verifying_unverified_blocks_on_startup, + } + } + + pub fn is_verifying_unverified_blocks_on_startup(&self) -> bool { + self.is_verifying_unverified_blocks_on_startup + .load(std::sync::atomic::Ordering::Acquire) + } + + pub fn asynchronous_process_remote_block(&self, remote_block: RemoteBlock) { + let lonely_block = LonelyBlock { + block: remote_block.block, + verify_callback: Some(remote_block.verify_callback), + switch: None, + }; + self.asynchronous_process_lonely_block(lonely_block); + } + + pub fn asynchronous_process_lonely_block(&self, lonely_block: LonelyBlock) { + if Request::call(&self.process_block_sender, lonely_block).is_none() { + error!("Chain service has gone") + } + } + + /// MinerRpc::submit_block and `ckb import` need this blocking way to process block + pub fn blocking_process_block(&self, block: Arc) -> VerifyResult { + self.blocking_process_block_internal(block, None) + } + + /// `IntegrationTestRpcImpl::process_block_without_verify` need this + pub fn blocking_process_block_with_switch( + &self, + block: Arc, + switch: Switch, + ) -> VerifyResult { + self.blocking_process_block_internal(block, Some(switch)) + } + + fn blocking_process_block_internal( + &self, + block: Arc, + switch: Option, + ) -> VerifyResult { + let (verify_result_tx, verify_result_rx) = ckb_channel::oneshot::channel::(); + + let verify_callback = { + move |result: VerifyResult| { + if let Err(err) = verify_result_tx.send(result) { + error!( + "blocking send verify_result failed: {}, this shouldn't happen", + err + ) + } + } + }; + + let lonely_block = LonelyBlock { + block, + switch, + verify_callback: Some(Box::new(verify_callback)), + }; + + self.asynchronous_process_lonely_block(lonely_block); + verify_result_rx.recv().unwrap_or_else(|err| { + Err(InternalErrorKind::System + .other(format!("blocking recv verify_result failed: {}", err)) + .into()) + }) + } + + /// Truncate chain to specified target + /// + /// Should use for testing only + pub fn truncate(&self, target_tip_hash: Byte32) -> Result<(), Error> { + 
Request::call(&self.truncate_sender, target_tip_hash).unwrap_or_else(|| { + Err(InternalErrorKind::System + .other("Chain service has gone") + .into()) + }) + } + + /// `Relayer::reconstruct_block` need this + pub fn get_orphan_block(&self, store: &ChainDB, hash: &Byte32) -> Option> { + self.orphan_block_broker.get_block(store, hash) + } + + /// `NetRpcImpl::sync_state` rpc need this + pub fn orphan_blocks_len(&self) -> usize { + self.orphan_block_broker.len() + } +} diff --git a/chain/src/chain_service.rs b/chain/src/chain_service.rs new file mode 100644 index 0000000000..a1b3a05e2a --- /dev/null +++ b/chain/src/chain_service.rs @@ -0,0 +1,152 @@ +//! CKB chain service. +#![allow(missing_docs)] + +use crate::orphan_broker::OrphanBroker; +use crate::{LonelyBlock, ProcessBlockRequest}; +use ckb_channel::{select, Receiver}; +use ckb_error::{Error, InternalErrorKind}; +use ckb_logger::{self, debug, error, info, warn}; +use ckb_shared::block_status::BlockStatus; +use ckb_shared::shared::Shared; +use ckb_stop_handler::new_crossbeam_exit_rx; +use ckb_types::core::{service::Request, BlockView}; +use ckb_verification::{BlockVerifier, NonContextualBlockTxsVerifier}; +use ckb_verification_traits::Verifier; + +/// Chain background service to receive LonelyBlock and only do `non_contextual_verify` +pub(crate) struct ChainService { + shared: Shared, + process_block_rx: Receiver, + orphan_broker: OrphanBroker, +} +impl ChainService { + /// Create a new ChainService instance with shared. + pub(crate) fn new( + shared: Shared, + process_block_rx: Receiver, + consume_orphan: OrphanBroker, + ) -> ChainService { + ChainService { + shared, + process_block_rx, + orphan_broker: consume_orphan, + } + } + + /// Receive block from `process_block_rx` and do `non_contextual_verify` + pub(crate) fn start_process_block(self) { + let signal_receiver = new_crossbeam_exit_rx(); + + let clean_expired_orphan_timer = + crossbeam::channel::tick(std::time::Duration::from_secs(60)); + + loop { + select! { + recv(self.process_block_rx) -> msg => match msg { + Ok(Request { responder, arguments: lonely_block }) => { + // asynchronous_process_block doesn't interact with tx-pool, + // no need to pause tx-pool's chunk_process here. + let _trace_now = minstant::Instant::now(); + self.asynchronous_process_block(lonely_block); + if let Some(handle) = ckb_metrics::handle(){ + handle.ckb_chain_async_process_block_duration.observe(_trace_now.elapsed().as_secs_f64()) + } + let _ = responder.send(()); + }, + _ => { + error!("process_block_receiver closed"); + break; + }, + }, + recv(clean_expired_orphan_timer) -> _ => { + self.orphan_broker.clean_expired_orphans(); + }, + recv(signal_receiver) -> _ => { + info!("ChainService received exit signal, exit now"); + break; + } + } + } + } + + fn non_contextual_verify(&self, block: &BlockView) -> Result<(), Error> { + let consensus = self.shared.consensus(); + BlockVerifier::new(consensus).verify(block).map_err(|e| { + debug!("[process_block] BlockVerifier error {:?}", e); + e + })?; + + NonContextualBlockTxsVerifier::new(consensus) + .verify(block) + .map_err(|e| { + debug!( + "[process_block] NonContextualBlockTxsVerifier error {:?}", + e + ); + e + }) + .map(|_| ()) + } + + // `self.non_contextual_verify` is very fast. 
+    fn asynchronous_process_block(&self, lonely_block: LonelyBlock) {
+        let block_number = lonely_block.block().number();
+        let block_hash = lonely_block.block().hash();
+        // Skip verifying a genesis block if its hash is equal to our genesis hash,
+        // otherwise, return error and ban peer.
+        if block_number < 1 {
+            if self.shared.genesis_hash() != block_hash {
+                warn!(
+                    "receive 0 number block: 0-{}, expect genesis hash: {}",
+                    block_hash,
+                    self.shared.genesis_hash()
+                );
+                self.shared
+                    .insert_block_status(lonely_block.block().hash(), BlockStatus::BLOCK_INVALID);
+                let error = InternalErrorKind::System
+                    .other("Invalid genesis block received")
+                    .into();
+                lonely_block.execute_callback(Err(error));
+            } else {
+                warn!("receive 0 number block: 0-{}", block_hash);
+                lonely_block.execute_callback(Ok(false));
+            }
+            return;
+        }
+
+        if lonely_block.switch().is_none()
+            || matches!(lonely_block.switch(), Some(switch) if !switch.disable_non_contextual())
+        {
+            let result = self.non_contextual_verify(lonely_block.block());
+            if let Err(err) = result {
+                error!(
+                    "block {}-{} verify failed: {:?}",
+                    block_number, block_hash, err
+                );
+                self.shared
+                    .insert_block_status(lonely_block.block().hash(), BlockStatus::BLOCK_INVALID);
+                lonely_block.execute_callback(Err(err));
+                return;
+            }
+        }
+
+        if let Err(err) = self.insert_block(&lonely_block) {
+            error!(
+                "insert block {}-{} failed: {:?}",
+                block_number, block_hash, err
+            );
+            self.shared.block_status_map().remove(&block_hash);
+            lonely_block.execute_callback(Err(err));
+            return;
+        }
+
+        self.orphan_broker.process_lonely_block(lonely_block.into());
+    }
+
+    fn insert_block(&self, lonely_block: &LonelyBlock) -> Result<(), ckb_error::Error> {
+        let db_txn = self.shared.store().begin_transaction();
+        db_txn.insert_block(lonely_block.block())?;
+        db_txn.commit()?;
+        Ok(())
+    }
+}
diff --git a/chain/src/chain.rs b/chain/src/consume_unverified.rs
similarity index 63%
rename from chain/src/chain.rs
rename to chain/src/consume_unverified.rs
index c1915ed48e..cf09abdaba 100644
--- a/chain/src/chain.rs
+++ b/chain/src/consume_unverified.rs
@@ -1,414 +1,261 @@
-//! CKB chain service.
-#![allow(missing_docs)]
-
-use ckb_channel::{self as channel, select, Sender};
-use ckb_error::{Error, InternalErrorKind};
+use crate::{delete_unverified_block, UnverifiedBlock};
+use crate::{utils::forkchanges::ForkChanges, GlobalIndex, TruncateRequest, VerifyResult};
+use ckb_channel::{select, Receiver};
+use ckb_error::{is_internal_db_error, Error, InternalErrorKind};
+use ckb_logger::internal::{log_enabled, trace};
 use ckb_logger::Level::Trace;
-use ckb_logger::{
-    self, debug, error, info, log_enabled, log_enabled_target, trace, trace_target, warn,
-};
+use ckb_logger::{debug, error, info, log_enabled_target, trace_target};
 use ckb_merkle_mountain_range::leaf_index_to_mmr_size;
 use ckb_proposal_table::ProposalTable;
-use ckb_shared::shared::Shared;
-use ckb_stop_handler::{new_crossbeam_exit_rx, register_thread};
+use ckb_shared::block_status::BlockStatus;
+use ckb_shared::Shared;
 use ckb_store::{attach_block_cell, detach_block_cell, ChainStore, StoreTransaction};
 use ckb_systemtime::unix_time_as_millis;
-use ckb_types::{
-    core::{
-        cell::{
-            resolve_transaction, BlockCellProvider, HeaderChecker, OverlayCellProvider,
-            ResolvedTransaction,
-        },
-        hardfork::HardForks,
-        service::Request,
-        BlockExt, BlockNumber, BlockView, Cycle, HeaderView,
-    },
-    packed::{Byte32, ProposalShortId},
-    utilities::merkle_mountain_range::ChainRootMMR,
-    U256,
+use ckb_tx_pool::TxPoolController;
+use ckb_types::core::cell::{
+    resolve_transaction, BlockCellProvider, HeaderChecker, OverlayCellProvider, ResolvedTransaction,
 };
+use ckb_types::core::{service::Request, BlockExt, BlockNumber, BlockView, Cycle, HeaderView};
+use ckb_types::packed::Byte32;
+use ckb_types::utilities::merkle_mountain_range::ChainRootMMR;
+use ckb_types::H256;
 use ckb_verification::cache::Completed;
-use ckb_verification::{BlockVerifier, InvalidParentError, NonContextualBlockTxsVerifier};
+use ckb_verification::InvalidParentError;
 use ckb_verification_contextual::{ContextualBlockVerifier, VerifyContext};
-use ckb_verification_traits::{Switch, Verifier};
-#[cfg(debug_assertions)]
-use is_sorted::IsSorted;
-use std::collections::{HashSet, VecDeque};
+use ckb_verification_traits::Switch;
+use dashmap::DashSet;
+use std::cmp;
+use std::collections::HashSet;
 use std::sync::Arc;
-use std::time::Instant;
-use std::{cmp, thread};
-
-type ProcessBlockRequest = Request<(Arc<BlockView>, Switch), Result<bool, Error>>;
-type TruncateRequest = Request<Byte32, Result<(), Error>>;
-
-/// Controller to the chain service.
-///
-/// The controller is internally reference-counted and can be freely cloned.
-///
-/// A controller can invoke [`ChainService`] methods.
-#[cfg_attr(feature = "mock", faux::create)]
-#[derive(Clone)]
-pub struct ChainController {
-    process_block_sender: Sender<ProcessBlockRequest>,
-    truncate_sender: Sender<TruncateRequest>, // Used for testing only
-}
-
-#[cfg_attr(feature = "mock", faux::methods)]
-impl ChainController {
-    pub fn new(
-        process_block_sender: Sender<ProcessBlockRequest>,
-        truncate_sender: Sender<TruncateRequest>,
-    ) -> Self {
-        ChainController {
-            process_block_sender,
-            truncate_sender,
-        }
-    }
-    /// Inserts the block into database.
-    ///
-    /// Expects the block's header to be valid and already verified.
-    ///
-    /// If the block already exists, does nothing and false is returned.
-    ///
-    /// [BlockVerifier] [NonContextualBlockTxsVerifier] [ContextualBlockVerifier] will performed
-    pub fn process_block(&self, block: Arc<BlockView>) -> Result<bool, Error> {
-        self.internal_process_block(block, Switch::NONE)
-    }
-
-    /// Internal method insert block for test
-    ///
-    /// switch bit flags for particular verify, make easier to generating test data
-    pub fn internal_process_block(
-        &self,
-        block: Arc<BlockView>,
-        switch: Switch,
-    ) -> Result<bool, Error> {
-        Request::call(&self.process_block_sender, (block, switch)).unwrap_or_else(|| {
-            Err(InternalErrorKind::System
-                .other("Chain service has gone")
-                .into())
-        })
-    }
-
-    /// Truncate chain to specified target
-    ///
-    /// Should use for testing only
-    pub fn truncate(&self, target_tip_hash: Byte32) -> Result<(), Error> {
-        Request::call(&self.truncate_sender, target_tip_hash).unwrap_or_else(|| {
-            Err(InternalErrorKind::System
-                .other("Chain service has gone")
-                .into())
-        })
-    }
-}
-/// The struct represent fork
-#[derive(Debug, Default)]
-pub struct ForkChanges {
-    /// Blocks attached to index after forks
-    pub(crate) attached_blocks: VecDeque<BlockView>,
-    /// Blocks detached from index after forks
-    pub(crate) detached_blocks: VecDeque<BlockView>,
-    /// HashSet with proposal_id detached to index after forks
-    pub(crate) detached_proposal_id: HashSet<ProposalShortId>,
-    /// to be updated exts
-    pub(crate) dirty_exts: VecDeque<BlockExt>,
+pub(crate) struct ConsumeUnverifiedBlockProcessor {
+    pub(crate) shared: Shared,
+    pub(crate) is_pending_verify: Arc<DashSet<Byte32>>,
+    pub(crate) proposal_table: ProposalTable,
 }
 
-impl ForkChanges {
-    /// blocks attached to index after forks
-    pub fn attached_blocks(&self) -> &VecDeque<BlockView> {
-        &self.attached_blocks
-    }
-
-    /// blocks detached from index after forks
-    pub fn detached_blocks(&self) -> &VecDeque<BlockView> {
-        &self.detached_blocks
-    }
-
-    /// proposal_id detached to index after forks
-    pub fn detached_proposal_id(&self) -> &HashSet<ProposalShortId> {
-        &self.detached_proposal_id
-    }
-
-    /// are there any block should be detached
-    pub fn has_detached(&self) -> bool {
-        !self.detached_blocks.is_empty()
-    }
-
-    /// cached verified attached block num
-    pub fn verified_len(&self) -> usize {
-        self.attached_blocks.len() - self.dirty_exts.len()
-    }
-
-    /// assertion for make sure attached_blocks and detached_blocks are sorted
-    #[cfg(debug_assertions)]
-    pub fn is_sorted(&self) -> bool {
-        IsSorted::is_sorted_by_key(&mut self.attached_blocks().iter(), |blk| {
-            blk.header().number()
-        }) && IsSorted::is_sorted_by_key(&mut self.detached_blocks().iter(), |blk| {
-            blk.header().number()
-        })
-    }
-
-    pub fn during_hardfork(&self, hardfork_switch: &HardForks) -> bool {
-        let hardfork_during_detach =
-            self.check_if_hardfork_during_blocks(hardfork_switch, &self.detached_blocks);
-        let hardfork_during_attach =
-            self.check_if_hardfork_during_blocks(hardfork_switch, &self.attached_blocks);
-
-        hardfork_during_detach || hardfork_during_attach
-    }
+pub(crate) struct ConsumeUnverifiedBlocks {
+    tx_pool_controller: TxPoolController,
 
-    fn check_if_hardfork_during_blocks(
-        &self,
-        hardfork: &HardForks,
-        blocks: &VecDeque<BlockView>,
-    ) -> bool {
-        if blocks.is_empty() {
-            false
-        } else {
-            // This method assumes that the input blocks are sorted and unique.
-            let rfc_0049 = hardfork.ckb2023.rfc_0049();
-            let epoch_first = blocks.front().unwrap().epoch().number();
-            let epoch_next = blocks
-                .back()
-                .unwrap()
-                .epoch()
-                .minimum_epoch_number_after_n_blocks(1);
-            epoch_first < rfc_0049 && rfc_0049 <= epoch_next
-        }
-    }
-}
+    unverified_block_rx: Receiver<UnverifiedBlock>,
+    truncate_block_rx: Receiver<TruncateRequest>,
 
-pub(crate) struct GlobalIndex {
-    pub(crate) number: BlockNumber,
-    pub(crate) hash: Byte32,
-    pub(crate) unseen: bool,
+    stop_rx: Receiver<()>,
+    processor: ConsumeUnverifiedBlockProcessor,
 }
 
-impl GlobalIndex {
-    pub(crate) fn new(number: BlockNumber, hash: Byte32, unseen: bool) -> GlobalIndex {
-        GlobalIndex {
-            number,
-            hash,
-            unseen,
+impl ConsumeUnverifiedBlocks {
+    pub(crate) fn new(
+        shared: Shared,
+        unverified_blocks_rx: Receiver<UnverifiedBlock>,
+        truncate_block_rx: Receiver<TruncateRequest>,
+        proposal_table: ProposalTable,
+        is_pending_verify: Arc<DashSet<Byte32>>,
+        stop_rx: Receiver<()>,
+    ) -> Self {
+        ConsumeUnverifiedBlocks {
+            tx_pool_controller: shared.tx_pool_controller().to_owned(),
+            unverified_block_rx: unverified_blocks_rx,
+            truncate_block_rx,
+            stop_rx,
+            processor: ConsumeUnverifiedBlockProcessor {
+                shared,
+                is_pending_verify,
+                proposal_table,
+            },
         }
     }
 
-    pub(crate) fn forward(&mut self, hash: Byte32) {
-        self.number -= 1;
-        self.hash = hash;
-    }
-}
-
-/// Chain background service
-///
-/// The ChainService provides a single-threaded background executor.
-pub struct ChainService {
-    shared: Shared,
-    proposal_table: ProposalTable,
-}
-
-impl ChainService {
-    /// Create a new ChainService instance with shared and initial proposal_table.
-    pub fn new(shared: Shared, proposal_table: ProposalTable) -> ChainService {
-        ChainService {
-            shared,
-            proposal_table,
-        }
-    }
+    pub(crate) fn start(mut self) {
+        loop {
+            let trace_begin_loop = minstant::Instant::now();
+            select! {
+                recv(self.unverified_block_rx) -> msg => match msg {
+                    Ok(unverified_task) => {
+                        // process this unverified block
+                        if let Some(handle) = ckb_metrics::handle() {
+                            handle.ckb_chain_consume_unverified_block_waiting_block_duration.observe(trace_begin_loop.elapsed().as_secs_f64())
+                        }
+                        let _ = self.tx_pool_controller.suspend_chunk_process();
 
-    /// start background single-threaded service with specified thread_name.
-    pub fn start<S: ToString>(mut self, thread_name: Option<S>) -> ChainController {
-        let signal_receiver = new_crossbeam_exit_rx();
-        let (process_block_sender, process_block_receiver) = channel::bounded(0);
-        let (truncate_sender, truncate_receiver) = channel::bounded(0);
+                        let _trace_now = minstant::Instant::now();
+                        self.processor.consume_unverified_blocks(unverified_task);
+                        if let Some(handle) = ckb_metrics::handle() {
+                            handle.ckb_chain_consume_unverified_block_duration.observe(_trace_now.elapsed().as_secs_f64())
+                        }
 
-        // Mainly for test: give an empty thread_name
-        let mut thread_builder = thread::Builder::new();
-        if let Some(name) = thread_name {
-            thread_builder = thread_builder.name(name.to_string());
-        }
-        let tx_control = self.shared.tx_pool_controller().clone();
-
-        let chain_jh = thread_builder
-            .spawn(move || loop {
-                select!
{ - recv(process_block_receiver) -> msg => match msg { - Ok(Request { responder, arguments: (block, verify) }) => { - let instant = Instant::now(); - - let _ = tx_control.suspend_chunk_process(); - let _ = responder.send(self.process_block(block, verify)); - let _ = tx_control.continue_chunk_process(); - - if let Some(metrics) = ckb_metrics::handle() { - metrics - .ckb_block_process_duration - .observe(instant.elapsed().as_secs_f64()); - } - }, - _ => { - error!("process_block_receiver closed"); - break; - }, + let _ = self.tx_pool_controller.continue_chunk_process(); }, - recv(truncate_receiver) -> msg => match msg { - Ok(Request { responder, arguments: target_tip_hash }) => { - let _ = tx_control.suspend_chunk_process(); - let _ = responder.send(self.truncate(&target_tip_hash)); - let _ = tx_control.continue_chunk_process(); - }, - _ => { - error!("truncate_receiver closed"); - break; - }, + Err(err) => { + error!("unverified_block_rx err: {}", err); + return; }, - recv(signal_receiver) -> _ => { - info!("ChainService received exit signal, exit now"); - break; - } + }, + recv(self.truncate_block_rx) -> msg => match msg { + Ok(Request { responder, arguments: target_tip_hash }) => { + let _ = self.tx_pool_controller.suspend_chunk_process(); + let _ = responder.send(self.processor.truncate(&target_tip_hash)); + let _ = self.tx_pool_controller.continue_chunk_process(); + }, + Err(err) => { + info!("truncate_block_tx has been closed, err: {}", err); + return; + }, + }, + recv(self.stop_rx) -> _ => { + info!("consume_unverified_blocks thread received exit signal, exit now"); + break; } - }) - .expect("Start ChainService failed"); - register_thread("ChainService", chain_jh); - - ChainController::new(process_block_sender, truncate_sender) - } - - fn make_fork_for_truncate(&self, target: &HeaderView, current_tip: &HeaderView) -> ForkChanges { - let mut fork = ForkChanges::default(); - let store = self.shared.store(); - for bn in (target.number() + 1)..=current_tip.number() { - let hash = store.get_block_hash(bn).expect("index checked"); - let old_block = store.get_block(&hash).expect("index checked"); - fork.detached_blocks.push_back(old_block); + } } - is_sorted_assert(&fork); - fork } +} - // Truncate the main chain - // Use for testing only, can only truncate less than 50000 blocks each time - pub(crate) fn truncate(&mut self, target_tip_hash: &Byte32) -> Result<(), Error> { - let snapshot = Arc::clone(&self.shared.snapshot()); - assert!(snapshot.is_main_chain(target_tip_hash)); - - let target_tip_header = snapshot.get_block_header(target_tip_hash).expect("checked"); - let target_block_ext = snapshot.get_block_ext(target_tip_hash).expect("checked"); - let target_epoch_ext = snapshot - .get_block_epoch_index(target_tip_hash) - .and_then(|index| snapshot.get_epoch_ext(&index)) - .expect("checked"); - let origin_proposals = snapshot.proposals(); - - let block_count = snapshot - .tip_header() - .number() - .saturating_sub(target_tip_header.number()); - - if block_count > 5_0000 { - let err = format!( - "trying to truncate too many blocks: {}, exceed 50000", - block_count - ); - return Err(InternalErrorKind::Database.other(err).into()); - } - let mut fork = self.make_fork_for_truncate(&target_tip_header, snapshot.tip_header()); +impl ConsumeUnverifiedBlockProcessor { + pub(crate) fn consume_unverified_blocks(&mut self, unverified_block: UnverifiedBlock) { + let UnverifiedBlock { + block, + switch, + verify_callback, + parent_header, + } = unverified_block; + let block_hash = block.hash(); + // 
process this unverified block + let verify_result = self.verify_block(&block, &parent_header, switch); + match &verify_result { + Ok(_) => { + let log_now = std::time::Instant::now(); + self.shared.remove_block_status(&block_hash); + let log_elapsed_remove_block_status = log_now.elapsed(); + self.shared.remove_header_view(&block_hash); + debug!( + "block {} remove_block_status cost: {:?}, and header_view cost: {:?}", + block_hash, + log_elapsed_remove_block_status, + log_now.elapsed() + ); + } + Err(err) => { + error!("verify block {} failed: {}", block_hash, err); - let db_txn = self.shared.store().begin_transaction(); - self.rollback(&fork, &db_txn)?; + let tip = self + .shared + .store() + .get_tip_header() + .expect("tip_header must exist"); + let tip_ext = self + .shared + .store() + .get_block_ext(&tip.hash()) + .expect("tip header's ext must exist"); - db_txn.insert_tip_header(&target_tip_header)?; - db_txn.insert_current_epoch_ext(&target_epoch_ext)?; + self.shared.set_unverified_tip(ckb_shared::HeaderIndex::new( + tip.number(), + tip.hash(), + tip_ext.total_difficulty, + )); - // Currently, we only move the target tip header here, we don't delete the block for performance - // TODO: delete the blocks if we need in the future + self.delete_unverified_block(&block); - db_txn.commit()?; + if !is_internal_db_error(err) { + self.shared + .insert_block_status(block_hash.clone(), BlockStatus::BLOCK_INVALID); + } else { + error!("internal db error, remove block status: {}", block_hash); + self.shared.remove_block_status(&block_hash); + } - self.update_proposal_table(&fork); - let (detached_proposal_id, new_proposals) = self - .proposal_table - .finalize(origin_proposals, target_tip_header.number()); - fork.detached_proposal_id = detached_proposal_id; + error!( + "set_unverified tip to {}-{}, because verify {} failed: {}", + tip.number(), + tip.hash(), + block_hash, + err + ); + } + } - let new_snapshot = self.shared.new_snapshot( - target_tip_header, - target_block_ext.total_difficulty, - target_epoch_ext, - new_proposals, - ); + self.is_pending_verify.remove(&block_hash); - self.shared.store_snapshot(Arc::clone(&new_snapshot)); + if let Some(callback) = verify_callback { + callback(verify_result); + } + } - // NOTE: Dont update tx-pool when truncate - Ok(()) + fn delete_unverified_block(&self, block: &BlockView) { + delete_unverified_block( + self.shared.store(), + block.hash(), + block.number(), + block.parent_hash(), + ) } - // visible pub just for test - #[doc(hidden)] - pub fn process_block(&mut self, block: Arc, switch: Switch) -> Result { - let block_number = block.number(); + fn verify_block( + &mut self, + block: &BlockView, + parent_header: &HeaderView, + switch: Option, + ) -> VerifyResult { + let switch: Switch = switch.unwrap_or_else(|| { + let mut assume_valid_target = self.shared.assume_valid_target(); + match *assume_valid_target { + Some(ref target) => { + // if the target has been reached, delete it + if target + == &ckb_types::prelude::Unpack::::unpack(&BlockView::hash(block)) + { + assume_valid_target.take(); + Switch::NONE + } else { + Switch::DISABLE_SCRIPT + } + } + None => Switch::NONE, + } + }); + let block_hash = block.hash(); + let parent_hash = block.parent_hash(); - debug!("Begin processing block: {}-{}", block_number, block_hash); - if block_number < 1 { - warn!("Receive 0 number block: 0-{}", block_hash); + { + let parent_status = self.shared.get_block_status(&parent_hash); + if parent_status.eq(&BlockStatus::BLOCK_INVALID) { + return 
Err(InternalErrorKind::Other + .other(format!( + "block: {}'s parent: {} previously verified failed", + block_hash, parent_hash + )) + .into()); + } } - self.insert_block(block, switch).map(|ret| { - debug!("Finish processing block"); - ret - }) - } + let parent_ext = self.shared.store().get_block_ext(&parent_hash).ok_or( + InternalErrorKind::Other.other(format!( + "block: {}'s parent: {}'s block ext not found", + block_hash, parent_hash + )), + )?; - fn non_contextual_verify(&self, block: &BlockView) -> Result<(), Error> { - let consensus = self.shared.consensus(); - BlockVerifier::new(consensus).verify(block).map_err(|e| { - debug!("[process_block] BlockVerifier error {:?}", e); - e - })?; - - NonContextualBlockTxsVerifier::new(consensus) - .verify(block) - .map_err(|e| { + if let Some(ext) = self.shared.store().get_block_ext(&block.hash()) { + if let Some(verified) = ext.verified { debug!( - "[process_block] NonContextualBlockTxsVerifier error {:?}", - e + "block {}-{} has been verified, previously verified result: {}", + block.number(), + block.hash(), + verified ); - e - }) - .map(|_| ()) - } - - fn insert_block(&mut self, block: Arc, switch: Switch) -> Result { - let db_txn = Arc::new(self.shared.store().begin_transaction()); - let txn_snapshot = db_txn.get_snapshot(); - let _snapshot_tip_hash = db_txn.get_update_for_tip_hash(&txn_snapshot); - - // insert_block are assumed be executed in single thread - if txn_snapshot.block_exists(&block.header().hash()) { - return Ok(false); - } - // non-contextual verify - if !switch.disable_non_contextual() { - self.non_contextual_verify(&block)?; + return if verified { + Ok(false) + } else { + Err(InternalErrorKind::Other + .other("block previously verified failed") + .into()) + }; + } } - let mut total_difficulty = U256::zero(); - let mut fork = ForkChanges::default(); - - let parent_ext = txn_snapshot - .get_block_ext(&block.data().header().raw().parent_hash()) - .expect("parent already store"); - - let parent_header = txn_snapshot - .get_block_header(&block.data().header().raw().parent_hash()) - .expect("parent already store"); - let cannon_total_difficulty = parent_ext.total_difficulty.to_owned() + block.header().difficulty(); @@ -419,16 +266,6 @@ impl ChainService { .into()); } - db_txn.insert_block(&block)?; - - let next_block_epoch = self - .shared - .consensus() - .next_epoch_ext(&parent_header, &txn_snapshot.borrow_as_data_loader()) - .expect("epoch should be stored"); - let new_epoch = next_block_epoch.is_head(); - let epoch = next_block_epoch.epoch(); - let ext = BlockExt { received_at: unix_time_as_millis(), total_difficulty: cannon_total_difficulty.clone(), @@ -439,46 +276,60 @@ impl ChainService { txs_sizes: None, }; - db_txn.insert_block_epoch_index( - &block.header().hash(), - &epoch.last_block_hash_in_previous_epoch(), - )?; - if new_epoch { - db_txn.insert_epoch_ext(&epoch.last_block_hash_in_previous_epoch(), &epoch)?; - } - let shared_snapshot = Arc::clone(&self.shared.snapshot()); let origin_proposals = shared_snapshot.proposals(); let current_tip_header = shared_snapshot.tip_header(); - let current_total_difficulty = shared_snapshot.total_difficulty().to_owned(); - debug!( - "Current difficulty = {:#x}, cannon = {:#x}", - current_total_difficulty, cannon_total_difficulty, - ); // is_better_than let new_best_block = cannon_total_difficulty > current_total_difficulty; + let mut fork = ForkChanges::default(); + + let next_block_epoch = self + .shared + .consensus() + .next_epoch_ext(parent_header, 
&self.shared.store().borrow_as_data_loader()) + .expect("epoch should be stored"); + let new_epoch = next_block_epoch.is_head(); + let epoch = next_block_epoch.epoch(); + + let db_txn = Arc::new(self.shared.store().begin_transaction()); + let txn_snapshot = db_txn.get_snapshot(); + let _snapshot_tip_hash = db_txn.get_update_for_tip_hash(&txn_snapshot); + + db_txn.insert_block_epoch_index( + &block.header().hash(), + &epoch.last_block_hash_in_previous_epoch(), + )?; + if new_epoch { + db_txn.insert_epoch_ext(&epoch.last_block_hash_in_previous_epoch(), &epoch)?; + } + if new_best_block { - debug!( - "Newly found best block : {} => {:#x}, difficulty diff = {:#x}", + info!( + "[verify block] new best block found: {} => {:#x}, difficulty diff = {:#x}, unverified_tip: {}", block.header().number(), block.header().hash(), - &cannon_total_difficulty - ¤t_total_difficulty + &cannon_total_difficulty - ¤t_total_difficulty, + self.shared.get_unverified_tip().number(), ); - self.find_fork(&mut fork, current_tip_header.number(), &block, ext); + self.find_fork(&mut fork, current_tip_header.number(), block, ext); self.rollback(&fork, &db_txn)?; // update and verify chain root // MUST update index before reconcile_main_chain + let begin_reconcile_main_chain = std::time::Instant::now(); self.reconcile_main_chain(Arc::clone(&db_txn), &mut fork, switch)?; + trace!( + "reconcile_main_chain cost {:?}", + begin_reconcile_main_chain.elapsed() + ); db_txn.insert_tip_header(&block.header())?; if new_epoch || fork.has_detached() { db_txn.insert_current_epoch_ext(&epoch)?; } - total_difficulty = cannon_total_difficulty.clone(); } else { db_txn.insert_block_ext(&block.header().hash(), &ext)?; } @@ -491,7 +342,7 @@ impl ChainService { tip_header.number(), tip_header.hash(), tip_header.epoch(), - total_difficulty, + cannon_total_difficulty, block.transactions().len() ); @@ -503,7 +354,7 @@ impl ChainService { let new_snapshot = self.shared - .new_snapshot(tip_header, total_difficulty, epoch, new_proposals); + .new_snapshot(tip_header, cannon_total_difficulty, epoch, new_proposals); self.shared.store_snapshot(Arc::clone(&new_snapshot)); @@ -515,15 +366,14 @@ impl ChainService { fork.detached_proposal_id().clone(), new_snapshot, ) { - error!("Notify update_tx_pool_for_reorg error {}", e); + error!("[verify block] notify update_tx_pool_for_reorg error {}", e); } } - let block_ref: &BlockView = █ self.shared .notify_controller() - .notify_new_block(block_ref.clone()); - if log_enabled!(ckb_logger::Level::Debug) { + .notify_new_block(block.to_owned()); + if log_enabled!(ckb_logger::Level::Trace) { self.print_chain(10); } if let Some(metrics) = ckb_metrics::handle() { @@ -532,7 +382,7 @@ impl ChainService { } else { self.shared.refresh_snapshot(); info!( - "uncle: {}, hash: {:#x}, epoch: {:#}, total_diff: {:#x}, txs: {}", + "[verify block] uncle: {}, hash: {:#x}, epoch: {:#}, total_diff: {:#x}, txs: {}", block.header().number(), block.header().hash(), block.header().epoch(), @@ -542,13 +392,11 @@ impl ChainService { let tx_pool_controller = self.shared.tx_pool_controller(); if tx_pool_controller.service_started() { - let block_ref: &BlockView = █ - if let Err(e) = tx_pool_controller.notify_new_uncle(block_ref.as_uncle()) { - error!("Notify new_uncle error {}", e); + if let Err(e) = tx_pool_controller.notify_new_uncle(block.as_uncle()) { + error!("[verify block] notify new_uncle error {}", e); } } } - Ok(true) } @@ -585,7 +433,7 @@ impl ChainService { let proposal_start = cmp::max(1, (new_tip + 
1).saturating_sub(proposal_window.farthest())); - debug!("Reload_proposal_table [{}, {}]", proposal_start, common); + debug!("reload_proposal_table [{}, {}]", proposal_start, common); for bn in proposal_start..=common { let blk = self .shared @@ -776,7 +624,13 @@ impl ChainService { { if !switch.disable_all() { if found_error.is_none() { + let log_now = std::time::Instant::now(); let resolved = self.resolve_block_transactions(&txn, b, &verify_context); + debug!( + "resolve_block_transactions {} cost: {:?}", + b.hash(), + log_now.elapsed() + ); match resolved { Ok(resolved) => { let verified = { @@ -787,7 +641,14 @@ impl ChainService { Arc::clone(&txs_verify_cache), &mmr, ); - contextual_block_verifier.verify(&resolved, b) + let log_now = std::time::Instant::now(); + let verify_result = contextual_block_verifier.verify(&resolved, b); + debug!( + "contextual_block_verifier {} cost: {:?}", + b.hash(), + log_now.elapsed() + ); + verify_result }; match verified { Ok((cycles, cache_entries)) => { @@ -939,13 +800,13 @@ impl ChainService { fn print_error(&self, b: &BlockView, err: &Error) { error!( - "Block verify error. Block number: {}, hash: {}, error: {:?}", + "block verify error, block number: {}, hash: {}, error: {:?}", b.header().number(), b.header().hash(), err ); if log_enabled!(ckb_logger::Level::Trace) { - trace!("Block {}", b.data()); + trace!("block {}", b); } } @@ -968,6 +829,62 @@ impl ChainService { debug!("}}"); } + + fn make_fork_for_truncate(&self, target: &HeaderView, current_tip: &HeaderView) -> ForkChanges { + let mut fork = ForkChanges::default(); + let store = self.shared.store(); + for bn in (target.number() + 1)..=current_tip.number() { + let hash = store.get_block_hash(bn).expect("index checked"); + let old_block = store.get_block(&hash).expect("index checked"); + fork.detached_blocks.push_back(old_block); + } + is_sorted_assert(&fork); + fork + } + + // Truncate the main chain + // Use for testing only + pub(crate) fn truncate(&mut self, target_tip_hash: &Byte32) -> Result<(), Error> { + let snapshot = Arc::clone(&self.shared.snapshot()); + assert!(snapshot.is_main_chain(target_tip_hash)); + + let target_tip_header = snapshot.get_block_header(target_tip_hash).expect("checked"); + let target_block_ext = snapshot.get_block_ext(target_tip_hash).expect("checked"); + let target_epoch_ext = snapshot + .get_block_epoch_index(target_tip_hash) + .and_then(|index| snapshot.get_epoch_ext(&index)) + .expect("checked"); + let origin_proposals = snapshot.proposals(); + let mut fork = self.make_fork_for_truncate(&target_tip_header, snapshot.tip_header()); + + let db_txn = self.shared.store().begin_transaction(); + self.rollback(&fork, &db_txn)?; + + db_txn.insert_tip_header(&target_tip_header)?; + db_txn.insert_current_epoch_ext(&target_epoch_ext)?; + + for blk in fork.attached_blocks() { + db_txn.delete_block(blk)?; + } + db_txn.commit()?; + + self.update_proposal_table(&fork); + let (detached_proposal_id, new_proposals) = self + .proposal_table + .finalize(origin_proposals, target_tip_header.number()); + fork.detached_proposal_id = detached_proposal_id; + + let new_snapshot = self.shared.new_snapshot( + target_tip_header, + target_block_ext.total_difficulty, + target_epoch_ext, + new_proposals, + ); + + self.shared.store_snapshot(Arc::clone(&new_snapshot)); + + Ok(()) + } } #[cfg(debug_assertions)] diff --git a/chain/src/init.rs b/chain/src/init.rs new file mode 100644 index 0000000000..4dc9d2d919 --- /dev/null +++ b/chain/src/init.rs @@ -0,0 +1,135 @@ +#![allow(missing_docs)] + 
+//! Bootstrap InitLoadUnverified, PreloadUnverifiedBlock, ChainService and ConsumeUnverified threads.
+use crate::chain_service::ChainService;
+use crate::consume_unverified::ConsumeUnverifiedBlocks;
+use crate::init_load_unverified::InitLoadUnverified;
+use crate::orphan_broker::OrphanBroker;
+use crate::preload_unverified_blocks_channel::PreloadUnverifiedBlocksChannel;
+use crate::utils::orphan_block_pool::OrphanBlockPool;
+use crate::{chain_controller::ChainController, LonelyBlockHash, UnverifiedBlock};
+use ckb_channel::{self as channel, SendError};
+use ckb_constant::sync::BLOCK_DOWNLOAD_WINDOW;
+use ckb_logger::warn;
+use ckb_shared::ChainServicesBuilder;
+use ckb_stop_handler::register_thread;
+use ckb_types::packed::Byte32;
+use dashmap::DashSet;
+use std::sync::atomic::AtomicBool;
+use std::sync::Arc;
+use std::thread;
+
+const ORPHAN_BLOCK_SIZE: usize = BLOCK_DOWNLOAD_WINDOW as usize;
+
+pub fn start_chain_services(builder: ChainServicesBuilder) -> ChainController {
+    let orphan_blocks_broker = Arc::new(OrphanBlockPool::with_capacity(ORPHAN_BLOCK_SIZE));
+
+    let (truncate_block_tx, truncate_block_rx) = channel::bounded(1);
+
+    let (preload_unverified_stop_tx, preload_unverified_stop_rx) = ckb_channel::bounded::<()>(1);
+
+    let (preload_unverified_tx, preload_unverified_rx) =
+        channel::bounded::<LonelyBlockHash>(BLOCK_DOWNLOAD_WINDOW as usize * 10);
+
+    let (unverified_queue_stop_tx, unverified_queue_stop_rx) = ckb_channel::bounded::<()>(1);
+    let (unverified_block_tx, unverified_block_rx) = channel::bounded::<UnverifiedBlock>(128usize);
+
+    let is_pending_verify: Arc<DashSet<Byte32>> = Arc::new(DashSet::new());
+
+    let consumer_unverified_thread = thread::Builder::new()
+        .name("consume_unverified_blocks".into())
+        .spawn({
+            let shared = builder.shared.clone();
+            let is_pending_verify = Arc::clone(&is_pending_verify);
+            move || {
+                let consume_unverified = ConsumeUnverifiedBlocks::new(
+                    shared,
+                    unverified_block_rx,
+                    truncate_block_rx,
+                    builder.proposal_table,
+                    is_pending_verify,
+                    unverified_queue_stop_rx,
+                );
+
+                consume_unverified.start();
+            }
+        })
+        .expect("start unverified_queue consumer thread should ok");
+
+    let preload_unverified_block_thread = thread::Builder::new()
+        .name("preload_unverified_block".into())
+        .spawn({
+            let shared = builder.shared.clone();
+            move || {
+                let preload_unverified_block = PreloadUnverifiedBlocksChannel::new(
+                    shared,
+                    preload_unverified_rx,
+                    unverified_block_tx,
+                    preload_unverified_stop_rx,
+                );
+                preload_unverified_block.start()
+            }
+        })
+        .expect("start preload_unverified_block should ok");
+
+    let (process_block_tx, process_block_rx) = channel::bounded(0);
+
+    let is_verifying_unverified_blocks_on_startup = Arc::new(AtomicBool::new(true));
+
+    let chain_controller = ChainController::new(
+        process_block_tx,
+        truncate_block_tx,
+        Arc::clone(&orphan_blocks_broker),
+        Arc::clone(&is_verifying_unverified_blocks_on_startup),
+    );
+
+    let init_load_unverified_thread = thread::Builder::new()
+        .name("init_load_unverified_blocks".into())
+        .spawn({
+            let chain_controller = chain_controller.clone();
+            let shared = builder.shared.clone();
+
+            move || {
+                let init_load_unverified: InitLoadUnverified = InitLoadUnverified::new(
+                    shared,
+                    chain_controller,
+                    is_verifying_unverified_blocks_on_startup,
+                );
+                init_load_unverified.start();
+            }
+        })
+        .expect("start init_load_unverified thread should ok");
+
+    let consume_orphan = OrphanBroker::new(
+        builder.shared.clone(),
+        orphan_blocks_broker,
+        preload_unverified_tx,
+        is_pending_verify,
+    );
+
+    let chain_service: ChainService =
+        ChainService::new(builder.shared, process_block_rx, consume_orphan);
+    let chain_service_thread = thread::Builder::new()
+        .name("ChainService".into())
+        .spawn({
+            move || {
+                chain_service.start_process_block();
+
+                let _ = init_load_unverified_thread.join();
+
+                if preload_unverified_stop_tx.send(()).is_err() {
+                    warn!("trying to notify preload unverified thread to stop, but preload_unverified_stop_tx already closed");
+                }
+                let _ = preload_unverified_block_thread.join();
+
+                if let Err(SendError(_)) = unverified_queue_stop_tx.send(()) {
+                    warn!("trying to notify consume unverified thread to stop, but unverified_queue_stop_tx already closed");
+                }
+                let _ = consumer_unverified_thread.join();
+            }
+        })
+        .expect("start chain_service thread should ok");
+    register_thread("ChainService", chain_service_thread);
+
+    chain_controller
+}
diff --git a/chain/src/init_load_unverified.rs b/chain/src/init_load_unverified.rs
new file mode 100644
index 0000000000..e2c4ebae00
--- /dev/null
+++ b/chain/src/init_load_unverified.rs
@@ -0,0 +1,122 @@
+use crate::utils::orphan_block_pool::EXPIRED_EPOCH;
+use crate::{ChainController, LonelyBlock};
+use ckb_constant::sync::BLOCK_DOWNLOAD_WINDOW;
+use ckb_db::{Direction, IteratorMode};
+use ckb_db_schema::COLUMN_NUMBER_HASH;
+use ckb_logger::info;
+use ckb_shared::Shared;
+use ckb_stop_handler::has_received_stop_signal;
+use ckb_store::ChainStore;
+use ckb_types::core::{BlockNumber, BlockView};
+use ckb_types::packed;
+use ckb_types::prelude::{Entity, FromSliceShouldBeOk, Pack, Reader};
+use std::cmp;
+use std::sync::atomic::AtomicBool;
+use std::sync::Arc;
+
+pub(crate) struct InitLoadUnverified {
+    shared: Shared,
+    chain_controller: ChainController,
+    is_verifying_unverified_blocks_on_startup: Arc<AtomicBool>,
+}
+
+impl InitLoadUnverified {
+    pub(crate) fn new(
+        shared: Shared,
+        chain_controller: ChainController,
+        is_verifying_unverified_blocks_on_startup: Arc<AtomicBool>,
+    ) -> Self {
+        InitLoadUnverified {
+            shared,
+            chain_controller,
+            is_verifying_unverified_blocks_on_startup,
+        }
+    }
+
+    fn find_unverified_block_hashes(&self, check_unverified_number: u64) -> Vec<packed::Byte32> {
+        let pack_number: packed::Uint64 = check_unverified_number.pack();
+        let prefix = pack_number.as_slice();
+
+        // If a block has `COLUMN_NUMBER_HASH` but not `BlockExt`,
+        // it indicates an unverified block inserted during the last shutdown.
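Put differently, the recovery scan's membership test boils down to a single predicate; a hedged restatement for illustration only, with `store` standing in for `self.shared.store()`:

    // Illustrative sketch, not part of the diff: a hash found under
    // COLUMN_NUMBER_HASH counts as unverified exactly when its BlockExt
    // row has never been written.
    let is_unverified = |hash: &packed::Byte32| store.get_block_ext(hash).is_none();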
+ let unverified_hashes: Vec = self + .shared + .store() + .get_iter( + COLUMN_NUMBER_HASH, + IteratorMode::From(prefix, Direction::Forward), + ) + .take_while(|(key, _)| key.starts_with(prefix)) + .map(|(key_number_hash, _v)| { + let reader = + packed::NumberHashReader::from_slice_should_be_ok(key_number_hash.as_ref()); + let unverified_block_hash = reader.block_hash().to_entity(); + unverified_block_hash + }) + .filter(|hash| self.shared.store().get_block_ext(hash).is_none()) + .collect::>(); + unverified_hashes + } + + pub(crate) fn start(&self) { + info!( + "finding unverified blocks, current tip: {}-{}", + self.shared.snapshot().tip_number(), + self.shared.snapshot().tip_hash() + ); + + self.find_and_verify_unverified_blocks(); + + self.is_verifying_unverified_blocks_on_startup + .store(false, std::sync::atomic::Ordering::Release); + info!("find unverified blocks finished"); + } + + fn find_unverified_blocks(&self, f: F) + where + F: Fn(&packed::Byte32), + { + let tip_number: BlockNumber = self.shared.snapshot().tip_number(); + let start_check_number = cmp::max( + 1, + tip_number.saturating_sub(EXPIRED_EPOCH * self.shared.consensus().max_epoch_length()), + ); + let end_check_number = tip_number + BLOCK_DOWNLOAD_WINDOW * 10; + + for check_unverified_number in start_check_number..=end_check_number { + if has_received_stop_signal() { + info!("init_unverified_blocks thread received exit signal, exit now"); + return; + } + + // start checking `check_unverified_number` have COLUMN_NUMBER_HASH value in db? + let unverified_hashes: Vec = + self.find_unverified_block_hashes(check_unverified_number); + + for unverified_hash in unverified_hashes { + f(&unverified_hash); + } + } + } + + fn find_and_verify_unverified_blocks(&self) { + self.find_unverified_blocks(|unverified_hash| { + let unverified_block: BlockView = self + .shared + .store() + .get_block(unverified_hash) + .expect("unverified block must be in db"); + + if has_received_stop_signal() { + return; + } + + self.chain_controller + .asynchronous_process_lonely_block(LonelyBlock { + block: Arc::new(unverified_block), + switch: None, + verify_callback: None, + }); + }); + } +} diff --git a/chain/src/lib.rs b/chain/src/lib.rs index 5898633b83..5ffd268222 100644 --- a/chain/src/lib.rs +++ b/chain/src/lib.rs @@ -1,3 +1,5 @@ +#![allow(missing_docs)] + //! CKB chain service. //! //! [`ChainService`] background base on database, handle block importing, @@ -5,7 +7,226 @@ //! //! [`ChainService`]: chain/struct.ChainService.html //! 
[`ChainController`]: chain/struct.ChainController.html +use ckb_error::Error; +use ckb_types::core::service::Request; +use ckb_types::core::{BlockNumber, BlockView, EpochNumber, HeaderView}; +use ckb_types::packed::Byte32; +use ckb_verification_traits::Switch; +use std::sync::Arc; -pub mod chain; +mod chain_controller; +mod chain_service; +pub mod consume_unverified; +mod init; +mod init_load_unverified; +mod orphan_broker; +mod preload_unverified_blocks_channel; #[cfg(test)] mod tests; +mod utils; + +pub use chain_controller::ChainController; +use ckb_logger::{error, info}; +use ckb_store::{ChainDB, ChainStore}; +use ckb_types::prelude::{Pack, Unpack}; +use ckb_types::{BlockNumberAndHash, H256}; +pub use init::start_chain_services; + +type ProcessBlockRequest = Request<LonelyBlock, ()>; +type TruncateRequest = Request<Byte32, Result<(), Error>>; + +/// VerifyResult is the result type to represent the result of block verification +/// +/// Ok(true) : it's a newly verified block +/// Ok(false): the block is an uncle block and was not verified +/// Err(err) : it's a block which failed to verify +pub type VerifyResult = Result<bool, Error>; + +/// VerifyCallback is the callback type to be called after block verification +pub type VerifyCallback = Box<dyn FnOnce(VerifyResult) + Send + Sync>; + +/// RemoteBlock is received from ckb-sync and ckb-relayer +pub struct RemoteBlock { + /// block + pub block: Arc<BlockView>, + + /// Relayer and Synchronizer will use this callback to ban misbehaving peers + pub verify_callback: VerifyCallback, +} + +/// LonelyBlock is a block whose parent has not yet been checked for presence in the store +pub struct LonelyBlock { + /// block + pub block: Arc<BlockView>, + + /// The Switch to control the verification process + pub switch: Option<Switch>, + + /// The optional verify_callback + pub verify_callback: Option<VerifyCallback>, +} + +/// LonelyBlockHash is the hash-only counterpart of LonelyBlock: a block whose parent has not yet been checked for presence in the store +pub struct LonelyBlockHash { + /// block + pub block_number_and_hash: BlockNumberAndHash, + + pub parent_hash: Byte32, + + pub epoch_number: EpochNumber, + + /// The Switch to control the verification process + pub switch: Option<Switch>, + + /// The optional verify_callback + pub verify_callback: Option<VerifyCallback>, +} + +impl From<LonelyBlock> for LonelyBlockHash { + fn from(val: LonelyBlock) -> Self { + let LonelyBlock { + block, + switch, + verify_callback, + } = val; + let block_hash_h256: H256 = block.hash().unpack(); + let block_number: BlockNumber = block.number(); + let parent_hash_h256: H256 = block.parent_hash().unpack(); + let block_hash = block_hash_h256.pack(); + let parent_hash = parent_hash_h256.pack(); + + let epoch_number: EpochNumber = block.epoch().number(); + + LonelyBlockHash { + block_number_and_hash: BlockNumberAndHash { + number: block_number, + hash: block_hash, + }, + parent_hash, + epoch_number, + switch, + verify_callback, + } + } +} + +impl LonelyBlockHash { + pub fn execute_callback(self, verify_result: VerifyResult) { + if let Some(verify_callback) = self.verify_callback { + verify_callback(verify_result); + } + } + + pub fn number_hash(&self) -> BlockNumberAndHash { + self.block_number_and_hash.clone() + } + + pub fn epoch_number(&self) -> EpochNumber { + self.epoch_number + } + + pub fn hash(&self) -> Byte32 { + self.block_number_and_hash.hash() + } + + pub fn parent_hash(&self) -> Byte32 { + self.parent_hash.clone() + } + + pub fn number(&self) -> BlockNumber { + self.block_number_and_hash.number() + } +} + +impl LonelyBlock { + pub(crate) fn block(&self) -> &Arc<BlockView> { + &self.block + } + + pub fn switch(&self) -> Option<Switch> { + self.switch + } + + pub fn execute_callback(self, verify_result: VerifyResult) { + if let
Some(verify_callback) = self.verify_callback { + verify_callback(verify_result); + } + } +} + +pub(crate) struct GlobalIndex { + pub(crate) number: BlockNumber, + pub(crate) hash: Byte32, + pub(crate) unseen: bool, +} + +impl GlobalIndex { + pub(crate) fn new(number: BlockNumber, hash: Byte32, unseen: bool) -> GlobalIndex { + GlobalIndex { + number, + hash, + unseen, + } + } + + pub(crate) fn forward(&mut self, hash: Byte32) { + self.number -= 1; + self.hash = hash; + } +} + +/// UnverifiedBlock will be consumed by ConsumeUnverified thread +struct UnverifiedBlock { + // block + block: Arc<BlockView>, + // the switch to control the verification process + switch: Option<Switch>, + // verify callback + verify_callback: Option<VerifyCallback>, + // parent header + parent_header: HeaderView, +} + +pub(crate) fn delete_unverified_block( + store: &ChainDB, + block_hash: Byte32, + block_number: BlockNumber, + parent_hash: Byte32, +) { + info!( + "parent: {}, deleting this block {}-{}", + parent_hash, block_number, block_hash, + ); + + let db_txn = store.begin_transaction(); + let block_op: Option<BlockView> = db_txn.get_block(&block_hash); + match block_op { + Some(block) => { + if let Err(err) = db_txn.delete_block(&block) { + error!( + "delete block {}-{} failed {:?}", + block_number, block_hash, err + ); + return; + } + if let Err(err) = db_txn.commit() { + error!( + "commit delete block {}-{} failed {:?}", + block_number, block_hash, err + ); + return; + } + + info!( + "parent: {}, deleted this block {}-{}", + parent_hash, block_number, block_hash, + ); + } + None => { + error!( + "want to delete block {}-{}, but it was not found in the db", + block_number, block_hash + ); + } + } +} diff --git a/chain/src/orphan_broker.rs b/chain/src/orphan_broker.rs new file mode 100644 index 0000000000..6becc7824d --- /dev/null +++ b/chain/src/orphan_broker.rs @@ -0,0 +1,217 @@ +#![allow(missing_docs)] + +use crate::utils::orphan_block_pool::{OrphanBlockPool, ParentHash}; +use crate::{delete_unverified_block, LonelyBlockHash, VerifyResult}; +use ckb_channel::Sender; +use ckb_error::InternalErrorKind; +use ckb_logger::internal::trace; +use ckb_logger::{debug, error, info}; +use ckb_shared::block_status::BlockStatus; +use ckb_shared::Shared; +use ckb_store::ChainStore; +use ckb_types::{packed::Byte32, U256}; +use dashmap::DashSet; +use std::sync::Arc; + +pub(crate) struct OrphanBroker { + shared: Shared, + + orphan_blocks_broker: Arc<OrphanBlockPool>, + is_pending_verify: Arc<DashSet<Byte32>>, + preload_unverified_tx: Sender<LonelyBlockHash>, +} + +impl OrphanBroker { + pub(crate) fn new( + shared: Shared, + orphan_block_pool: Arc<OrphanBlockPool>, + preload_unverified_tx: Sender<LonelyBlockHash>, + is_pending_verify: Arc<DashSet<Byte32>>, + ) -> OrphanBroker { + OrphanBroker { + shared, + orphan_blocks_broker: orphan_block_pool, + is_pending_verify, + preload_unverified_tx, + } + } + + fn search_orphan_leader(&self, leader_hash: ParentHash) { + let leader_status = self.shared.get_block_status(&leader_hash); + + if leader_status.eq(&BlockStatus::BLOCK_INVALID) { + let descendants: Vec<LonelyBlockHash> = self + .orphan_blocks_broker + .remove_blocks_by_parent(&leader_hash); + for descendant in descendants { + self.process_invalid_block(descendant); + } + return; + } + + let leader_is_pending_verify = self.is_pending_verify.contains(&leader_hash); + if !leader_is_pending_verify && !leader_status.contains(BlockStatus::BLOCK_STORED) { + trace!( + "orphan leader: {} not stored {:?} and not in is_pending_verify: {}", + leader_hash, + leader_status, + leader_is_pending_verify + ); + return; + } + + let descendants: Vec<LonelyBlockHash> = self + .orphan_blocks_broker
.remove_blocks_by_parent(&leader_hash); + if descendants.is_empty() { + error!( + "leader {} does not have any descendants, this shouldn't happen", + leader_hash + ); + return; + } + self.accept_descendants(descendants); + } + + fn search_orphan_leaders(&self) { + for leader_hash in self.orphan_blocks_broker.clone_leaders() { + self.search_orphan_leader(leader_hash); + } + } + + fn delete_block(&self, lonely_block: &LonelyBlockHash) { + let block_hash = lonely_block.block_number_and_hash.hash(); + let block_number = lonely_block.block_number_and_hash.number(); + let parent_hash = lonely_block.parent_hash(); + + delete_unverified_block(self.shared.store(), block_hash, block_number, parent_hash); + } + + fn process_invalid_block(&self, lonely_block: LonelyBlockHash) { + let block_hash = lonely_block.block_number_and_hash.hash(); + let block_number = lonely_block.block_number_and_hash.number(); + let parent_hash = lonely_block.parent_hash(); + + self.delete_block(&lonely_block); + + self.shared + .insert_block_status(block_hash.clone(), BlockStatus::BLOCK_INVALID); + + let err: VerifyResult = Err(InternalErrorKind::Other + .other(format!( + "parent {} is invalid, so block {}-{} is invalid too", + parent_hash, block_number, block_hash + )) + .into()); + lonely_block.execute_callback(err); + } + + pub(crate) fn process_lonely_block(&self, lonely_block: LonelyBlockHash) { + let block_hash = lonely_block.block_number_and_hash.hash(); + let block_number = lonely_block.block_number_and_hash.number(); + let parent_hash = lonely_block.parent_hash(); + let parent_is_pending_verify = self.is_pending_verify.contains(&parent_hash); + let parent_status = self.shared.get_block_status(&parent_hash); + if parent_is_pending_verify || parent_status.contains(BlockStatus::BLOCK_STORED) { + debug!( + "parent {} has stored: {:?} or is_pending_verify: {}, processing descendant directly {}-{}", + parent_hash, + parent_status, + parent_is_pending_verify, + block_number, + block_hash, + ); + self.process_descendant(lonely_block); + } else if parent_status.eq(&BlockStatus::BLOCK_INVALID) { + self.process_invalid_block(lonely_block); + } else { + self.orphan_blocks_broker.insert(lonely_block); + } + + self.search_orphan_leaders(); + + if let Some(metrics) = ckb_metrics::handle() { + metrics + .ckb_chain_orphan_count + .set(self.orphan_blocks_broker.len() as i64) + } + } + + pub(crate) fn clean_expired_orphans(&self) { + debug!("clean expired orphans"); + let tip_epoch_number = self + .shared + .store() + .get_tip_header() + .expect("tip header") + .epoch() + .number(); + let expired_orphans = self + .orphan_blocks_broker + .clean_expired_blocks(tip_epoch_number); + for expired_orphan in expired_orphans { + self.delete_block(&expired_orphan); + self.shared.remove_header_view(&expired_orphan.hash()); + self.shared.remove_block_status(&expired_orphan.hash()); + info!( + "cleaned expired orphan: {}-{}", + expired_orphan.number(), + expired_orphan.hash() + ); + } + } + + fn send_unverified_block(&self, lonely_block: LonelyBlockHash) { + let block_number = lonely_block.block_number_and_hash.number(); + let block_hash = lonely_block.block_number_and_hash.hash(); + + if let Some(metrics) = ckb_metrics::handle() { + metrics + .ckb_chain_preload_unverified_block_ch_len + .set(self.preload_unverified_tx.len() as i64) + } + + match self.preload_unverified_tx.send(lonely_block) { + Ok(_) => { + debug!( + "process descendant block success {}-{}", + block_number, block_hash + ); + } + Err(_) => { + info!("send unverified_block_tx
failed, the receiver has been closed"); + return; + } + }; + if block_number > self.shared.snapshot().tip_number() { + self.shared.set_unverified_tip(ckb_shared::HeaderIndex::new( + block_number, + block_hash.clone(), + U256::from(0u64), + )); + + if let Some(handle) = ckb_metrics::handle() { + handle.ckb_chain_unverified_tip.set(block_number as i64); + } + debug!( + "set unverified_tip to {}-{}, while unverified_tip - verified_tip = {}", + block_number.clone(), + block_hash, + block_number.saturating_sub(self.shared.snapshot().tip_number()) + ) + } + } + + fn process_descendant(&self, lonely_block: LonelyBlockHash) { + self.is_pending_verify + .insert(lonely_block.block_number_and_hash.hash()); + + self.send_unverified_block(lonely_block) + } + + fn accept_descendants(&self, descendants: Vec<LonelyBlockHash>) { + for descendant_block in descendants { + self.process_descendant(descendant_block); + } + } +} diff --git a/chain/src/preload_unverified_blocks_channel.rs b/chain/src/preload_unverified_blocks_channel.rs new file mode 100644 index 0000000000..23f593bd79 --- /dev/null +++ b/chain/src/preload_unverified_blocks_channel.rs @@ -0,0 +1,105 @@ +use crate::{LonelyBlockHash, UnverifiedBlock}; +use ckb_channel::{Receiver, Sender}; +use ckb_logger::{debug, error, info}; +use ckb_shared::Shared; +use ckb_store::ChainStore; +use crossbeam::select; +use std::sync::Arc; + +pub(crate) struct PreloadUnverifiedBlocksChannel { + shared: Shared, + preload_unverified_rx: Receiver<LonelyBlockHash>, + + unverified_block_tx: Sender<UnverifiedBlock>, + + stop_rx: Receiver<()>, +} + +impl PreloadUnverifiedBlocksChannel { + pub(crate) fn new( + shared: Shared, + preload_unverified_rx: Receiver<LonelyBlockHash>, + unverified_block_tx: Sender<UnverifiedBlock>, + stop_rx: Receiver<()>, + ) -> Self { + PreloadUnverifiedBlocksChannel { + shared, + preload_unverified_rx, + unverified_block_tx, + stop_rx, + } + } + + pub(crate) fn start(&self) { + loop { + select!
{ + recv(self.preload_unverified_rx) -> msg => match msg { + Ok(preload_unverified_block_task) => { + self.preload_unverified_channel(preload_unverified_block_task); + }, + Err(err) => { + error!("recv preload_task_rx failed, err: {:?}", err); + break; + } + }, + recv(self.stop_rx) -> _ => { + info!("preload_unverified_blocks thread received exit signal, exit now"); + break; + } + } + } + } + + fn preload_unverified_channel(&self, task: LonelyBlockHash) { + let block_number = task.block_number_and_hash.number(); + let block_hash = task.block_number_and_hash.hash(); + let unverified_block: UnverifiedBlock = self.load_full_unverified_block_by_hash(task); + + if let Some(metrics) = ckb_metrics::handle() { + metrics + .ckb_chain_unverified_block_ch_len + .set(self.unverified_block_tx.len() as i64) + }; + + if self.unverified_block_tx.send(unverified_block).is_err() { + info!( + "send unverified_block to unverified_block_tx failed, the receiver has been closed" + ); + } else { + debug!("preload unverified block {}-{}", block_number, block_hash,); + } + } + + fn load_full_unverified_block_by_hash(&self, task: LonelyBlockHash) -> UnverifiedBlock { + let _trace_timecost = ckb_metrics::handle() + .map(|metrics| metrics.ckb_chain_load_full_unverified_block.start_timer()); + + let LonelyBlockHash { + block_number_and_hash, + parent_hash, + epoch_number: _epoch_number, + switch, + verify_callback, + } = task; + + let block_view = self + .shared + .store() + .get_block(&block_number_and_hash.hash()) + .expect("block stored"); + let block = Arc::new(block_view); + let parent_header = { + self.shared + .store() + .get_block_header(&parent_hash) + .expect("parent header stored") + }; + + UnverifiedBlock { + block, + switch, + verify_callback, + parent_header, + } + } +} diff --git a/chain/src/tests/basic.rs b/chain/src/tests/basic.rs index e8ad1bf182..b1d2947a82 100644 --- a/chain/src/tests/basic.rs +++ b/chain/src/tests/basic.rs @@ -1,5 +1,5 @@ -use crate::chain::ChainController; use crate::tests::util::start_chain; +use crate::ChainController; use ckb_chain_spec::consensus::{Consensus, ConsensusBuilder}; use ckb_dao_utils::genesis_dao_data; use ckb_error::assert_error_eq; @@ -34,7 +34,7 @@ fn repeat_process_block() { let block = Arc::new(chain.blocks().last().unwrap().clone()); assert!(chain_controller - .internal_process_block(Arc::clone(&block), Switch::DISABLE_EXTENSION) + .blocking_process_block_with_switch(Arc::clone(&block), Switch::DISABLE_EXTENSION) .expect("process block ok")); assert_eq!( shared @@ -46,7 +46,7 @@ ); assert!(!chain_controller - .internal_process_block(Arc::clone(&block), Switch::DISABLE_EXTENSION) + .blocking_process_block_with_switch(Arc::clone(&block), Switch::DISABLE_EXTENSION) .expect("process block ok")); assert_eq!( shared @@ -58,6 +58,59 @@ ); } +#[test] +fn process_genesis_block() { + let tx = TransactionBuilder::default() + .witness(Script::default().into_witness()) + .input(CellInput::new(OutPoint::null(), 0)) + .outputs(vec![ + CellOutputBuilder::default() + .capacity(capacity_bytes!(100_000_000).pack()) + .build(); + 100 + ]) + .outputs_data(vec![Bytes::new(); 100].pack()) + .build(); + let always_success_tx = create_always_success_tx(); + + let dao = genesis_dao_data(vec![&tx, &always_success_tx]).unwrap(); + + let genesis_block = BlockBuilder::default() + .transaction(tx.clone()) + .transaction(always_success_tx.clone()) + .compact_target(difficulty_to_compact(U256::from(1000u64)).pack()) + .dao(dao.clone())
.build(); + + let consensus = ConsensusBuilder::default() + .genesis_block(genesis_block) + .build(); + let (chain_controller, shared, _parent) = start_chain(Some(consensus)); + + let block = Arc::new(shared.consensus().genesis_block().clone()); + + let result = chain_controller.blocking_process_block(Arc::clone(&block)); + assert!(!result.expect("process block ok")); + assert_eq!( + shared + .store() + .get_block_ext(&block.header().hash()) + .unwrap() + .verified, + Some(true) + ); + + let different_genesis_block = BlockBuilder::default() + .transaction(tx) + .transaction(always_success_tx) + // Difficulty is changed here + .compact_target(difficulty_to_compact(U256::from(999u64)).pack()) + .dao(dao) + .build(); + let result = chain_controller.blocking_process_block(Arc::new(different_genesis_block)); + assert!(result.is_err()); +} + #[test] fn test_genesis_transaction_spend() { // let data: Vec = ; @@ -108,7 +161,7 @@ fn test_genesis_transaction_spend() { for block in &chain.blocks()[0..10] { assert!(chain_controller - .internal_process_block(Arc::new(block.clone()), Switch::DISABLE_ALL) + .blocking_process_block_with_switch(Arc::new(block.clone()), Switch::DISABLE_ALL) .is_ok()); } @@ -165,7 +218,7 @@ fn test_transaction_spend_in_same_block() { for block in chain.blocks() { chain_controller - .internal_process_block( + .blocking_process_block_with_switch( Arc::new(block.clone()), Switch::DISABLE_EPOCH | Switch::DISABLE_EXTENSION, ) @@ -208,7 +261,7 @@ fn test_transaction_spend_in_same_block() { parent_number4, epoch.number_with_fraction(parent_number4), parent_hash4, - 2 + 2, )), mem_cell_data: None, mem_cell_data_hash: None, @@ -239,13 +292,13 @@ fn test_transaction_conflict_in_same_block() { for block in chain.blocks().iter().take(3) { chain_controller - .internal_process_block(Arc::new(block.clone()), Switch::DISABLE_EXTENSION) + .blocking_process_block_with_switch(Arc::new(block.clone()), Switch::DISABLE_EXTENSION) .expect("process block ok"); } assert_error_eq!( OutPointError::Dead(OutPoint::new(tx1_hash, 0)), chain_controller - .internal_process_block( + .blocking_process_block_with_switch( Arc::new(chain.blocks()[3].clone()), Switch::DISABLE_EXTENSION ) @@ -279,13 +332,13 @@ fn test_transaction_conflict_in_different_blocks() { for block in chain.blocks().iter().take(4) { chain_controller - .internal_process_block(Arc::new(block.clone()), Switch::DISABLE_EXTENSION) + .blocking_process_block_with_switch(Arc::new(block.clone()), Switch::DISABLE_EXTENSION) .expect("process block ok"); } assert_error_eq!( OutPointError::Unknown(OutPoint::new(tx1_hash, 0)), chain_controller - .internal_process_block( + .blocking_process_block_with_switch( Arc::new(chain.blocks()[4].clone()), Switch::DISABLE_EXTENSION ) @@ -316,13 +369,13 @@ fn test_invalid_out_point_index_in_same_block() { for block in chain.blocks().iter().take(3) { chain_controller - .internal_process_block(Arc::new(block.clone()), Switch::DISABLE_EXTENSION) + .blocking_process_block_with_switch(Arc::new(block.clone()), Switch::DISABLE_EXTENSION) .expect("process block ok"); } assert_error_eq!( OutPointError::Unknown(OutPoint::new(tx1_hash, 1)), chain_controller - .internal_process_block( + .blocking_process_block_with_switch( Arc::new(chain.blocks()[3].clone()), Switch::DISABLE_EXTENSION ) @@ -354,14 +407,14 @@ fn test_invalid_out_point_index_in_different_blocks() { for block in chain.blocks().iter().take(4) { chain_controller - .internal_process_block(Arc::new(block.clone()), Switch::DISABLE_EXTENSION) + 
.blocking_process_block_with_switch(Arc::new(block.clone()), Switch::DISABLE_EXTENSION) .expect("process block ok"); } assert_error_eq!( OutPointError::Unknown(OutPoint::new(tx1_hash, 1)), chain_controller - .internal_process_block( + .blocking_process_block_with_switch( Arc::new(chain.blocks()[4].clone()), Switch::DISABLE_EXTENSION ) @@ -426,13 +479,13 @@ fn test_chain_fork_by_total_difficulty() { for block in chain1.blocks() { chain_controller - .internal_process_block(Arc::new(block.clone()), Switch::DISABLE_ALL) + .blocking_process_block_with_switch(Arc::new(block.clone()), Switch::DISABLE_ALL) .expect("process block ok"); } for block in chain2.blocks() { chain_controller - .internal_process_block(Arc::new(block.clone()), Switch::DISABLE_ALL) + .blocking_process_block_with_switch(Arc::new(block.clone()), Switch::DISABLE_ALL) .expect("process block ok"); } assert_eq!( @@ -469,7 +522,7 @@ fn test_chain_fork_by_first_received() { for chain in vec![chain1.clone(), chain2.clone(), chain3.clone()] { for block in chain.blocks() { chain_controller - .internal_process_block(Arc::new(block.clone()), Switch::DISABLE_ALL) + .blocking_process_block_with_switch(Arc::new(block.clone()), Switch::DISABLE_ALL) .expect("process block ok"); } } @@ -530,7 +583,7 @@ fn prepare_context_chain( .build(); chain_controller - .internal_process_block(Arc::new(new_block.clone()), Switch::DISABLE_ALL) + .blocking_process_block_with_switch(Arc::new(new_block.clone()), Switch::DISABLE_ALL) .expect("process block ok"); chain1.push(new_block.clone()); mock_store.insert_block(&new_block, &epoch); @@ -570,7 +623,7 @@ fn prepare_context_chain( .build(); chain_controller - .internal_process_block(Arc::new(new_block.clone()), Switch::DISABLE_ALL) + .blocking_process_block_with_switch(Arc::new(new_block.clone()), Switch::DISABLE_ALL) .expect("process block ok"); chain2.push(new_block.clone()); mock_store.insert_block(&new_block, &epoch); diff --git a/chain/src/tests/block_assembler.rs b/chain/src/tests/block_assembler.rs index d5f34c3188..caf0eb84ca 100644 --- a/chain/src/tests/block_assembler.rs +++ b/chain/src/tests/block_assembler.rs @@ -1,10 +1,10 @@ -use crate::chain::{ChainController, ChainService}; use crate::tests::util::dummy_network; +use crate::{start_chain_services, ChainController}; use ckb_app_config::BlockAssemblerConfig; use ckb_chain_spec::consensus::Consensus; use ckb_dao_utils::genesis_dao_data; use ckb_jsonrpc_types::ScriptHashType; -use ckb_shared::{Shared, SharedBuilder, Snapshot}; +use ckb_shared::{ChainServicesBuilder, Shared, SharedBuilder, Snapshot}; use ckb_store::ChainStore; use ckb_tx_pool::{block_assembler::CandidateUncles, PlugTarget, TxEntry}; use ckb_types::{ @@ -47,8 +47,13 @@ fn start_chain(consensus: Option) -> (ChainController, Shared) { let network = dummy_network(&shared); pack.take_tx_pool_builder().start(network); - let chain_service = ChainService::new(shared.clone(), pack.take_proposal_table()); - let chain_controller = chain_service.start::<&str>(None); + let chain_services_builder: ChainServicesBuilder = pack.take_chain_services_builder(); + let chain_controller: ChainController = start_chain_services(chain_services_builder); + + while chain_controller.is_verifying_unverified_blocks_on_startup() { + std::thread::sleep(std::time::Duration::from_millis(10)); + } + (chain_controller, shared) } @@ -142,7 +147,7 @@ fn test_block_template_timestamp() { let block = gen_block(&genesis, 0, &epoch); chain_controller - .internal_process_block(Arc::new(block.clone()), Switch::DISABLE_ALL) + 
.blocking_process_block_with_switch(Arc::new(block.clone()), Switch::DISABLE_ALL) .unwrap(); let mut block_template = shared @@ -209,13 +214,13 @@ fn test_prepare_uncles() { let block1_1 = gen_block(&block0_1.header(), 10, &epoch); chain_controller - .internal_process_block(Arc::new(block0_1), Switch::DISABLE_ALL) + .blocking_process_block_with_switch(Arc::new(block0_1), Switch::DISABLE_ALL) .unwrap(); chain_controller - .internal_process_block(Arc::new(block0_0.clone()), Switch::DISABLE_ALL) + .blocking_process_block_with_switch(Arc::new(block0_0.clone()), Switch::DISABLE_ALL) .unwrap(); chain_controller - .internal_process_block(Arc::new(block1_1.clone()), Switch::DISABLE_ALL) + .blocking_process_block_with_switch(Arc::new(block1_1.clone()), Switch::DISABLE_ALL) .unwrap(); let mut block_template = shared @@ -239,7 +244,7 @@ fn test_prepare_uncles() { let block2_1 = gen_block(&block1_1.header(), 10, &epoch); chain_controller - .internal_process_block(Arc::new(block2_1.clone()), Switch::DISABLE_ALL) + .blocking_process_block_with_switch(Arc::new(block2_1.clone()), Switch::DISABLE_ALL) .unwrap(); let mut block_template = shared @@ -263,7 +268,7 @@ fn test_prepare_uncles() { let block3_1 = gen_block(&block2_1.header(), 10, &epoch); chain_controller - .internal_process_block(Arc::new(block3_1), Switch::DISABLE_ALL) + .blocking_process_block_with_switch(Arc::new(block3_1), Switch::DISABLE_ALL) .unwrap(); let mut block_template = shared @@ -282,6 +287,8 @@ fn test_prepare_uncles() { #[test] fn test_candidate_uncles_retain() { + let _log_guard = ckb_logger_service::init_for_test("debug").expect("init log"); + let mut consensus = Consensus::default(); consensus.genesis_epoch_ext.set_length(5); let epoch = consensus.genesis_epoch_ext().clone(); @@ -299,13 +306,13 @@ fn test_candidate_uncles_retain() { let block1_1 = gen_block(&block0_1.header(), 10, &epoch); chain_controller - .internal_process_block(Arc::new(block0_1), Switch::DISABLE_ALL) + .blocking_process_block_with_switch(Arc::new(block0_1), Switch::DISABLE_ALL) .unwrap(); chain_controller - .internal_process_block(Arc::new(block0_0.clone()), Switch::DISABLE_ALL) + .blocking_process_block_with_switch(Arc::new(block0_0.clone()), Switch::DISABLE_ALL) .unwrap(); chain_controller - .internal_process_block(Arc::new(block1_1.clone()), Switch::DISABLE_ALL) + .blocking_process_block_with_switch(Arc::new(block1_1.clone()), Switch::DISABLE_ALL) .unwrap(); candidate_uncles.insert(block0_0.as_uncle()); @@ -326,7 +333,7 @@ fn test_candidate_uncles_retain() { let block2_0 = gen_block(&block1_0.header(), 13, &epoch); for block in vec![block1_0, block2_0.clone()] { chain_controller - .internal_process_block(Arc::new(block), Switch::DISABLE_ALL) + .blocking_process_block_with_switch(Arc::new(block), Switch::DISABLE_ALL) .unwrap(); } @@ -346,7 +353,7 @@ fn test_candidate_uncles_retain() { let block3_0 = gen_block(&block2_0.header(), 10, &epoch); chain_controller - .internal_process_block(Arc::new(block3_0.clone()), Switch::DISABLE_ALL) + .blocking_process_block_with_switch(Arc::new(block3_0.clone()), Switch::DISABLE_ALL) .unwrap(); { @@ -413,7 +420,7 @@ fn test_package_basic() { for _i in 0..4 { let block = gen_block(&parent_header, 11, &epoch); chain_controller - .internal_process_block(Arc::new(block.clone()), Switch::DISABLE_ALL) + .blocking_process_block_with_switch(Arc::new(block.clone()), Switch::DISABLE_ALL) .expect("process block"); parent_header = block.header().to_owned(); blocks.push(block); @@ -520,7 +527,7 @@ fn test_package_multi_best_scores() { 
for _i in 0..4 { let block = gen_block(&parent_header, 11, &epoch); chain_controller - .internal_process_block(Arc::new(block.clone()), Switch::DISABLE_ALL) + .blocking_process_block_with_switch(Arc::new(block.clone()), Switch::DISABLE_ALL) .expect("process block"); parent_header = block.header().to_owned(); blocks.push(block); @@ -621,6 +628,8 @@ fn test_package_multi_best_scores() { #[test] fn test_package_low_fee_descendants() { + let _log_guard = ckb_logger_service::init_for_test("debug").expect("init log"); + let mut consensus = Consensus::default(); consensus.genesis_epoch_ext.set_length(5); let epoch = consensus.genesis_epoch_ext().clone(); @@ -636,7 +645,7 @@ fn test_package_low_fee_descendants() { for _i in 0..4 { let block = gen_block(&parent_header, 11, &epoch); chain_controller - .internal_process_block(Arc::new(block.clone()), Switch::DISABLE_ALL) + .blocking_process_block_with_switch(Arc::new(block.clone()), Switch::DISABLE_ALL) .expect("process block"); parent_header = block.header().to_owned(); blocks.push(block); diff --git a/chain/src/tests/delay_verify.rs b/chain/src/tests/delay_verify.rs index 77ed3780b7..bd36fa558f 100644 --- a/chain/src/tests/delay_verify.rs +++ b/chain/src/tests/delay_verify.rs @@ -46,7 +46,7 @@ fn test_dead_cell_in_same_block() { for block in chain1.blocks() { chain_controller - .internal_process_block( + .blocking_process_block_with_switch( Arc::new(block.clone()), Switch::DISABLE_EPOCH | Switch::DISABLE_EXTENSION, ) @@ -55,7 +55,7 @@ fn test_dead_cell_in_same_block() { for block in chain2.blocks().iter().take(switch_fork_number + 1) { chain_controller - .internal_process_block( + .blocking_process_block_with_switch( Arc::new(block.clone()), Switch::DISABLE_EPOCH | Switch::DISABLE_EXTENSION, ) @@ -65,7 +65,7 @@ fn test_dead_cell_in_same_block() { assert_error_eq!( OutPointError::Dead(OutPoint::new(tx1_hash, 0)), chain_controller - .internal_process_block( + .blocking_process_block_with_switch( Arc::new(chain2.blocks()[switch_fork_number + 1].clone()), Switch::DISABLE_EPOCH | Switch::DISABLE_EXTENSION, ) @@ -107,7 +107,7 @@ fn test_dead_cell_in_different_block() { for block in chain1.blocks() { chain_controller - .internal_process_block( + .blocking_process_block_with_switch( Arc::new(block.clone()), Switch::DISABLE_EPOCH | Switch::DISABLE_EXTENSION, ) @@ -116,7 +116,7 @@ fn test_dead_cell_in_different_block() { for block in chain2.blocks().iter().take(switch_fork_number + 2) { chain_controller - .internal_process_block( + .blocking_process_block_with_switch( Arc::new(block.clone()), Switch::DISABLE_EPOCH | Switch::DISABLE_EXTENSION, ) @@ -126,7 +126,7 @@ fn test_dead_cell_in_different_block() { assert_error_eq!( OutPointError::Unknown(OutPoint::new(tx1_hash, 0)), chain_controller - .internal_process_block( + .blocking_process_block_with_switch( Arc::new(chain2.blocks()[switch_fork_number + 2].clone()), Switch::DISABLE_EPOCH | Switch::DISABLE_EXTENSION, ) @@ -169,7 +169,7 @@ fn test_invalid_out_point_index_in_same_block() { for block in chain1.blocks() { chain_controller - .internal_process_block( + .blocking_process_block_with_switch( Arc::new(block.clone()), Switch::DISABLE_EPOCH | Switch::DISABLE_EXTENSION, ) @@ -178,7 +178,7 @@ fn test_invalid_out_point_index_in_same_block() { for block in chain2.blocks().iter().take(switch_fork_number + 1) { chain_controller - .internal_process_block( + .blocking_process_block_with_switch( Arc::new(block.clone()), Switch::DISABLE_EPOCH | Switch::DISABLE_EXTENSION, ) @@ -188,7 +188,7 @@ fn 
test_invalid_out_point_index_in_same_block() { assert_error_eq!( OutPointError::Unknown(OutPoint::new(tx1_hash, 1)), chain_controller - .internal_process_block( + .blocking_process_block_with_switch( Arc::new(chain2.blocks()[switch_fork_number + 1].clone()), Switch::DISABLE_EPOCH | Switch::DISABLE_EXTENSION, ) @@ -232,7 +232,7 @@ fn test_invalid_out_point_index_in_different_blocks() { for block in chain1.blocks() { chain_controller - .internal_process_block( + .blocking_process_block_with_switch( Arc::new(block.clone()), Switch::DISABLE_EPOCH | Switch::DISABLE_EXTENSION, ) @@ -241,7 +241,7 @@ fn test_invalid_out_point_index_in_different_blocks() { for block in chain2.blocks().iter().take(switch_fork_number + 2) { chain_controller - .internal_process_block( + .blocking_process_block_with_switch( Arc::new(block.clone()), Switch::DISABLE_EPOCH | Switch::DISABLE_EXTENSION, ) @@ -251,7 +251,7 @@ fn test_invalid_out_point_index_in_different_blocks() { assert_error_eq!( OutPointError::Unknown(OutPoint::new(tx1_hash, 1)), chain_controller - .internal_process_block( + .blocking_process_block_with_switch( Arc::new(chain2.blocks()[switch_fork_number + 2].clone()), Switch::DISABLE_EPOCH | Switch::DISABLE_EXTENSION, ) @@ -295,7 +295,7 @@ fn test_full_dead_transaction() { .build(); chain_controller - .internal_process_block( + .blocking_process_block_with_switch( Arc::new(block.clone()), Switch::DISABLE_EPOCH | Switch::DISABLE_EXTENSION, ) @@ -373,7 +373,7 @@ fn test_full_dead_transaction() { .build() }; chain_controller - .internal_process_block( + .blocking_process_block_with_switch( Arc::new(new_block.clone()), Switch::DISABLE_EPOCH | Switch::DISABLE_EXTENSION, ) @@ -456,7 +456,7 @@ fn test_full_dead_transaction() { .build() }; chain_controller - .internal_process_block( + .blocking_process_block_with_switch( Arc::new(new_block.clone()), Switch::DISABLE_EPOCH | Switch::DISABLE_EXTENSION, ) @@ -528,7 +528,7 @@ fn test_full_dead_transaction() { .build() }; chain_controller - .internal_process_block( + .blocking_process_block_with_switch( Arc::new(new_block.clone()), Switch::DISABLE_EPOCH | Switch::DISABLE_EXTENSION, ) diff --git a/chain/src/tests/dep_cell.rs b/chain/src/tests/dep_cell.rs index cac812d6ae..64e3fbe7d4 100644 --- a/chain/src/tests/dep_cell.rs +++ b/chain/src/tests/dep_cell.rs @@ -152,7 +152,7 @@ fn test_package_txs_with_deps() { ) .build(); chain_controller - .internal_process_block(Arc::new(block), Switch::DISABLE_ALL) + .blocking_process_block_with_switch(Arc::new(block), Switch::DISABLE_ALL) .unwrap(); } @@ -168,7 +168,7 @@ fn test_package_txs_with_deps() { let block: Block = block_template.clone().into(); let block = block.as_advanced_builder().build(); chain_controller - .internal_process_block(Arc::new(block), Switch::DISABLE_ALL) + .blocking_process_block_with_switch(Arc::new(block), Switch::DISABLE_ALL) .unwrap(); } @@ -298,7 +298,7 @@ fn test_package_txs_with_deps_unstable_sort() { ) .build(); chain_controller - .internal_process_block(Arc::new(block), Switch::DISABLE_ALL) + .blocking_process_block_with_switch(Arc::new(block), Switch::DISABLE_ALL) .unwrap(); } @@ -314,7 +314,7 @@ fn test_package_txs_with_deps_unstable_sort() { let block: Block = block_template.clone().into(); let block = block.as_advanced_builder().build(); chain_controller - .internal_process_block(Arc::new(block), Switch::DISABLE_ALL) + .blocking_process_block_with_switch(Arc::new(block), Switch::DISABLE_ALL) .unwrap(); } @@ -437,7 +437,7 @@ fn test_package_txs_with_deps2() { ) .build(); chain_controller - 
.internal_process_block(Arc::new(block), Switch::DISABLE_ALL) + .blocking_process_block_with_switch(Arc::new(block), Switch::DISABLE_ALL) .unwrap(); } // skip gap @@ -452,7 +452,7 @@ fn test_package_txs_with_deps2() { let block: Block = block_template.clone().into(); let block = block.as_advanced_builder().build(); chain_controller - .internal_process_block(Arc::new(block), Switch::DISABLE_ALL) + .blocking_process_block_with_switch(Arc::new(block), Switch::DISABLE_ALL) .unwrap(); } @@ -562,7 +562,7 @@ fn test_package_txs_with_deps_priority() { let block: Block = block_template.clone().into(); let block = block.as_advanced_builder().build(); chain_controller - .internal_process_block(Arc::new(block), Switch::DISABLE_ALL) + .blocking_process_block_with_switch(Arc::new(block), Switch::DISABLE_ALL) .unwrap(); } @@ -578,7 +578,7 @@ fn test_package_txs_with_deps_priority() { let block: Block = block_template.clone().into(); let block = block.as_advanced_builder().build(); chain_controller - .internal_process_block(Arc::new(block), Switch::DISABLE_ALL) + .blocking_process_block_with_switch(Arc::new(block), Switch::DISABLE_ALL) .unwrap(); } diff --git a/chain/src/tests/find_fork.rs b/chain/src/tests/find_fork.rs index eb1f4208a8..93fa67f118 100644 --- a/chain/src/tests/find_fork.rs +++ b/chain/src/tests/find_fork.rs @@ -1,5 +1,8 @@ -use crate::chain::{ChainService, ForkChanges}; +use crate::consume_unverified::ConsumeUnverifiedBlockProcessor; +use crate::utils::forkchanges::ForkChanges; +use crate::{start_chain_services, UnverifiedBlock}; use ckb_chain_spec::consensus::{Consensus, ProposalWindow}; +use ckb_proposal_table::ProposalTable; use ckb_shared::SharedBuilder; use ckb_store::ChainStore; use ckb_systemtime::unix_time_as_millis; @@ -11,9 +14,31 @@ use ckb_types::{ U256, }; use ckb_verification_traits::Switch; +use dashmap::DashSet; use std::collections::HashSet; use std::sync::Arc; +fn process_block( + consume_unverified_block_processor: &mut ConsumeUnverifiedBlockProcessor, + blk: &BlockView, + switch: Switch, +) { + let store = consume_unverified_block_processor.shared.store(); + let db_txn = store.begin_transaction(); + db_txn.insert_block(blk).unwrap(); + db_txn.commit().unwrap(); + + let parent_header = store.get_block_header(&blk.parent_hash()).unwrap(); + let unverified_block = UnverifiedBlock { + block: Arc::new(blk.to_owned()), + switch: Some(switch), + verify_callback: None, + parent_header, + }; + + consume_unverified_block_processor.consume_unverified_blocks(unverified_block); +} + // 0--1--2--3--4 // \ // \ @@ -21,8 +46,10 @@ use std::sync::Arc; #[test] fn test_find_fork_case1() { let builder = SharedBuilder::with_temp_db(); - let (shared, mut pack) = builder.consensus(Consensus::default()).build().unwrap(); - let mut chain_service = ChainService::new(shared.clone(), pack.take_proposal_table()); + let consensus = Consensus::default(); + let proposal_table = ProposalTable::new(consensus.tx_proposal_window()); + let (shared, mut _pack) = builder.consensus(consensus).build().unwrap(); + let genesis = shared .store() .get_block_header(&shared.store().get_block_hash(0).unwrap()) @@ -40,18 +67,32 @@ fn test_find_fork_case1() { fork2.gen_empty_block_with_diff(90u64, &mock_store); } + let is_pending_verify = Arc::new(DashSet::new()); + + let mut consume_unverified_block_processor = ConsumeUnverifiedBlockProcessor { + shared: shared.clone(), + is_pending_verify, + proposal_table, + }; + // fork1 total_difficulty 400 for blk in fork1.blocks() { - chain_service - 
.process_block(Arc::new(blk.clone()), Switch::DISABLE_ALL) - .unwrap(); + println!("process fork1 block: {}-{}", blk.number(), blk.hash()); + process_block( + &mut consume_unverified_block_processor, + blk, + Switch::DISABLE_ALL, + ); } // fork2 total_difficulty 270 for blk in fork2.blocks() { - chain_service - .process_block(Arc::new(blk.clone()), Switch::DISABLE_ALL) - .unwrap(); + println!("process fork2 block: {}-{}", blk.number(), blk.hash()); + process_block( + &mut consume_unverified_block_processor, + blk, + Switch::DISABLE_ALL, + ); } let tip_number = { shared.snapshot().tip_number() }; @@ -72,7 +113,7 @@ fn test_find_fork_case1() { let mut fork = ForkChanges::default(); - chain_service.find_fork(&mut fork, tip_number, fork2.tip(), ext); + consume_unverified_block_processor.find_fork(&mut fork, tip_number, fork2.tip(), ext); let detached_blocks: HashSet<BlockView> = fork1.blocks().clone().into_iter().collect(); let attached_blocks: HashSet<BlockView> = fork2.blocks().clone().into_iter().collect(); @@ -93,8 +134,8 @@ #[test] fn test_find_fork_case2() { let builder = SharedBuilder::with_temp_db(); - let (shared, mut pack) = builder.consensus(Consensus::default()).build().unwrap(); - let mut chain_service = ChainService::new(shared.clone(), pack.take_proposal_table()); + let consensus = Consensus::default(); + let (shared, _pack) = builder.consensus(consensus.clone()).build().unwrap(); let genesis = shared .store() .get_block_header(&shared.store().get_block_hash(0).unwrap()) .unwrap(); let mock_store = MockStore::new(&genesis, shared.store()); let mut fork1 = MockChain::new(genesis.clone(), shared.consensus()); let mut fork2 = MockChain::new(genesis, shared.consensus()); for _ in 0..4 { fork1.gen_empty_block_with_diff(100u64, &mock_store); } for _ in 0..2 { fork2.gen_empty_block_with_diff(90u64, &mock_store); } + let proposal_table = ProposalTable::new(consensus.tx_proposal_window()); + let mut consume_unverified_block_processor = ConsumeUnverifiedBlockProcessor { + shared: shared.clone(), + is_pending_verify: Arc::new(DashSet::new()), + proposal_table, + }; // fork1 total_difficulty 400 for blk in fork1.blocks() { - chain_service - .process_block(Arc::new(blk.clone()), Switch::DISABLE_ALL) - .unwrap(); + process_block( + &mut consume_unverified_block_processor, + blk, + Switch::DISABLE_ALL, + ); } // fork2 total_difficulty 280 for blk in fork2.blocks() { - chain_service - .process_block(Arc::new(blk.clone()), Switch::DISABLE_ALL) - .unwrap(); + process_block( + &mut consume_unverified_block_processor, + blk, + Switch::DISABLE_ALL, + ); } let tip_number = { shared.snapshot().tip_number() }; @@ -144,7 +195,7 @@ let mut fork = ForkChanges::default(); - chain_service.find_fork(&mut fork, tip_number, fork2.tip(), ext); + consume_unverified_block_processor.find_fork(&mut fork, tip_number, fork2.tip(), ext); let detached_blocks: HashSet<BlockView> = fork1.blocks()[1..].iter().cloned().collect(); let attached_blocks: HashSet<BlockView> = fork2.blocks().clone().into_iter().collect(); @@ -165,8 +216,8 @@ #[test] fn test_find_fork_case3() { let builder = SharedBuilder::with_temp_db(); - let (shared, mut pack) = builder.consensus(Consensus::default()).build().unwrap(); - let mut chain_service = ChainService::new(shared.clone(), pack.take_proposal_table()); + let consensus = Consensus::default(); + let (shared, _pack) = builder.consensus(consensus.clone()).build().unwrap(); let genesis = shared .store() .get_block_header(&shared.store().get_block_hash(0).unwrap()) .unwrap(); let mock_store = MockStore::new(&genesis, shared.store()); let mut fork1 = MockChain::new(genesis.clone(), shared.consensus()); let mut fork2 = MockChain::new(genesis, shared.consensus()); for _ in 0..3 { fork1.gen_empty_block_with_diff(80u64, &mock_store) } for _ in 0..5 { fork2.gen_empty_block_with_diff(40u64, &mock_store) } - + let proposal_table = ProposalTable::new(consensus.tx_proposal_window()); + let mut consume_unverified_block_processor = ConsumeUnverifiedBlockProcessor { + shared: shared.clone(), + is_pending_verify:
Arc::new(DashSet::new()), + proposal_table, + }; // fork1 total_difficulty 240 for blk in fork1.blocks() { - chain_service - .process_block(Arc::new(blk.clone()), Switch::DISABLE_ALL) - .unwrap(); + process_block( + &mut consume_unverified_block_processor, + blk, + Switch::DISABLE_ALL, + ); } // fork2 total_difficulty 200 for blk in fork2.blocks() { - chain_service - .process_block(Arc::new(blk.clone()), Switch::DISABLE_ALL) - .unwrap(); + process_block( + &mut consume_unverified_block_processor, + blk, + Switch::DISABLE_ALL, + ); } let tip_number = { shared.snapshot().tip_number() }; @@ -216,7 +276,7 @@ }; let mut fork = ForkChanges::default(); - chain_service.find_fork(&mut fork, tip_number, fork2.tip(), ext); + consume_unverified_block_processor.find_fork(&mut fork, tip_number, fork2.tip(), ext); let detached_blocks: HashSet<BlockView> = fork1.blocks().clone().into_iter().collect(); let attached_blocks: HashSet<BlockView> = fork2.blocks().clone().into_iter().collect(); @@ -237,8 +297,8 @@ #[test] fn test_find_fork_case4() { let builder = SharedBuilder::with_temp_db(); - let (shared, mut pack) = builder.consensus(Consensus::default()).build().unwrap(); - let mut chain_service = ChainService::new(shared.clone(), pack.take_proposal_table()); + let consensus = Consensus::default(); + let (shared, _pack) = builder.consensus(consensus.clone()).build().unwrap(); let genesis = shared .store() .get_block_header(&shared.store().get_block_hash(0).unwrap()) .unwrap(); let mock_store = MockStore::new(&genesis, shared.store()); let mut fork1 = MockChain::new(genesis.clone(), shared.consensus()); let mut fork2 = MockChain::new(genesis, shared.consensus()); for _ in 0..5 { fork1.gen_empty_block_with_diff(40u64, &mock_store); } for _ in 0..2 { fork2.gen_empty_block_with_diff(80u64, &mock_store); } + let proposal_table = ProposalTable::new(consensus.tx_proposal_window()); + let mut consume_unverified_block_processor = ConsumeUnverifiedBlockProcessor { + shared: shared.clone(), + is_pending_verify: Arc::new(DashSet::new()), + proposal_table, + }; // fork1 total_difficulty 200 for blk in fork1.blocks() { - chain_service - .process_block(Arc::new(blk.clone()), Switch::DISABLE_ALL) - .unwrap(); + process_block( + &mut consume_unverified_block_processor, + blk, + Switch::DISABLE_ALL, + ); } // fork2 total_difficulty 160 for blk in fork2.blocks() { - chain_service - .process_block(Arc::new(blk.clone()), Switch::DISABLE_ALL) - .unwrap(); + process_block( + &mut consume_unverified_block_processor, + blk, + Switch::DISABLE_ALL, + ); } let tip_number = { shared.snapshot().tip_number() }; @@ -289,7 +359,7 @@ let mut fork = ForkChanges::default(); - chain_service.find_fork(&mut fork, tip_number, fork2.tip(), ext); + consume_unverified_block_processor.find_fork(&mut fork, tip_number, fork2.tip(), ext); let detached_blocks: HashSet<BlockView> = fork1.blocks().clone().into_iter().collect(); let attached_blocks: HashSet<BlockView> = fork2.blocks().clone().into_iter().collect(); @@ -306,8 +376,9 @@ // this case is created for issues from https://github.com/nervosnetwork/ckb/pull/1470 #[test] fn repeatedly_switch_fork() { - let (shared, _) = SharedBuilder::with_temp_db() - .consensus(Consensus::default()) + let consensus = Consensus::default(); + let (shared, mut pack) = SharedBuilder::with_temp_db() + .consensus(consensus.clone()) .build() .unwrap(); let genesis = shared .store() .get_block_header(&shared.store().get_block_hash(0).unwrap()) .unwrap(); let mut fork1 = MockChain::new(genesis.clone(), shared.consensus()); let mut fork2 = MockChain::new(genesis, shared.consensus()); - let (shared, mut pack) = SharedBuilder::with_temp_db() - .consensus(Consensus::default()) - .build() - .unwrap(); - let mut chain_service = ChainService::new(shared.clone(),
pack.take_proposal_table()); + let chain_controller = start_chain_services(pack.take_chain_services_builder()); for _ in 0..2 { fork1.gen_empty_block_with_nonce(1u128, &mock_store); @@ -331,17 +398,27 @@ fn repeatedly_switch_fork() { for _ in 0..2 { fork2.gen_empty_block_with_nonce(2u128, &mock_store); } + let proposal_table = ProposalTable::new(consensus.tx_proposal_window()); + let mut consume_unverified_block_processor = ConsumeUnverifiedBlockProcessor { + shared: shared.clone(), + is_pending_verify: Arc::new(DashSet::new()), + proposal_table, + }; for blk in fork1.blocks() { - chain_service - .process_block(Arc::new(blk.clone()), Switch::DISABLE_ALL) - .unwrap(); + process_block( + &mut consume_unverified_block_processor, + blk, + Switch::DISABLE_ALL, + ); } for blk in fork2.blocks() { - chain_service - .process_block(Arc::new(blk.clone()), Switch::DISABLE_ALL) - .unwrap(); + process_block( + &mut consume_unverified_block_processor, + blk, + Switch::DISABLE_ALL, + ); } //switch fork1 @@ -360,8 +437,8 @@ fn repeatedly_switch_fork() { .nonce(1u128.pack()) .uncle(uncle) .build(); - chain_service - .process_block(Arc::new(new_block1.clone()), Switch::DISABLE_ALL) + chain_controller + .blocking_process_block_with_switch(Arc::new(new_block1.clone()), Switch::DISABLE_ALL) .unwrap(); //switch fork2 @@ -379,8 +456,8 @@ fn repeatedly_switch_fork() { .nonce(2u128.pack()) .build(); parent = new_block2.clone(); - chain_service - .process_block(Arc::new(new_block2), Switch::DISABLE_ALL) + chain_controller + .blocking_process_block_with_switch(Arc::new(new_block2), Switch::DISABLE_ALL) .unwrap(); let epoch = shared .consensus() @@ -394,8 +471,8 @@ fn repeatedly_switch_fork() { .epoch(epoch.number_with_fraction(parent.number() + 1).pack()) .nonce(2u128.pack()) .build(); - chain_service - .process_block(Arc::new(new_block3), Switch::DISABLE_ALL) + chain_controller + .blocking_process_block_with_switch(Arc::new(new_block3), Switch::DISABLE_ALL) .unwrap(); //switch fork1 @@ -412,8 +489,8 @@ fn repeatedly_switch_fork() { .epoch(epoch.number_with_fraction(parent.number() + 1).pack()) .nonce(1u128.pack()) .build(); - chain_service - .process_block(Arc::new(new_block4.clone()), Switch::DISABLE_ALL) + chain_controller + .blocking_process_block_with_switch(Arc::new(new_block4.clone()), Switch::DISABLE_ALL) .unwrap(); parent = new_block4; @@ -429,8 +506,8 @@ fn repeatedly_switch_fork() { .epoch(epoch.number_with_fraction(parent.number() + 1).pack()) .nonce(1u128.pack()) .build(); - chain_service - .process_block(Arc::new(new_block5), Switch::DISABLE_ALL) + chain_controller + .blocking_process_block_with_switch(Arc::new(new_block5), Switch::DISABLE_ALL) .unwrap(); } @@ -448,7 +525,7 @@ fn test_fork_proposal_table() { }; let (shared, mut pack) = builder.consensus(consensus).build().unwrap(); - let mut chain_service = ChainService::new(shared.clone(), pack.take_proposal_table()); + let chain_controller = start_chain_services(pack.take_chain_services_builder()); let genesis = shared .store() @@ -466,8 +543,8 @@ fn test_fork_proposal_table() { } for blk in mock.blocks() { - chain_service - .process_block(Arc::new(blk.clone()), Switch::DISABLE_ALL) + chain_controller + .blocking_process_block_with_switch(Arc::new(blk.clone()), Switch::DISABLE_ALL) .unwrap(); } @@ -483,8 +560,8 @@ fn test_fork_proposal_table() { } for blk in mock.blocks().iter().skip(3) { - chain_service - .process_block(Arc::new(blk.clone()), Switch::DISABLE_ALL) + chain_controller + .blocking_process_block_with_switch(Arc::new(blk.clone()), 
Switch::DISABLE_ALL) .unwrap(); } @@ -495,7 +572,7 @@ assert_eq!( &vec![ packed::ProposalShortId::new([0u8, 0, 0, 0, 0, 0, 0, 0, 0, 3]), - packed::ProposalShortId::new([1u8, 0, 0, 0, 0, 0, 0, 0, 0, 4]) + packed::ProposalShortId::new([1u8, 0, 0, 0, 0, 0, 0, 0, 0, 4]), ] .into_iter() .collect::<HashSet<_>>(), diff --git a/chain/src/tests/load_code_with_snapshot.rs b/chain/src/tests/load_code_with_snapshot.rs index 42db84283d..9a51e6e82b 100644 --- a/chain/src/tests/load_code_with_snapshot.rs +++ b/chain/src/tests/load_code_with_snapshot.rs @@ -113,7 +113,7 @@ fn test_load_code() { let tx_status = tx_pool.get_tx_status(tx.hash()); assert_eq!( tx_status.unwrap().unwrap(), - (TxStatus::Pending, Some(11174)) + (TxStatus::Pending, Some(11325)) ); } diff --git a/chain/src/tests/mod.rs b/chain/src/tests/mod.rs index cafc0d6a57..ea5909c044 100644 --- a/chain/src/tests/mod.rs +++ b/chain/src/tests/mod.rs @@ -8,6 +8,7 @@ mod load_code_with_snapshot; mod load_input_cell_data; mod load_input_data_hash_cell; mod non_contextual_block_txs_verify; +mod orphan_block_pool; mod reward; mod truncate; mod uncle; diff --git a/chain/src/tests/non_contextual_block_txs_verify.rs b/chain/src/tests/non_contextual_block_txs_verify.rs index b8317363a3..68178658d8 100644 --- a/chain/src/tests/non_contextual_block_txs_verify.rs +++ b/chain/src/tests/non_contextual_block_txs_verify.rs @@ -156,7 +156,7 @@ fn non_contextual_block_txs_verify() { let block = gen_block(&parent, vec![tx0, tx1], &shared, &mock_store); - let ret = chain_controller.process_block(Arc::new(block)); + let ret = chain_controller.blocking_process_block(Arc::new(block)); assert!(ret.is_err()); assert_eq!( format!("{}", ret.err().unwrap()), diff --git a/chain/src/tests/orphan_block_pool.rs b/chain/src/tests/orphan_block_pool.rs new file mode 100644 index 0000000000..bc0ba1ceb9 --- /dev/null +++ b/chain/src/tests/orphan_block_pool.rs @@ -0,0 +1,260 @@ +#![allow(dead_code)] +use crate::tests::util::start_chain; +use crate::{LonelyBlock, LonelyBlockHash}; +use ckb_chain_spec::consensus::ConsensusBuilder; +use ckb_systemtime::unix_time_as_millis; +use ckb_types::core::{BlockBuilder, EpochNumberWithFraction, HeaderView}; +use ckb_types::packed::Byte32; +use ckb_types::prelude::*; +use std::collections::HashSet; +use std::sync::Arc; +use std::thread; + +use crate::utils::orphan_block_pool::OrphanBlockPool; + +fn gen_lonely_block(parent_header: &HeaderView) -> LonelyBlock { + let number = parent_header.number() + 1; + let block = BlockBuilder::default() + .parent_hash(parent_header.hash()) + .timestamp(unix_time_as_millis().pack()) + .number(number.pack()) + .epoch(EpochNumberWithFraction::new(number / 1000, number % 1000, 1000).pack()) + .nonce((parent_header.nonce() + 1).pack()) + .build(); + LonelyBlock { + block: Arc::new(block), + switch: None, + verify_callback: None, + } +} + +fn assert_leaders_have_children(pool: &OrphanBlockPool) { + for leader in pool.clone_leaders() { + let children = pool.remove_blocks_by_parent(&leader); + assert!(!children.is_empty()); + // `remove_blocks_by_parent` will remove all children from the pool, + // so we need to put them back here. + for child in children { + pool.insert(child); + } + } +} + +fn assert_blocks_are_sorted(blocks: &[LonelyBlockHash]) { + let mut parent_hash = blocks[0].parent_hash(); + let mut windows = blocks.windows(2); + // Orphans are sorted in a breadth-first search manner. We iterate through them and + // check that this is the case.
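+ // For example (an illustrative case, not part of the original test data): for a + // plain chain G -> A -> B -> C, the expected order is [A, B, C], so every adjacent + // pair is either two siblings or a parent followed directly by its child.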
+ // The `parent_or_sibling` may be a sibling or child of current `parent_hash`, + // and `child_or_sibling` may be a sibling or child of `parent_or_sibling`. + while let Some([parent_or_sibling, child_or_sibling]) = windows.next() { + // `parent_or_sibling` is a child of the block with current `parent_hash`. + // Make `parent_or_sibling`'s parent the current `parent_hash`. + if parent_or_sibling.parent_hash() != parent_hash { + parent_hash = parent_or_sibling.parent_hash(); + } + + // If `child_or_sibling`'s parent is not the current `parent_hash`, i.e. it is not a sibling of + // `parent_or_sibling`, then it must be a child of `parent_or_sibling`. + if child_or_sibling.parent_hash() != parent_hash { + assert_eq!(child_or_sibling.parent_hash(), parent_or_sibling.hash()); + // Move `parent_hash` forward. + parent_hash = child_or_sibling.parent_hash(); + } + } +} + +#[test] +fn test_remove_blocks_by_parent() { + let consensus = ConsensusBuilder::default().build(); + let block_number = 200; + let mut blocks = Vec::new(); + let mut parent = consensus.genesis_block().header(); + let pool = OrphanBlockPool::with_capacity(200); + for _ in 1..block_number { + let lonely_block = gen_lonely_block(&parent); + let new_block_clone = Arc::clone(lonely_block.block()); + let new_block = LonelyBlock { + block: Arc::clone(&new_block_clone), + switch: None, + verify_callback: None, + }; + blocks.push(new_block_clone); + + parent = new_block.block().header(); + pool.insert(new_block.into()); + } + + let orphan = pool.remove_blocks_by_parent(&consensus.genesis_block().hash()); + + assert_eq!(orphan[0].parent_hash(), consensus.genesis_block().hash()); + assert_blocks_are_sorted(orphan.as_slice()); + + let orphan_set: HashSet<_> = orphan.into_iter().map(|b| b.hash()).collect(); + let blocks_set: HashSet<_> = blocks.into_iter().map(|b| b.hash()).collect(); + assert_eq!(orphan_set, blocks_set) +} + +#[test] +fn test_remove_blocks_by_parent_and_get_block_should_not_deadlock() { + let consensus = ConsensusBuilder::default().build(); + + let (_chain_controller, shared, _parent) = start_chain(Some(consensus.clone())); + + let pool = OrphanBlockPool::with_capacity(1024); + let mut header = consensus.genesis_block().header(); + let mut hashes = Vec::new(); + for _ in 1..1024 { + let lonely_block = gen_lonely_block(&header); + let new_block = lonely_block.block(); + let new_block_clone = LonelyBlock { + block: Arc::clone(new_block), + switch: None, + verify_callback: None, + }; + pool.insert(new_block_clone.into()); + header = new_block.header(); + hashes.push(header.hash()); + } + + let pool_arc1 = Arc::new(pool); + let pool_arc2 = Arc::clone(&pool_arc1); + + let thread1 = thread::spawn(move || { + pool_arc1.remove_blocks_by_parent(&consensus.genesis_block().hash()); + }); + + for hash in hashes.iter().rev() { + pool_arc2.get_block(shared.store(), hash); + } + + thread1.join().unwrap(); +} + +#[test] +fn test_leaders() { + let consensus = ConsensusBuilder::default().build(); + let block_number = 20; + let mut blocks = Vec::new(); + let mut parent = consensus.genesis_block().header(); + let pool = OrphanBlockPool::with_capacity(20); + for i in 0..block_number - 1 { + let lonely_block = gen_lonely_block(&parent); + let new_block = LonelyBlock { + block: Arc::clone(lonely_block.block()), + switch: None, + verify_callback: None, + }; + blocks.push(lonely_block); + parent = new_block.block().header(); + if i % 5 != 0 { + pool.insert(new_block.into()); + } + } + assert_leaders_have_children(&pool); + assert_eq!(pool.len(), 
15); + assert_eq!(pool.leaders_len(), 4); + + pool.insert( + LonelyBlock { + block: Arc::clone(blocks[5].block()), + switch: None, + verify_callback: None, + } + .into(), + ); + assert_leaders_have_children(&pool); + assert_eq!(pool.len(), 16); + assert_eq!(pool.leaders_len(), 3); + + pool.insert( + LonelyBlock { + block: Arc::clone(blocks[10].block()), + switch: None, + verify_callback: None, + } + .into(), + ); + assert_leaders_have_children(&pool); + assert_eq!(pool.len(), 17); + assert_eq!(pool.leaders_len(), 2); + + // the block at index 0 isn't in the orphan pool, so this does nothing + let orphan = pool.remove_blocks_by_parent(&consensus.genesis_block().hash()); + assert!(orphan.is_empty()); + assert_eq!(pool.len(), 17); + assert_eq!(pool.leaders_len(), 2); + + pool.insert( + LonelyBlock { + block: Arc::clone(blocks[0].block()), + switch: None, + verify_callback: None, + } + .into(), + ); + assert_leaders_have_children(&pool); + assert_eq!(pool.len(), 18); + assert_eq!(pool.leaders_len(), 2); + + let orphan = pool.remove_blocks_by_parent(&consensus.genesis_block().hash()); + assert_eq!(pool.len(), 3); + assert_eq!(pool.leaders_len(), 1); + + pool.insert( + LonelyBlock { + block: Arc::clone(blocks[15].block()), + switch: None, + verify_callback: None, + } + .into(), + ); + assert_leaders_have_children(&pool); + assert_eq!(pool.len(), 4); + assert_eq!(pool.leaders_len(), 1); + + let orphan_1 = pool.remove_blocks_by_parent(&blocks[14].block.hash()); + + let orphan_set: HashSet<Byte32> = orphan + .into_iter() + .map(|b| b.hash()) + .chain(orphan_1.into_iter().map(|b| b.hash())) + .collect(); + let blocks_set: HashSet<Byte32> = blocks.into_iter().map(|b| b.block().hash()).collect(); + assert_eq!(orphan_set, blocks_set); + assert_eq!(pool.len(), 0); + assert_eq!(pool.leaders_len(), 0); +} + +#[test] +fn test_remove_expired_blocks() { + let consensus = ConsensusBuilder::default().build(); + let block_number = 20; + let mut parent = consensus.genesis_block().header(); + let pool = OrphanBlockPool::with_capacity(block_number); + + let deprecated = EpochNumberWithFraction::new(10, 0, 10); + + for _ in 1..block_number { + let new_block = BlockBuilder::default() + .parent_hash(parent.hash()) + .timestamp(unix_time_as_millis().pack()) + .number((parent.number() + 1).pack()) + .epoch(deprecated.clone().pack()) + .nonce((parent.nonce() + 1).pack()) + .build(); + + parent = new_block.header(); + let lonely_block = LonelyBlock { + block: Arc::new(new_block), + switch: None, + verify_callback: None, + }; + pool.insert(lonely_block.into()); + } + assert_eq!(pool.leaders_len(), 1); + + let v = pool.clean_expired_blocks(20_u64); + assert_eq!(v.len(), 19); + assert_eq!(pool.leaders_len(), 0); +} diff --git a/chain/src/tests/reward.rs b/chain/src/tests/reward.rs index 73de141c86..876a1495bf 100644 --- a/chain/src/tests/reward.rs +++ b/chain/src/tests/reward.rs @@ -229,7 +229,7 @@ fn finalize_reward() { parent = block.header().clone(); chain_controller - .internal_process_block(Arc::new(block.clone()), Switch::DISABLE_EXTENSION) + .blocking_process_block_with_switch(Arc::new(block.clone()), Switch::DISABLE_EXTENSION) .expect("process block ok"); blocks.push(block); } @@ -266,7 +266,7 @@ fn finalize_reward() { parent = block.header(); chain_controller - .internal_process_block(Arc::new(block.clone()), Switch::DISABLE_EXTENSION) + .blocking_process_block_with_switch(Arc::new(block.clone()), Switch::DISABLE_EXTENSION) .expect("process block ok"); let (target, reward) = RewardCalculator::new(shared.consensus(), shared.snapshot().as_ref()) @@
-300,6 +300,6 @@ fn finalize_reward() { ); chain_controller - .internal_process_block(Arc::new(block), Switch::DISABLE_EXTENSION) + .blocking_process_block_with_switch(Arc::new(block), Switch::DISABLE_EXTENSION) .expect("process block ok"); } diff --git a/chain/src/tests/truncate.rs b/chain/src/tests/truncate.rs index a9c892c7ee..57fec63256 100644 --- a/chain/src/tests/truncate.rs +++ b/chain/src/tests/truncate.rs @@ -1,4 +1,4 @@ -use crate::chain::ChainService; +use crate::start_chain_services; use ckb_chain_spec::consensus::Consensus; use ckb_shared::SharedBuilder; use ckb_store::ChainStore; @@ -11,7 +11,7 @@ fn test_truncate() { let builder = SharedBuilder::with_temp_db(); let (shared, mut pack) = builder.consensus(Consensus::default()).build().unwrap(); - let mut chain_service = ChainService::new(shared.clone(), pack.take_proposal_table()); + let chain_controller = start_chain_services(pack.take_chain_services_builder()); let genesis = shared .store() @@ -26,8 +26,8 @@ fn test_truncate() { } for blk in mock.blocks() { - chain_service - .process_block(Arc::new(blk.clone()), Switch::DISABLE_ALL) + chain_controller + .blocking_process_block_with_switch(Arc::new(blk.clone()), Switch::DISABLE_ALL) .unwrap(); } @@ -38,12 +38,12 @@ fn test_truncate() { } for blk in mock.blocks() { - chain_service - .process_block(Arc::new(blk.clone()), Switch::DISABLE_ALL) + chain_controller + .blocking_process_block_with_switch(Arc::new(blk.clone()), Switch::DISABLE_ALL) .unwrap(); } - chain_service.truncate(&target.hash()).unwrap(); + chain_controller.truncate(target.hash()).unwrap(); assert_eq!(shared.snapshot().tip_header(), &target); } diff --git a/chain/src/tests/uncle.rs b/chain/src/tests/uncle.rs index 3d8d4da0a0..fe23f5cf34 100644 --- a/chain/src/tests/uncle.rs +++ b/chain/src/tests/uncle.rs @@ -1,4 +1,4 @@ -use crate::chain::ChainService; +use crate::start_chain_services; use ckb_chain_spec::consensus::Consensus; use ckb_shared::SharedBuilder; use ckb_store::ChainStore; @@ -10,7 +10,8 @@ use std::sync::Arc; fn test_get_block_body_after_inserting() { let builder = SharedBuilder::with_temp_db(); let (shared, mut pack) = builder.consensus(Consensus::default()).build().unwrap(); - let mut chain_service = ChainService::new(shared.clone(), pack.take_proposal_table()); + let chain_controller = start_chain_services(pack.take_chain_services_builder()); + let genesis = shared .store() .get_block_header(&shared.store().get_block_hash(0).unwrap()) @@ -26,15 +27,15 @@ fn test_get_block_body_after_inserting() { } for blk in fork1.blocks() { - chain_service - .process_block(Arc::new(blk.clone()), Switch::DISABLE_ALL) + chain_controller + .blocking_process_block_with_switch(Arc::new(blk.clone()), Switch::DISABLE_ALL) .unwrap(); let len = shared.snapshot().get_block_body(&blk.hash()).len(); assert_eq!(len, 1, "[fork1] snapshot.get_block_body({})", blk.hash(),); } for blk in fork2.blocks() { - chain_service - .process_block(Arc::new(blk.clone()), Switch::DISABLE_ALL) + chain_controller + .blocking_process_block_with_switch(Arc::new(blk.clone()), Switch::DISABLE_ALL) .unwrap(); let snapshot = shared.snapshot(); assert!(snapshot.get_block_header(&blk.hash()).is_some()); diff --git a/chain/src/tests/util.rs b/chain/src/tests/util.rs index 0d42b0def6..f29cd97ad7 100644 --- a/chain/src/tests/util.rs +++ b/chain/src/tests/util.rs @@ -1,4 +1,4 @@ -use crate::chain::{ChainController, ChainService}; +use crate::{start_chain_services, ChainController}; use ckb_app_config::TxPoolConfig; use 
ckb_app_config::{BlockAssemblerConfig, NetworkConfig};
 use ckb_chain_spec::consensus::{Consensus, ConsensusBuilder};
@@ -85,8 +85,7 @@ pub(crate) fn start_chain_with_tx_pool_config(
     let network = dummy_network(&shared);
     pack.take_tx_pool_builder().start(network);
 
-    let chain_service = ChainService::new(shared.clone(), pack.take_proposal_table());
-    let chain_controller = chain_service.start::<&str>(None);
+    let chain_controller = start_chain_services(pack.take_chain_services_builder());
     let parent = {
         let snapshot = shared.snapshot();
         snapshot
diff --git a/chain/src/utils/forkchanges.rs b/chain/src/utils/forkchanges.rs
new file mode 100644
index 0000000000..561ae94545
--- /dev/null
+++ b/chain/src/utils/forkchanges.rs
@@ -0,0 +1,85 @@
+use ckb_types::core::hardfork::HardForks;
+use ckb_types::core::{BlockExt, BlockView};
+use ckb_types::packed::ProposalShortId;
+#[cfg(debug_assertions)]
+use is_sorted::IsSorted;
+use std::collections::{HashSet, VecDeque};
+
+/// The struct representing a fork
+#[derive(Debug, Default)]
+pub struct ForkChanges {
+    /// Blocks attached to index after forks
+    pub(crate) attached_blocks: VecDeque<BlockView>,
+    /// Blocks detached from index after forks
+    pub(crate) detached_blocks: VecDeque<BlockView>,
+    /// HashSet with proposal_id detached to index after forks
+    pub(crate) detached_proposal_id: HashSet<ProposalShortId>,
+    /// to be updated exts
+    pub(crate) dirty_exts: VecDeque<BlockExt>,
+}
+
+impl ForkChanges {
+    /// blocks attached to index after forks
+    pub fn attached_blocks(&self) -> &VecDeque<BlockView> {
+        &self.attached_blocks
+    }
+
+    /// blocks detached from index after forks
+    pub fn detached_blocks(&self) -> &VecDeque<BlockView> {
+        &self.detached_blocks
+    }
+
+    /// proposal_id detached to index after forks
+    pub fn detached_proposal_id(&self) -> &HashSet<ProposalShortId> {
+        &self.detached_proposal_id
+    }
+
+    /// whether there is any block that should be detached
+    pub fn has_detached(&self) -> bool {
+        !self.detached_blocks.is_empty()
+    }
+
+    /// the number of cached verified attached blocks
+    pub fn verified_len(&self) -> usize {
+        self.attached_blocks.len() - self.dirty_exts.len()
+    }
+
+    /// assertion to make sure attached_blocks and detached_blocks are sorted
+    #[cfg(debug_assertions)]
+    pub fn is_sorted(&self) -> bool {
+        IsSorted::is_sorted_by_key(&mut self.attached_blocks().iter(), |blk| {
+            blk.header().number()
+        }) && IsSorted::is_sorted_by_key(&mut self.detached_blocks().iter(), |blk| {
+            blk.header().number()
+        })
+    }
+
+    pub fn during_hardfork(&self, hardfork_switch: &HardForks) -> bool {
+        let hardfork_during_detach =
+            self.check_if_hardfork_during_blocks(hardfork_switch, &self.detached_blocks);
+        let hardfork_during_attach =
+            self.check_if_hardfork_during_blocks(hardfork_switch, &self.attached_blocks);
+
+        hardfork_during_detach || hardfork_during_attach
+    }
+
+    fn check_if_hardfork_during_blocks(
+        &self,
+        hardfork: &HardForks,
+        blocks: &VecDeque<BlockView>,
+    ) -> bool {
+        if blocks.is_empty() {
+            false
+        } else {
+            // This method assumes that the input blocks are sorted and unique.
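+            // Equivalently: the span crosses the CKB2023 hardfork iff the RFC-0049
+            // activation epoch lies in the half-open interval (epoch_first, epoch_next],
+            // where epoch_next is the epoch reached right after the last block.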
+            let rfc_0049 = hardfork.ckb2023.rfc_0049();
+            let epoch_first = blocks.front().unwrap().epoch().number();
+            let epoch_next = blocks
+                .back()
+                .unwrap()
+                .epoch()
+                .minimum_epoch_number_after_n_blocks(1);
+            epoch_first < rfc_0049 && rfc_0049 <= epoch_next
+        }
+    }
+}
diff --git a/chain/src/utils/mod.rs b/chain/src/utils/mod.rs
new file mode 100644
index 0000000000..efdc1e092a
--- /dev/null
+++ b/chain/src/utils/mod.rs
@@ -0,0 +1,2 @@
+pub mod forkchanges;
+pub mod orphan_block_pool;
diff --git a/sync/src/orphan_block_pool.rs b/chain/src/utils/orphan_block_pool.rs
similarity index 70%
rename from sync/src/orphan_block_pool.rs
rename to chain/src/utils/orphan_block_pool.rs
index 20d6eda26d..602cd6adba 100644
--- a/sync/src/orphan_block_pool.rs
+++ b/chain/src/utils/orphan_block_pool.rs
@@ -1,27 +1,27 @@
-use ckb_logger::{debug, error};
-use ckb_types::core::EpochNumber;
-use ckb_types::{core, packed};
+use crate::LonelyBlockHash;
+use ckb_logger::debug;
+use ckb_store::{ChainDB, ChainStore};
+use ckb_types::core::{BlockView, EpochNumber};
+use ckb_types::packed;
 use ckb_util::{parking_lot::RwLock, shrink_to_fit};
 use std::collections::{HashMap, HashSet, VecDeque};
+use std::sync::Arc;
 
 pub type ParentHash = packed::Byte32;
 
-const SHRINK_THRESHOLD: usize = 100;
-// Orphan pool will remove expired blocks whose epoch is less than tip_epoch - EXPIRED_EPOCH,
-const EXPIRED_EPOCH: u64 = 6;
+const SHRINK_THRESHOLD: usize = 100;
+pub const EXPIRED_EPOCH: u64 = 6;
 
 #[derive(Default)]
 struct InnerPool {
     // Group the blocks in the pool by the parent hash.
-    blocks: HashMap<ParentHash, HashMap<packed::Byte32, core::BlockView>>,
+    blocks: HashMap<ParentHash, HashMap<packed::Byte32, LonelyBlockHash>>,
     // The map tells the parent hash when given the hash of a block in the pool.
     //
     // The block is in the orphan pool if and only if the block hash exists as a key in this map.
     parents: HashMap<packed::Byte32, ParentHash>,
     // Leaders are blocks not in the orphan pool but having at least a child in the pool.
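    // Only a hash currently tracked in `leaders` can seed `remove_blocks_by_parent`;
    // asking for a non-leader hash returns an empty Vec.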
     leaders: HashSet<ParentHash>,
-    // block size of pool
-    block_size: usize,
 }
 
 impl InnerPool {
@@ -30,26 +30,16 @@ impl InnerPool {
             blocks: HashMap::with_capacity(capacity),
             parents: HashMap::new(),
             leaders: HashSet::new(),
-            block_size: 0,
         }
     }
 
-    fn insert(&mut self, block: core::BlockView) {
-        let hash = block.header().hash();
-        let parent_hash = block.data().header().raw().parent_hash();
-
-        self.block_size = self
-            .block_size
-            .checked_add(block.data().total_size())
-            .unwrap_or_else(|| {
-                error!("orphan pool block size add overflow");
-                usize::MAX
-            });
+    fn insert(&mut self, lonely_block: LonelyBlockHash) {
+        let hash = lonely_block.hash();
+        let parent_hash = lonely_block.parent_hash();
         self.blocks
             .entry(parent_hash.clone())
             .or_default()
-            .insert(hash.clone(), block);
-
+            .insert(hash.clone(), lonely_block);
         // Out-of-order insertion needs to be deduplicated
         self.leaders.remove(&hash);
         // It is a possible optimization to make the judgment in advance,
@@ -63,7 +53,7 @@ impl InnerPool {
         self.parents.insert(hash, parent_hash);
     }
 
-    pub fn remove_blocks_by_parent(&mut self, parent_hash: &ParentHash) -> Vec<core::BlockView> {
+    pub fn remove_blocks_by_parent(&mut self, parent_hash: &ParentHash) -> Vec<LonelyBlockHash> {
         // try remove leaders first
         if !self.leaders.remove(parent_hash) {
             return Vec::new();
@@ -72,7 +62,7 @@
         let mut queue: VecDeque<packed::Byte32> = VecDeque::new();
         queue.push_back(parent_hash.to_owned());
 
-        let mut removed: Vec<core::BlockView> = Vec::new();
+        let mut removed: Vec<LonelyBlockHash> = Vec::new();
         while let Some(parent_hash) = queue.pop_front() {
             if let Some(orphaned) = self.blocks.remove(&parent_hash) {
                 let (hashes, blocks): (Vec<_>, Vec<_>) = orphaned.into_iter().unzip();
@@ -84,13 +74,6 @@
             }
         }
 
-        self.block_size = self
-            .block_size
-            .checked_sub(removed.iter().map(|b| b.data().total_size()).sum::<usize>())
-            .unwrap_or_else(|| {
-                error!("orphan pool block size sub overflow");
-                0
-            });
         debug!("orphan pool pop chain len: {}", removed.len());
         debug_assert_ne!(
             removed.len(),
@@ -104,23 +87,23 @@
         removed
     }
 
-    pub fn get_block(&self, hash: &packed::Byte32) -> Option<core::BlockView> {
+    pub fn get_block(&self, hash: &packed::Byte32) -> Option<&LonelyBlockHash> {
         self.parents.get(hash).and_then(|parent_hash| {
             self.blocks
                 .get(parent_hash)
-                .and_then(|blocks| blocks.get(hash).cloned())
+                .and_then(|blocks| blocks.get(hash))
         })
     }
 
     /// cleanup expired blocks(epoch + EXPIRED_EPOCH < tip_epoch)
-    pub fn clean_expired_blocks(&mut self, tip_epoch: EpochNumber) -> Vec<packed::Byte32> {
+    pub fn clean_expired_blocks(&mut self, tip_epoch: EpochNumber) -> Vec<LonelyBlockHash> {
         let mut result = vec![];
 
         for hash in self.leaders.clone().iter() {
             if self.need_clean(hash, tip_epoch) {
                 // remove items in orphan pool and return hash to callee(clean header map)
                 let descendants = self.remove_blocks_by_parent(hash);
-                result.extend(descendants.iter().map(|block| block.hash()));
+                result.extend(descendants);
             }
         }
         result
@@ -131,9 +114,9 @@
         self.blocks
             .get(parent_hash)
             .and_then(|map| {
-                map.iter()
-                    .next()
-                    .map(|(_, block)| block.header().epoch().number() + EXPIRED_EPOCH < tip_epoch)
+                map.iter().next().map(|(_, lonely_block)| {
+                    lonely_block.epoch_number() + EXPIRED_EPOCH < tip_epoch
+                })
             })
             .unwrap_or_default()
     }
@@ -155,19 +138,21 @@
     }
 
     /// Insert orphaned block, for which we have already requested its parent block
-    pub fn insert(&self, block: core::BlockView) {
-        self.inner.write().insert(block);
+    pub fn insert(&self, lonely_block: LonelyBlockHash) {
+        self.inner.write().insert(lonely_block);
     }
 
-    pub fn
remove_blocks_by_parent(&self, parent_hash: &ParentHash) -> Vec<core::BlockView> {
+    pub fn remove_blocks_by_parent(&self, parent_hash: &ParentHash) -> Vec<LonelyBlockHash> {
         self.inner.write().remove_blocks_by_parent(parent_hash)
     }
 
-    pub fn get_block(&self, hash: &packed::Byte32) -> Option<core::BlockView> {
-        self.inner.read().get_block(hash)
+    pub fn get_block(&self, store: &ChainDB, hash: &packed::Byte32) -> Option<Arc<BlockView>> {
+        let inner = self.inner.read();
+        let lonely_block_hash: &LonelyBlockHash = inner.get_block(hash)?;
+        store.get_block(&lonely_block_hash.hash()).map(Arc::new)
     }
 
-    pub fn clean_expired_blocks(&self, epoch: EpochNumber) -> Vec<packed::Byte32> {
+    pub fn clean_expired_blocks(&self, epoch: EpochNumber) -> Vec<LonelyBlockHash> {
         self.inner.write().clean_expired_blocks(epoch)
     }
 
@@ -175,14 +160,6 @@
         self.inner.read().parents.len()
     }
 
-    pub fn is_empty(&self) -> bool {
-        self.len() == 0
-    }
-
-    pub fn total_size(&self) -> usize {
-        self.inner.read().block_size
-    }
-
     pub fn clone_leaders(&self) -> Vec<packed::Byte32> {
         self.inner.read().leaders.iter().cloned().collect()
     }
diff --git a/ckb-bin/Cargo.toml b/ckb-bin/Cargo.toml
index be46b1a0ed..af9ef93cd9 100644
--- a/ckb-bin/Cargo.toml
+++ b/ckb-bin/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "ckb-bin"
-version = "0.117.0-pre"
+version = "0.118.0-pre"
 license = "MIT"
 authors = ["Nervos Core Dev <dev@nervos.org>"]
 edition = "2021"
@@ -14,37 +14,38 @@ serde = { version = "1.0", features = ["derive"] }
 serde_json = { version = "1.0" }
 serde_plain = "0.3.0"
 toml = "0.5"
-ckb-app-config = { path = "../util/app-config", version = "= 0.117.0-pre" }
-ckb-logger = { path = "../util/logger", version = "= 0.117.0-pre" }
-ckb-logger-service = { path = "../util/logger-service", version = "= 0.117.0-pre" }
-ckb-metrics-service = { path = "../util/metrics-service", version = "= 0.117.0-pre" }
-ckb-util = { path = "../util", version = "= 0.117.0-pre" }
-ckb-types = { path = "../util/types", version = "= 0.117.0-pre" }
-ckb-channel = { path = "../util/channel", version = "= 0.117.0-pre" }
-ckb-jsonrpc-types = { path = "../util/jsonrpc-types", version = "= 0.117.0-pre" }
-ckb-chain = { path = "../chain", version = "= 0.117.0-pre" }
-ckb-shared = { path = "../shared", version = "= 0.117.0-pre" }
-ckb-store = { path = "../store", version = "= 0.117.0-pre" }
-ckb-chain-spec = {path = "../spec", version = "= 0.117.0-pre"}
-ckb-miner = { path = "../miner", version = "= 0.117.0-pre" }
-ckb-network = { path = "../network", version = "= 0.117.0-pre"}
-ckb-resource = { path = "../resource", version = "= 0.117.0-pre"}
+ckb-app-config = { path = "../util/app-config", version = "= 0.118.0-pre" }
+ckb-logger = { path = "../util/logger", version = "= 0.118.0-pre" }
+ckb-logger-service = { path = "../util/logger-service", version = "= 0.118.0-pre" }
+ckb-metrics-service = { path = "../util/metrics-service", version = "= 0.118.0-pre" }
+ckb-util = { path = "../util", version = "= 0.118.0-pre" }
+ckb-types = { path = "../util/types", version = "= 0.118.0-pre" }
+ckb-channel = { path = "../util/channel", version = "= 0.118.0-pre" }
+ckb-jsonrpc-types = { path = "../util/jsonrpc-types", version = "= 0.118.0-pre" }
+ckb-chain = { path = "../chain", version = "= 0.118.0-pre" }
+ckb-shared = { path = "../shared", version = "= 0.118.0-pre" }
+ckb-store = { path = "../store", version = "= 0.118.0-pre" }
+ckb-chain-spec = { path = "../spec", version = "= 0.118.0-pre" }
+ckb-miner = { path = "../miner", version = "= 0.118.0-pre" }
+ckb-network = { path = "../network", version = "= 0.118.0-pre" }
+ckb-resource = { path = "../resource", version = "= 0.118.0-pre" }
ctrlc = { version = "3.1", features = ["termination"] } -ckb-instrument = { path = "../util/instrument", version = "= 0.117.0-pre", features = ["progress_bar"] } -ckb-build-info = { path = "../util/build-info", version = "= 0.117.0-pre" } -ckb-memory-tracker = { path = "../util/memory-tracker", version = "= 0.117.0-pre" } -ckb-chain-iter = { path = "../util/chain-iter", version = "= 0.117.0-pre" } -ckb-verification-traits = { path = "../verification/traits", version = "= 0.117.0-pre" } -ckb-async-runtime = { path = "../util/runtime", version = "= 0.117.0-pre" } -ckb-migrate = { path = "../util/migrate", version = "= 0.117.0-pre" } -ckb-launcher = { path = "../util/launcher", version = "= 0.117.0-pre" } +ckb-instrument = { path = "../util/instrument", version = "= 0.118.0-pre", features = ["progress_bar"] } +ckb-build-info = { path = "../util/build-info", version = "= 0.118.0-pre" } +ckb-memory-tracker = { path = "../util/memory-tracker", version = "= 0.118.0-pre" } +ckb-chain-iter = { path = "../util/chain-iter", version = "= 0.118.0-pre" } +ckb-verification-traits = { path = "../verification/traits", version = "= 0.118.0-pre" } +ckb-async-runtime = { path = "../util/runtime", version = "= 0.118.0-pre" } +ckb-migrate = { path = "../util/migrate", version = "= 0.118.0-pre" } +ckb-launcher = { path = "../util/launcher", version = "= 0.118.0-pre" } base64 = "0.21.0" tempfile.workspace = true rayon = "1.0" sentry = { version = "0.26.0", optional = true } is-terminal = "0.4.7" fdlimit = "0.2.1" -ckb-stop-handler = { path = "../util/stop-handler", version = "= 0.117.0-pre" } +ckb-stop-handler = { path = "../util/stop-handler", version = "= 0.118.0-pre" } +tokio = { version = "1", features = ["sync"] } [target.'cfg(not(target_os="windows"))'.dependencies] daemonize = { version = "0.5.0" } @@ -53,7 +54,7 @@ colored = "2.0" [features] deadlock_detection = ["ckb-util/deadlock_detection"] -profiling = ["ckb-memory-tracker/profiling"] +profiling = ["ckb-memory-tracker/profiling", "ckb-shared/stats"] with_sentry = ["sentry", "ckb-launcher/with_sentry", "ckb-network/with_sentry", "ckb-app-config/with_sentry", "ckb-logger-service/with_sentry"] with_dns_seeding = ["ckb-network/with_dns_seeding"] portable = ["ckb-launcher/portable"] diff --git a/ckb-bin/src/subcommand/import.rs b/ckb-bin/src/subcommand/import.rs index d6fba348c3..0b3eabc175 100644 --- a/ckb-bin/src/subcommand/import.rs +++ b/ckb-bin/src/subcommand/import.rs @@ -1,6 +1,5 @@ use ckb_app_config::{ExitCode, ImportArgs}; use ckb_async_runtime::Handle; -use ckb_chain::chain::ChainService; use ckb_instrument::Import; use ckb_shared::SharedBuilder; @@ -13,10 +12,9 @@ pub fn import(args: ImportArgs, async_handle: Handle) -> Result<(), ExitCode> { async_handle, args.consensus, )?; - let (shared, mut pack) = builder.build()?; + let (_shared, mut pack) = builder.build()?; - let chain_service = ChainService::new(shared, pack.take_proposal_table()); - let chain_controller = chain_service.start::<&str>(Some("ImportChainService")); + let chain_controller = ckb_chain::start_chain_services(pack.take_chain_services_builder()); // manual drop tx_pool_builder and relay_tx_receiver pack.take_tx_pool_builder(); diff --git a/ckb-bin/src/subcommand/replay.rs b/ckb-bin/src/subcommand/replay.rs index ac7da08fb2..fcadb6a23a 100644 --- a/ckb-bin/src/subcommand/replay.rs +++ b/ckb-bin/src/subcommand/replay.rs @@ -1,9 +1,9 @@ use ckb_app_config::{ExitCode, ReplayArgs}; use ckb_async_runtime::Handle; -use ckb_chain::chain::ChainService; +use ckb_chain::ChainController; 
use ckb_chain_iter::ChainIterator;
 use ckb_instrument::{ProgressBar, ProgressStyle};
-use ckb_shared::{Shared, SharedBuilder};
+use ckb_shared::{ChainServicesBuilder, Shared, SharedBuilder};
 use ckb_store::ChainStore;
 use ckb_verification_traits::Switch;
 use std::sync::Arc;
@@ -46,13 +46,14 @@ pub fn replay(args: ReplayArgs, async_handle: Handle) -> Result<(), ExitCode> {
             async_handle,
             args.consensus,
         )?;
-        let (tmp_shared, mut pack) = shared_builder.tx_pool_config(args.config.tx_pool).build()?;
-        let chain = ChainService::new(tmp_shared, pack.take_proposal_table());
+        let (_tmp_shared, mut pack) = shared_builder.tx_pool_config(args.config.tx_pool).build()?;
+        let chain_service_builder: ChainServicesBuilder = pack.take_chain_services_builder();
+        let chain_controller = ckb_chain::start_chain_services(chain_service_builder);
 
         if let Some((from, to)) = args.profile {
-            profile(shared, chain, from, to);
+            profile(shared, chain_controller, from, to);
         } else if args.sanity_check {
-            sanity_check(shared, chain, args.full_verification);
+            sanity_check(shared, chain_controller, args.full_verification);
         }
     }
     tmp_db_dir.close().map_err(|err| {
@@ -63,16 +64,16 @@ pub fn replay(args: ReplayArgs, async_handle: Handle) -> Result<(), ExitCode> {
     Ok(())
 }
 
-fn profile(shared: Shared, mut chain: ChainService, from: Option<u64>, to: Option<u64>) {
+fn profile(shared: Shared, chain_controller: ChainController, from: Option<u64>, to: Option<u64>) {
     let tip_number = shared.snapshot().tip_number();
     let from = from.map(|v| std::cmp::max(1, v)).unwrap_or(1);
     let to = to
         .map(|v| std::cmp::min(v, tip_number))
         .unwrap_or(tip_number);
-    process_range_block(&shared, &mut chain, 1..from);
-    println!("Start profiling; re-process blocks {from}..{to}:");
+    process_range_block(&shared, chain_controller.clone(), 1..from);
+    println!("Start profiling, re-process blocks {from}..{to}:");
     let now = std::time::Instant::now();
-    let tx_count = process_range_block(&shared, &mut chain, from..=to);
+    let tx_count = process_range_block(&shared, chain_controller, from..=to);
     let duration = std::time::Instant::now().saturating_duration_since(now);
     if duration.as_secs() >= MIN_PROFILING_TIME {
         println!(
@@ -97,7 +98,7 @@ fn profile(shared: Shared, mut chain: ChainService, from: Option<u64>, to: Optio
 
 fn process_range_block(
     shared: &Shared,
-    chain: &mut ChainService,
+    chain_controller: ChainController,
     range: impl Iterator<Item = BlockNumber>,
 ) -> usize {
     let mut tx_count = 0;
@@ -108,12 +109,14 @@ fn process_range_block(
             .and_then(|hash| snapshot.get_block(&hash))
             .expect("read block from store");
         tx_count += block.transactions().len().saturating_sub(1);
-        chain.process_block(Arc::new(block), Switch::NONE).unwrap();
+        chain_controller
+            .blocking_process_block_with_switch(Arc::new(block), Switch::NONE)
+            .unwrap();
     }
     tx_count
 }
 
-fn sanity_check(shared: Shared, mut chain: ChainService, full_verification: bool) {
+fn sanity_check(shared: Shared, chain_controller: ChainController, full_verification: bool) {
     let tip_header = shared.snapshot().tip_header().clone();
     let chain_iter = ChainIterator::new(shared.store());
     let pb = ProgressBar::new(chain_iter.len());
@@ -132,7 +135,8 @@ fn sanity_check(shared: Shared, mut chain: ChainService, full_verification: bool
     let mut cursor = shared.consensus().genesis_block().header();
     for block in chain_iter {
         let header = block.header();
-        if let Err(e) = chain.process_block(Arc::new(block), switch) {
+        if let Err(e) = chain_controller.blocking_process_block_with_switch(Arc::new(block), switch)
+        {
            eprintln!(
                "Replay sanity-check error: {:?} at
block({}-{})", e, diff --git a/ckb-bin/src/subcommand/run.rs b/ckb-bin/src/subcommand/run.rs index 827999fa5a..41e9413947 100644 --- a/ckb-bin/src/subcommand/run.rs +++ b/ckb-bin/src/subcommand/run.rs @@ -6,6 +6,7 @@ use ckb_async_runtime::{new_global_runtime, Handle}; use ckb_build_info::Version; use ckb_launcher::Launcher; use ckb_logger::info; + use ckb_stop_handler::{broadcast_exit_signals, wait_all_ckb_services_exit}; use ckb_types::core::cell::setup_system_cell_cache; @@ -45,7 +46,8 @@ pub fn run(args: RunArgs, version: Version, async_handle: Handle) -> Result<(), Some(shared.store().db().inner()), ); - let chain_controller = launcher.start_chain_service(&shared, pack.take_proposal_table()); + let chain_controller = + launcher.start_chain_service(&shared, pack.take_chain_services_builder()); launcher.start_block_filter(&shared); diff --git a/db-migration/Cargo.toml b/db-migration/Cargo.toml index eb2fd60fb4..875709bd89 100644 --- a/db-migration/Cargo.toml +++ b/db-migration/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ckb-db-migration" -version = "0.117.0-pre" +version = "0.118.0-pre" license = "MIT" authors = ["Nervos Core Dev "] edition = "2021" @@ -11,19 +11,19 @@ repository = "https://github.com/nervosnetwork/ckb" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -ckb-db = { path = "../db", version = "= 0.117.0-pre" } -ckb-logger = { path = "../util/logger", version = "= 0.117.0-pre" } -ckb-error = { path = "../error", version = "= 0.117.0-pre" } -ckb-db-schema = { path = "../db-schema", version = "= 0.117.0-pre" } -ckb-channel = { path = "../util/channel", version = "= 0.117.0-pre" } -ckb-stop-handler = { path = "../util/stop-handler", version = "= 0.117.0-pre" } +ckb-db = { path = "../db", version = "= 0.118.0-pre" } +ckb-logger = { path = "../util/logger", version = "= 0.118.0-pre" } +ckb-error = { path = "../error", version = "= 0.118.0-pre" } +ckb-db-schema = { path = "../db-schema", version = "= 0.118.0-pre" } +ckb-channel = { path = "../util/channel", version = "= 0.118.0-pre" } +ckb-stop-handler = { path = "../util/stop-handler", version = "= 0.118.0-pre" } once_cell = "1.8.0" indicatif = "0.16" console = ">=0.9.1, <1.0.0" [dev-dependencies] tempfile.workspace = true -ckb-app-config = { path = "../util/app-config", version = "= 0.117.0-pre" } +ckb-app-config = { path = "../util/app-config", version = "= 0.118.0-pre" } [features] portable = ["ckb-db/portable"] diff --git a/db-schema/Cargo.toml b/db-schema/Cargo.toml index 229ed2ed08..3953449e27 100644 --- a/db-schema/Cargo.toml +++ b/db-schema/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ckb-db-schema" -version = "0.117.0-pre" +version = "0.118.0-pre" license = "MIT" authors = ["Nervos Core Dev "] edition = "2021" diff --git a/db/Cargo.toml b/db/Cargo.toml index f1a1c61086..9b72795487 100644 --- a/db/Cargo.toml +++ b/db/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ckb-db" -version = "0.117.0-pre" +version = "0.118.0-pre" license = "MIT" authors = ["Nervos Core Dev "] edition = "2021" @@ -9,12 +9,12 @@ homepage = "https://github.com/nervosnetwork/ckb" repository = "https://github.com/nervosnetwork/ckb" [dependencies] -ckb-app-config = { path = "../util/app-config", version = "= 0.117.0-pre" } -ckb-logger = { path = "../util/logger", version = "= 0.117.0-pre" } -ckb-error = { path = "../error", version = "= 0.117.0-pre" } +ckb-app-config = { path = "../util/app-config", version = "= 0.118.0-pre" } +ckb-logger = { path = "../util/logger", version = "= 0.118.0-pre" 
} +ckb-error = { path = "../error", version = "= 0.118.0-pre" } libc = "0.2" rocksdb = { package = "ckb-rocksdb", version ="=0.21.1", features = ["snappy"], default-features = false } -ckb-db-schema = { path = "../db-schema", version = "= 0.117.0-pre" } +ckb-db-schema = { path = "../db-schema", version = "= 0.118.0-pre" } [dev-dependencies] tempfile.workspace = true diff --git a/deny.toml b/deny.toml index a3eba54265..aadefc53d2 100644 --- a/deny.toml +++ b/deny.toml @@ -1,57 +1,248 @@ +# This template contains all of the possible sections and their default values + +# Note that all fields that take a lint level have these possible values: +# * deny - An error will be produced and the check will fail +# * warn - A warning will be produced, but the check will not fail +# * allow - No warning or error will be produced, though in some cases a note +# will be + +# The values provided in this template are the default values that will be used +# when any section or field is not specified in your own configuration + +# Root options + +# The graph table configures how the dependency graph is constructed and thus +# which crates the checks are performed against +[graph] +# If 1 or more target triples (and optionally, target_features) are specified, +# only the specified targets will be checked when running `cargo deny check`. +# This means, if a particular package is only ever used as a target specific +# dependency, such as, for example, the `nix` crate only being used via the +# `target_family = "unix"` configuration, that only having windows targets in +# this list would mean the nix crate, as well as any of its exclusive +# dependencies not shared by any other crates, would be ignored, as the target +# list here is effectively saying which targets you are building for. +targets = [ + # The triple can be any string, but only the target triples built in to + # rustc (as of 1.40) can be checked against actual config expressions + #"x86_64-unknown-linux-musl", + # You can also specify which target_features you promise are enabled for a + # particular target. target_features are currently not validated against + # the actual valid features supported by the target architecture. + #{ triple = "wasm32-unknown-unknown", features = ["atomics"] }, +] +# When creating the dependency graph used as the source of truth when checks are +# executed, this field can be used to prune crates from the graph, removing them +# from the view of cargo-deny. This is an extremely heavy hammer, as if a crate +# is pruned from the graph, all of its dependencies will also be pruned unless +# they are connected to another crate in the graph that hasn't been pruned, +# so it should be used with care. The identifiers are [Package ID Specifications] +# (https://doc.rust-lang.org/cargo/reference/pkgid-spec.html) +#exclude = [] +# If true, metadata will be collected with `--all-features`. Note that this can't +# be toggled off if true, if you want to conditionally enable `--all-features` it +# is recommended to pass `--all-features` on the cmd line instead +all-features = false +# If true, metadata will be collected with `--no-default-features`. The same +# caveat with `all-features` applies +no-default-features = false +# If set, these feature will be enabled when collecting metadata. If `--features` +# is specified on the cmd line they will take precedence over this option. 
+#features = [] + +# The output table provides options for how/if diagnostics are outputted +[output] +# When outputting inclusion graphs in diagnostics that include features, this +# option can be used to specify the depth at which feature edges will be added. +# This option is included since the graphs can be quite large and the addition +# of features from the crate(s) to all of the graph roots can be far too verbose. +# This option can be overridden via `--feature-depth` on the cmd line +feature-depth = 1 + +# This section is considered when running `cargo deny check advisories` +# More documentation for the advisories section can be found here: +# https://embarkstudios.github.io/cargo-deny/checks/advisories/cfg.html [advisories] -vulnerability = "deny" -unmaintained = "warn" -yanked = "deny" -notice = "deny" +# The path where the advisory databases are cloned/fetched into +#db-path = "$CARGO_HOME/advisory-dbs" +# The url(s) of the advisory databases to use +#db-urls = ["https://github.com/rustsec/advisory-db"] +# A list of advisory IDs to ignore. Note that ignored advisories will still +# output a note when they are encountered. ignore = [ - # The CVE can be kept under control for its triggering. - # See https://github.com/launchbadge/sqlx/pull/2455#issuecomment-1507657825 for more information. - # Meanwhile, awaiting SQLx's new version (> 0.7.3) for full support of any DB driver. - "RUSTSEC-2022-0090", - # ckb-rich-indexer need sqlx's runtime-tokio-rustls feature, - # ignore https://rustsec.org/advisories/RUSTSEC-2024-0336 - "RUSTSEC-2024-0336" +# https://rustsec.org/advisories/RUSTSEC-2022-0090 +# It was sometimes possible for SQLite versions >= 1.0.12, < 3.39.2 to allow an array-bounds overflow when large string were input into SQLite's `printf` function. + "RUSTSEC-2022-0090", +# https://rustsec.org/advisories/RUSTSEC-2024-0336 +# `rustls::ConnectionCommon::complete_io` could fall into an infinite loop based on network input + "RUSTSEC-2024-0336" +#"RUSTSEC-0000-0000", +#{ id = "RUSTSEC-0000-0000", reason = "you can specify a reason the advisory is ignored" }, +#"a-crate-that-is-yanked@0.1.1", # you can also ignore yanked crate versions if you wish +#{ crate = "a-crate-that-is-yanked@0.1.1", reason = "you can specify why you are ignoring the yanked crate" }, ] +# If this is true, then cargo deny will use the git executable to fetch advisory database. +# If this is false, then it uses a built-in git library. +# Setting this to true can be helpful if you have special authentication requirements that cargo-deny does not support. +# See Git Authentication for more information about setting up git authentication. +#git-fetch-with-cli = true +# This section is considered when running `cargo deny check licenses` +# More documentation for the licenses section can be found here: +# https://embarkstudios.github.io/cargo-deny/checks/licenses/cfg.html [licenses] -unlicensed = "deny" +# List of explicitly allowed licenses +# See https://spdx.org/licenses/ for list of possible licenses +# [possible values: any SPDX 3.11 short identifier (+ optional exception)]. 
allow = [ + "MIT", "Apache-2.0", + "MPL-2.0", + "BSL-1.0", "BSD-3-Clause", - "CC0-1.0", "ISC", - "MIT", - # https://softwareengineering.stackexchange.com/questions/317944/can-i-include-code-licensed-under-mpl-within-a-project-under-mit-license - "MPL-2.0", + "CC0-1.0", "Unicode-DFS-2016", - "BSL-1.0", + "OpenSSL" + #"Apache-2.0 WITH LLVM-exception", ] -copyleft = "deny" -default = "deny" +# The confidence threshold for detecting a license from license text. +# The higher the value, the more closely the license text must be to the +# canonical license text of a valid SPDX license file. +# [possible values: any between 0.0 and 1.0]. +confidence-threshold = 0.8 +# Allow 1 or more licenses on a per-crate basis, so that particular licenses +# aren't accepted for every possible crate as with the normal allow list exceptions = [ - { allow = ["MIT", "ISC", "OpenSSL"], name = "ring", version = "*" }, + # Each entry is the crate and version constraint, and its specific allow + # list + # { allow = ["Zlib"], crate = "adler32" }, ] +# Some crates don't have (easily) machine readable licensing information, +# adding a clarification entry for it allows you to manually specify the +# licensing information [[licenses.clarify]] -name = "ring" -version = "*" +# The package spec the clarification applies to +crate = "ring" +# The SPDX expression for the license requirements of the crate expression = "MIT AND ISC AND OpenSSL" +# One or more files in the crate's source used as the "source of truth" for +# the license expression. If the contents match, the clarification will be used +# when running the license check, otherwise the clarification will be ignored +# and the crate will be checked normally, which may produce warnings or errors +# depending on the rest of your configuration license-files = [ - { path = "LICENSE", hash = 0xbd0eed23 } +# Each entry is a crate relative path, and the (opaque) hash of its contents +{ path = "LICENSE", hash = 0xbd0eed23 } ] -[[licenses.clarify]] -name = "encoding_rs" -version = "*" -expression = "(Apache-2.0 OR MIT) AND BSD-3-Clause" -license-files = [ - { path = "COPYRIGHT", hash = 0x39f8ad31 } +[licenses.private] +# If true, ignores workspace crates that aren't published, or are only +# published to private registries. +# To see how to mark a crate as unpublished (to the official registry), +# visit https://doc.rust-lang.org/cargo/reference/manifest.html#the-publish-field. +ignore = true +# One or more private registries that you might publish crates to, if a crate +# is only published to private registries, and ignore is true, the crate will +# not have its license(s) checked +registries = [ + #"https://sekretz.com/registry ] +# This section is considered when running `cargo deny check bans`. +# More documentation about the 'bans' section can be found here: +# https://embarkstudios.github.io/cargo-deny/checks/bans/cfg.html [bans] +# Lint level for when multiple versions of the same crate are detected multiple-versions = "warn" -wildcards = "deny" +# Lint level for when a crate version requirement is `*` +wildcards = "allow" +# The graph highlighting used when creating dotgraphs for crates +# with multiple versions +# * lowest-version - The path to the lowest versioned duplicate is highlighted +# * simplest-path - The path to the version with the fewest edges is highlighted +# * all - Both lowest-version and simplest-path are used +highlight = "all" +# The default lint level for `default` features for crates that are members of +# the workspace that is being checked. 
This can be overridden by allowing/denying +# `default` on a crate-by-crate basis if desired. +workspace-default-features = "allow" +# The default lint level for `default` features for external crates that are not +# members of the workspace. This can be overridden by allowing/denying `default` +# on a crate-by-crate basis if desired. +external-default-features = "allow" +# List of crates that are allowed. Use with care! +allow = [ + #"ansi_term@0.11.0", + #{ crate = "ansi_term@0.11.0", reason = "you can specify a reason it is allowed" }, +] +# List of crates to deny +deny = [ + #"ansi_term@0.11.0", + #{ crate = "ansi_term@0.11.0", reason = "you can specify a reason it is banned" }, + # Wrapper crates can optionally be specified to allow the crate when it + # is a direct dependency of the otherwise banned crate + #{ crate = "ansi_term@0.11.0", wrappers = ["this-crate-directly-depends-on-ansi_term"] }, +] + +# List of features to allow/deny +# Each entry the name of a crate and a version range. If version is +# not specified, all versions will be matched. +#[[bans.features]] +#crate = "reqwest" +# Features to not allow +#deny = ["json"] +# Features to allow +#allow = [ +# "rustls", +# "__rustls", +# "__tls", +# "hyper-rustls", +# "rustls", +# "rustls-pemfile", +# "rustls-tls-webpki-roots", +# "tokio-rustls", +# "webpki-roots", +#] +# If true, the allowed features must exactly match the enabled feature set. If +# this is set there is no point setting `deny` +#exact = true + +# Certain crates/versions that will be skipped when doing duplicate detection. +skip = [ + #"ansi_term@0.11.0", + #{ crate = "ansi_term@0.11.0", reason = "you can specify a reason why it can't be updated/removed" }, +] +# Similarly to `skip` allows you to skip certain crates during duplicate +# detection. Unlike skip, it also includes the entire tree of transitive +# dependencies starting at the specified crate, up to a certain depth, which is +# by default infinite. +skip-tree = [ + #"ansi_term@0.11.0", # will be skipped along with _all_ of its direct and transitive dependencies + #{ crate = "ansi_term@0.11.0", depth = 20 }, +] +# This section is considered when running `cargo deny check sources`. +# More documentation about the 'sources' section can be found here: +# https://embarkstudios.github.io/cargo-deny/checks/sources/cfg.html [sources] -unknown-registry = "deny" -unknown-git = "deny" +# Lint level for what to happen when a crate from a crate registry that is not +# in the allow list is encountered +unknown-registry = "warn" +# Lint level for what to happen when a crate from a git repository that is not +# in the allow list is encountered +unknown-git = "warn" +# List of URLs for allowed crate registries. Defaults to the crates.io index +# if not specified. If it is specified but empty, no registries are allowed. 
+allow-registry = ["https://github.com/rust-lang/crates.io-index"]
+# List of URLs for allowed Git repositories
+allow-git = []
+
+[sources.allow-org]
+# 1 or more github.com organizations to allow git sources for
+github = []
+# 1 or more gitlab.com organizations to allow git sources for
+gitlab = []
+# 1 or more bitbucket.org organizations to allow git sources for
+bitbucket = []
diff --git a/devtools/doc/rpc-gen/Cargo.toml b/devtools/doc/rpc-gen/Cargo.toml
index 5c33d82e5f..2b4a260a97 100644
--- a/devtools/doc/rpc-gen/Cargo.toml
+++ b/devtools/doc/rpc-gen/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "ckb-rpc-gen"
-version = "0.117.0-pre"
+version = "0.118.0-pre"
 edition = "2021"
 license = "MIT"
 authors = ["Nervos Core Dev <dev@nervos.org>"]
@@ -9,7 +9,7 @@ homepage = "https://github.com/nervosnetwork/ckb"
 repository = "https://github.com/nervosnetwork/ckb"
 
 [dependencies]
-ckb-rpc ={ path = "../../../rpc", version = "= 0.117.0-pre" }
+ckb-rpc = { path = "../../../rpc", version = "= 0.118.0-pre" }
 schemars = { version = "0.8.19", package = "ckb_schemars" }
 serde_json = "~1.0"
 tera = "1"
diff --git a/docs/ckb_async_block_sync.mermaid b/docs/ckb_async_block_sync.mermaid
new file mode 100644
index 0000000000..eb28cd0eb0
--- /dev/null
+++ b/docs/ckb_async_block_sync.mermaid
@@ -0,0 +1,81 @@
+sequenceDiagram
+    autonumber
+    participant Sr as Synchronizer::received
+    participant BP as BlockProcess
+    participant Sp as Synchronizer::poll
+    participant C as main thread
+    participant PU as PreloadUnverified thread
+    participant CV as ConsumeUnverifiedBlocks thread
+
+    box crate:ckb-sync
+        participant Sr
+        participant Sp
+        participant BP
+    end
+
+    box crate:ckb-chain
+        participant C
+        participant PU
+        participant CV
+    end
+
+    Note left of Sr: synchronizer received<br/>Block(122) from remote peer
+    Note over Sr: try_process SyncMessageUnionReader::SendBlock
+    Sr ->>+ BP: BlockProcess::execute(Block(122))
+    BP ->>+ C: asynchronous_process_block(Block(122))
+    Note over C: non_contextual_verify(Block(122))
+    Note over C: insert_block(Block(122))
+    Note over C: OrphanBroker.process_lonely_block(Block(122))
+
+    alt parent is BLOCK_STORED or parent is_pending_verifying
+        Note over C: OrphanBroker.process_lonely_block(Block(122))
+        Note over C: increase unverified_tip to Block(122)
+        C ->>+ PU: send Block(122) to PreloadUnverified via channel
+    else parent not found
+        Note over C: OrphanBroker.process_lonely_block(Block(122))
+        Note over C: insert Block(122) to OrphanBroker
+    end
+    C ->>- BP: return
+    BP ->>- Sr: return
+    Note left of Sr: synchronizer received<br/>Block(123) from remote peer
+    Note over Sr: try_process SyncMessageUnionReader::SendBlock
+    Sr ->>+ BP: BlockProcess::execute(Block(123))
+    BP ->>+ C: asynchronous_process_block(Block(123))
+    Note over C: non_contextual_verify(Block(123))
+    Note over C: insert_block(Block(123))
+    Note over C: OrphanBroker.process_lonely_block(Block(123))
+    alt parent is BLOCK_STORED or parent is_pending_verifying
+        Note over C: OrphanBroker.process_lonely_block(Block(123))
+        Note over C: increase unverified_tip to Block(123)
+        C ->>+ PU: send Block(123) to PreloadUnverified via channel
+    else parent not found
+        Note over C: OrphanBroker.process_lonely_block(Block(123))
+        Note over C: insert Block(123) to OrphanBroker
+    end
+    C ->>- BP: return
+    BP ->>- Sr: return
+
+    loop load unverified
+        Note over PU: receive LonelyBlockHash
+        Note over PU: load UnverifiedBlock from db
+        PU ->>+ CV: send UnverifiedBlock to ConsumeUnverifiedBlocks
+    end
+
+    loop Consume Unverified Blocks
+        Note over CV: start verifying UnverifiedBlock if the channel is not empty
+        Note over CV: Verify Block in CKB VM
+
+        alt Block is Valid
+            Note over CV: remove Block from block_status and HeaderMap
+        else Block is Invalid
+            Note over CV: mark block as BLOCK_INVALID in block_status_map
+            Note over CV: Decrease Unverified TIP
+        end
+
+        opt Execute Callback
+            Note over CV: execute callback to punish the malicious peer if block is invalid
+            Note over CV: callback: Box<dyn FnOnce(Result<VerifiedBlockStatus, Error>) + Send + Sync>
+        end
+    end
diff --git a/docs/ckb_sync.mermaid b/docs/ckb_sync.mermaid
new file mode 100644
index 0000000000..c24a7f0640
--- /dev/null
+++ b/docs/ckb_sync.mermaid
@@ -0,0 +1,50 @@
+sequenceDiagram
+    autonumber
+
+    participant S as Synchronizer
+    participant BP as BlockProcess
+    participant C as ChainService
+
+
+    box crate:ckb_sync
+        participant S
+        participant BP
+    end
+
+
+    box crate:ckb_chain
+        participant C
+    end
+
+    Note left of S: synchronizer received<br/>Block(122) from remote peer
+
+    Note over S: try_process SyncMessageUnionReader::SendBlock
+
+
+    S->>+BP: BlockProcess::execute(Block(122))
+    BP->>+C: process_block(Block(122))
+    Note over BP: waiting for ChainService to return<br/>the result of process_block(Block(122))
+    Note over C: insert_block(Block(122))
+    C->>-BP: return result of process_block(Block(122))
+    BP->>-S: return result of BlockProcess::execute(Block(122))
+
+    alt block is Valid
+        Note over S: going on
+    else block is Invalid
+        Note over S: punish the malicious peer
+    end
+
+    Note left of S: synchronizer received<br/>Block(123) from remote peer
+    Note over S: try_process SyncMessageUnionReader::SendBlock
+    S->>+BP: BlockProcess::execute(Block(123))
+    BP->>+C: process_block(Block(123))
+    Note over BP: waiting for ChainService to return<br/>the result of process_block(Block(123))
+    Note over C: insert_block(Block(123))
+    C->>-BP: return result of process_block(Block(123))
+    BP->>-S: return result of BlockProcess::execute(Block(123))
+
+    alt block is Valid
+        Note over S: going on
+    else block is Invalid
+        Note over S: punish the malicious peer
+    end
diff --git a/error/Cargo.toml b/error/Cargo.toml
index aa3559a61b..133c880fa2 100644
--- a/error/Cargo.toml
+++ b/error/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "ckb-error"
-version = "0.117.0-pre"
+version = "0.118.0-pre"
 license = "MIT"
 authors = ["Nervos Core Dev <dev@nervos.org>"]
 edition = "2021"
@@ -11,5 +11,5 @@ repository = "https://github.com/nervosnetwork/ckb"
 [dependencies]
 thiserror = "1.0.22"
 anyhow = "1.0.34"
-ckb-occupied-capacity = { path = "../util/occupied-capacity", version = "= 0.117.0-pre" }
+ckb-occupied-capacity = { path = "../util/occupied-capacity", version = "= 0.118.0-pre" }
 derive_more = { version = "0.99.0", default-features = false, features = ["display"] }
diff --git a/error/src/lib.rs b/error/src/lib.rs
index 20db9982dc..2c2dfa575e 100644
--- a/error/src/lib.rs
+++ b/error/src/lib.rs
@@ -92,3 +92,24 @@ impl fmt::Debug for AnyError {
         self.0.fmt(f)
     }
 }
+/// Return whether the error's kind is `InternalErrorKind::Database` or `InternalErrorKind::System`
+///
+/// ### Panic
+///
+/// Panics if the error kind is `InternalErrorKind::DataCorrupted`.
+/// If the database is corrupted, panicking is better than handling it silently.
+pub fn is_internal_db_error(error: &Error) -> bool {
+    if error.kind() == ErrorKind::Internal {
+        let error_kind = error
+            .downcast_ref::<InternalError>()
+            .expect("error kind checked")
+            .kind();
+        if error_kind == InternalErrorKind::DataCorrupted {
+            panic!("{}", error)
+        } else {
+            return error_kind == InternalErrorKind::Database
+                || error_kind == InternalErrorKind::System;
+        }
+    }
+    false
+}
diff --git a/freezer/Cargo.toml b/freezer/Cargo.toml
index 880ca77d89..7f1c8ca2f6 100644
--- a/freezer/Cargo.toml
+++ b/freezer/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "ckb-freezer"
-version = "0.117.0-pre"
+version = "0.118.0-pre"
 license = "MIT"
 authors = ["Nervos Core Dev <dev@nervos.org>"]
 edition = "2021"
@@ -10,11 +10,11 @@ repository = "https://github.com/nervosnetwork/ckb"
 # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
 
 [dependencies]
-ckb-types = { path = "../util/types", version = "= 0.117.0-pre" }
-ckb-error = { path = "../error", version = "= 0.117.0-pre" }
-ckb-logger = { path = "../util/logger", version = "= 0.117.0-pre" }
-ckb-util = { path = "../util", version = "= 0.117.0-pre" }
-ckb-metrics = { path = "../util/metrics", version = "= 0.117.0-pre" }
+ckb-types = { path = "../util/types", version = "= 0.118.0-pre" }
+ckb-error = { path = "../error", version = "= 0.118.0-pre" }
+ckb-logger = { path = "../util/logger", version = "= 0.118.0-pre" }
+ckb-util = { path = "../util", version = "= 0.118.0-pre" }
+ckb-metrics = { path = "../util/metrics", version = "= 0.118.0-pre" }
 fs2 = "0.4.3"
 fail = "0.4"
 snap = "1"
diff --git a/miner/Cargo.toml b/miner/Cargo.toml
index 97f613ba23..4270197a0a 100644
--- a/miner/Cargo.toml
+++ b/miner/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "ckb-miner"
-version = "0.117.0-pre"
+version = "0.118.0-pre"
 license = "MIT"
 authors = ["Nervos Core Dev <dev@nervos.org>"]
 edition = "2021"
@@ -9,23 +9,23 @@ homepage = "https://github.com/nervosnetwork/ckb"
 repository = "https://github.com/nervosnetwork/ckb"
 
 [dependencies]
-ckb-logger = { path = "../util/logger", version = "= 0.117.0-pre" }
-ckb-app-config = { path = "../util/app-config", version
= "= 0.117.0-pre" } -ckb-types = { path = "../util/types", version = "= 0.117.0-pre" } -ckb-channel = { path = "../util/channel", version = "= 0.117.0-pre" } -ckb-hash = { path = "../util/hash", version = "= 0.117.0-pre" } -ckb-pow = { path = "../pow", version = "= 0.117.0-pre" } +ckb-logger = { path = "../util/logger", version = "= 0.118.0-pre" } +ckb-app-config = { path = "../util/app-config", version = "= 0.118.0-pre" } +ckb-types = { path = "../util/types", version = "= 0.118.0-pre" } +ckb-channel = { path = "../util/channel", version = "= 0.118.0-pre" } +ckb-hash = { path = "../util/hash", version = "= 0.118.0-pre" } +ckb-pow = { path = "../pow", version = "= 0.118.0-pre" } rand = "0.8" rand_distr = "0.4" serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" -ckb-jsonrpc-types = { path = "../util/jsonrpc-types", version = "= 0.117.0-pre" } +ckb-jsonrpc-types = { path = "../util/jsonrpc-types", version = "= 0.118.0-pre" } hyper = { version = "0.14", features = ["client", "http2", "http1", "server"] } hyper-tls = "0.5" futures = "0.3" lru = "0.7.1" -ckb-stop-handler = { path = "../util/stop-handler", version = "= 0.117.0-pre" } -ckb-async-runtime = { path = "../util/runtime", version = "= 0.117.0-pre" } +ckb-stop-handler = { path = "../util/stop-handler", version = "= 0.118.0-pre" } +ckb-async-runtime = { path = "../util/runtime", version = "= 0.118.0-pre" } indicatif = "0.16" console = ">=0.9.1, <1.0.0" eaglesong = "0.1" diff --git a/network/Cargo.toml b/network/Cargo.toml index e840fe4170..28d22658de 100644 --- a/network/Cargo.toml +++ b/network/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ckb-network" -version = "0.117.0-pre" +version = "0.118.0-pre" license = "MIT" authors = ["Nervos Core Dev "] edition = "2021" @@ -12,28 +12,28 @@ exclude = ["fuzz"] [dependencies] rand = "0.8" serde = { version = "1.0", features = ["derive"] } -ckb-util = { path = "../util", version = "= 0.117.0-pre" } -ckb-stop-handler = { path = "../util/stop-handler", version = "= 0.117.0-pre" } -ckb-logger = { path = "../util/logger", version = "= 0.117.0-pre" } -ckb-app-config = { path = "../util/app-config", version = "= 0.117.0-pre" } -ckb-metrics = { path = "../util/metrics", version = "= 0.117.0-pre" } +ckb-util = { path = "../util", version = "= 0.118.0-pre" } +ckb-stop-handler = { path = "../util/stop-handler", version = "= 0.118.0-pre" } +ckb-logger = { path = "../util/logger", version = "= 0.118.0-pre" } +ckb-app-config = { path = "../util/app-config", version = "= 0.118.0-pre" } +ckb-metrics = { path = "../util/metrics", version = "= 0.118.0-pre" } tokio = { version = "1", features = ["sync", "macros"] } tokio-util = { version = "0.7", features = ["codec"] } futures = "0.3" -ckb-systemtime = { path = "../util/systemtime", version = "= 0.117.0-pre" } +ckb-systemtime = { path = "../util/systemtime", version = "= 0.118.0-pre" } lazy_static = { version = "1.3.0", optional = true } bs58 = { version = "0.4.0", optional = true } sentry = { version = "0.26.0", optional = true } faster-hex = { version = "0.6", optional = true } -ckb-hash = { path = "../util/hash", version = "= 0.117.0-pre" } +ckb-hash = { path = "../util/hash", version = "= 0.118.0-pre" } secp256k1 = { version = "0.29", features = ["recovery"], optional = true } trust-dns-resolver = { version = "0.20", optional = true } snap = "1" -ckb-types = { path = "../util/types", version = "= 0.117.0-pre" } +ckb-types = { path = "../util/types", version = "= 0.118.0-pre" } ipnetwork = "0.18" serde_json = "1.0" bloom-filters = "0.1" 
-ckb-spawn = { path = "../util/spawn", version = "= 0.117.0-pre" } +ckb-spawn = { path = "../util/spawn", version = "= 0.118.0-pre" } socket2 = "0.5" bitflags = "1.0" @@ -60,7 +60,7 @@ criterion = "0.5" proptest = "1.0" num_cpus = "1.10" once_cell = "1.8.0" -ckb-systemtime = { path = "../util/systemtime", version = "= 0.117.0-pre", features = [ +ckb-systemtime = { path = "../util/systemtime", version = "= 0.118.0-pre", features = [ "enable_faketime", ] } diff --git a/network/fuzz/Cargo.toml b/network/fuzz/Cargo.toml index f29a3f976a..d79f09d0f5 100644 --- a/network/fuzz/Cargo.toml +++ b/network/fuzz/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ckb-network-fuzz" -version = "0.117.0-pre" +version = "0.118.0-pre" publish = false edition = "2021" license = "MIT" diff --git a/notify/Cargo.toml b/notify/Cargo.toml index 0ff95b2d9c..cb4395c4f0 100644 --- a/notify/Cargo.toml +++ b/notify/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ckb-notify" -version = "0.117.0-pre" +version = "0.118.0-pre" authors = ["Nervos Core Dev "] edition = "2021" license = "MIT" @@ -9,11 +9,11 @@ homepage = "https://github.com/nervosnetwork/ckb" repository = "https://github.com/nervosnetwork/ckb" [dependencies] -ckb-logger = { path = "../util/logger", version = "= 0.117.0-pre" } -ckb-app-config = { path = "../util/app-config", version = "= 0.117.0-pre" } -ckb-types = { path = "../util/types", version = "= 0.117.0-pre" } -ckb-stop-handler = { path = "../util/stop-handler", version = "= 0.117.0-pre" } -ckb-async-runtime = { path = "../util/runtime", version = "= 0.117.0-pre" } +ckb-logger = { path = "../util/logger", version = "= 0.118.0-pre" } +ckb-app-config = { path = "../util/app-config", version = "= 0.118.0-pre" } +ckb-types = { path = "../util/types", version = "= 0.118.0-pre" } +ckb-stop-handler = { path = "../util/stop-handler", version = "= 0.118.0-pre" } +ckb-async-runtime = { path = "../util/runtime", version = "= 0.118.0-pre" } tokio = { version = "1", features = ["sync"] } [dev-dependencies] diff --git a/pow/Cargo.toml b/pow/Cargo.toml index ff1901b8d2..f767a86a41 100644 --- a/pow/Cargo.toml +++ b/pow/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ckb-pow" -version = "0.117.0-pre" +version = "0.118.0-pre" authors = ["Nervos Core Dev "] edition = "2021" license = "MIT" @@ -10,8 +10,8 @@ repository = "https://github.com/nervosnetwork/ckb" [dependencies] byteorder = "1.3.1" -ckb-types = { path = "../util/types", version = "= 0.117.0-pre" } -ckb-hash = { path = "../util/hash", version = "= 0.117.0-pre"} +ckb-types = { path = "../util/types", version = "= 0.118.0-pre" } +ckb-hash = { path = "../util/hash", version = "= 0.118.0-pre"} serde = { version = "1.0", features = ["derive"] } eaglesong = "0.1" log = "0.4" diff --git a/resource/Cargo.toml b/resource/Cargo.toml index 4e0cf0e2ca..25907be6f2 100644 --- a/resource/Cargo.toml +++ b/resource/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ckb-resource" -version = "0.117.0-pre" +version = "0.118.0-pre" license = "MIT" authors = ["Nervos Core Dev "] edition = "2021" @@ -13,13 +13,13 @@ repository = "https://github.com/nervosnetwork/ckb" phf = "0.8.0" includedir = "0.6.0" serde = { version = "1.0", features = ["derive"] } -ckb-types = { path = "../util/types", version = "= 0.117.0-pre" } +ckb-types = { path = "../util/types", version = "= 0.118.0-pre" } ckb-system-scripts = { version = "= 0.5.4" } [build-dependencies] includedir_codegen = "0.6.0" walkdir = "2.1.4" -ckb-types = { path = "../util/types", version = "= 0.117.0-pre" } +ckb-types = { path = "../util/types", 
version = "= 0.118.0-pre" } ckb-system-scripts = { version = "= 0.5.4" } [dev-dependencies] diff --git a/resource/ckb.toml b/resource/ckb.toml index 20d96b9ac9..e47a37cff4 100644 --- a/resource/ckb.toml +++ b/resource/ckb.toml @@ -131,6 +131,11 @@ enable_deprecated_rpc = false # {{ # integration => enable_deprecated_rpc = true # }} +# By default, there is no limitation on the size of batch request size +# a huge batch request may cost a lot of memory or makes the RPC server slow, +# to avoid this, you may want to add a limit for the batch request size. +# rpc_batch_limit = 2000 + [tx_pool] max_tx_pool_size = 180_000_000 # 180mb min_fee_rate = 1_000 # Here fee_rate are calculated directly using size in units of shannons/KB diff --git a/rpc/Cargo.toml b/rpc/Cargo.toml index 461dc7a4fb..7c595ffa1b 100644 --- a/rpc/Cargo.toml +++ b/rpc/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ckb-rpc" -version = "0.117.0-pre" +version = "0.118.0-pre" license = "MIT" authors = ["Nervos Core Dev "] edition = "2021" @@ -9,38 +9,38 @@ homepage = "https://github.com/nervosnetwork/ckb" repository = "https://github.com/nervosnetwork/ckb" [dependencies] -ckb-chain-spec = { path = "../spec", version = "= 0.117.0-pre" } -ckb-types = { path = "../util/types", version = "= 0.117.0-pre" } -ckb-network = { path = "../network", version = "= 0.117.0-pre" } -ckb-notify = { path = "../notify", version = "= 0.117.0-pre" } -ckb-shared = { path = "../shared", version = "= 0.117.0-pre" } -ckb-store = { path = "../store", version = "= 0.117.0-pre" } -ckb-sync = { path = "../sync", version = "= 0.117.0-pre" } -ckb-chain = { path = "../chain", version = "= 0.117.0-pre" } -ckb-logger = { path = "../util/logger", version = "= 0.117.0-pre" } -ckb-logger-service = { path = "../util/logger-service", version = "= 0.117.0-pre" } -ckb-network-alert = { path = "../util/network-alert", version = "= 0.117.0-pre" } -ckb-app-config = { path = "../util/app-config", version = "= 0.117.0-pre" } -ckb-constant = { path = "../util/constant", version = "= 0.117.0-pre" } +ckb-chain-spec = { path = "../spec", version = "= 0.118.0-pre" } +ckb-types = { path = "../util/types", version = "= 0.118.0-pre" } +ckb-network = { path = "../network", version = "= 0.118.0-pre" } +ckb-notify = { path = "../notify", version = "= 0.118.0-pre" } +ckb-shared = { path = "../shared", version = "= 0.118.0-pre" } +ckb-store = { path = "../store", version = "= 0.118.0-pre" } +ckb-sync = { path = "../sync", version = "= 0.118.0-pre" } +ckb-chain = { path = "../chain", version = "= 0.118.0-pre" } +ckb-logger = { path = "../util/logger", version = "= 0.118.0-pre" } +ckb-logger-service = { path = "../util/logger-service", version = "= 0.118.0-pre" } +ckb-network-alert = { path = "../util/network-alert", version = "= 0.118.0-pre" } +ckb-app-config = { path = "../util/app-config", version = "= 0.118.0-pre" } +ckb-constant = { path = "../util/constant", version = "= 0.118.0-pre" } jsonrpc-core = "18.0" serde_json = "1.0" jsonrpc-utils = { version = "0.2.6", features = ["server", "macros", "axum"] } -ckb-jsonrpc-types = { path = "../util/jsonrpc-types", version = "= 0.117.0-pre" } -ckb-verification = { path = "../verification", version = "= 0.117.0-pre" } -ckb-verification-traits = { path = "../verification/traits", version = "= 0.117.0-pre" } -ckb-traits = { path = "../traits", version = "= 0.117.0-pre" } -ckb-util = { path = "../util", version = "= 0.117.0-pre" } -ckb-systemtime = { path = "../util/systemtime", version = "= 0.117.0-pre" } -ckb-dao = { path = "../util/dao", 
version = "= 0.117.0-pre" } -ckb-error = { path = "../error", version = "= 0.117.0-pre" } -ckb-reward-calculator = { path = "../util/reward-calculator", version = "= 0.117.0-pre" } -ckb-tx-pool = { path = "../tx-pool", version = "= 0.117.0-pre" } -ckb-memory-tracker = { path = "../util/memory-tracker", version = "= 0.117.0-pre" } -ckb-pow = { path = "../pow", version = "= 0.117.0-pre" } -ckb-indexer = { path = "../util/indexer", version = "= 0.117.0-pre" } -ckb-indexer-sync = { path = "../util/indexer-sync", version = "= 0.117.0-pre" } -ckb-rich-indexer = { path = "../util/rich-indexer", version = "= 0.117.0-pre" } -ckb-stop-handler = { path = "../util/stop-handler", version = "= 0.117.0-pre" } +ckb-jsonrpc-types = { path = "../util/jsonrpc-types", version = "= 0.118.0-pre" } +ckb-verification = { path = "../verification", version = "= 0.118.0-pre" } +ckb-verification-traits = { path = "../verification/traits", version = "= 0.118.0-pre" } +ckb-traits = { path = "../traits", version = "= 0.118.0-pre" } +ckb-util = { path = "../util", version = "= 0.118.0-pre" } +ckb-systemtime = { path = "../util/systemtime", version = "= 0.118.0-pre" } +ckb-dao = { path = "../util/dao", version = "= 0.118.0-pre" } +ckb-error = { path = "../error", version = "= 0.118.0-pre" } +ckb-reward-calculator = { path = "../util/reward-calculator", version = "= 0.118.0-pre" } +ckb-tx-pool = { path = "../tx-pool", version = "= 0.118.0-pre" } +ckb-memory-tracker = { path = "../util/memory-tracker", version = "= 0.118.0-pre" } +ckb-pow = { path = "../pow", version = "= 0.118.0-pre" } +ckb-indexer = { path = "../util/indexer", version = "= 0.118.0-pre" } +ckb-indexer-sync = { path = "../util/indexer-sync", version = "= 0.118.0-pre" } +ckb-rich-indexer = { path = "../util/rich-indexer", version = "= 0.118.0-pre" } +ckb-stop-handler = { path = "../util/stop-handler", version = "= 0.118.0-pre" } itertools.workspace = true tokio = "1" async-trait = "0.1" @@ -49,15 +49,14 @@ tokio-util = { version = "0.7.3", features = ["codec"] } futures-util = { version = "0.3.21" } tower-http = { version = "0.3.5", features = ["timeout", "cors"] } async-stream = "0.3.3" -ckb-async-runtime = { path = "../util/runtime", version = "= 0.117.0-pre" } +ckb-async-runtime = { path = "../util/runtime", version = "= 0.118.0-pre" } # issue tracking: https://github.com/GREsau/schemars/pull/251 schemars = { version = "0.8.19", package = "ckb_schemars" } - [dev-dependencies] reqwest = { version = "=0.11.20", features = ["blocking", "json"] } serde = { version = "1.0", features = ["derive"] } -ckb-shared = { path = "../shared", version = "= 0.117.0-pre" } -ckb-test-chain-utils = { path = "../util/test-chain-utils", version = "= 0.117.0-pre" } +ckb-shared = { path = "../shared", version = "= 0.118.0-pre" } +ckb-test-chain-utils = { path = "../util/test-chain-utils", version = "= 0.118.0-pre" } tempfile.workspace = true pretty_assertions = "1.3.0" -ckb-dao-utils = { path = "../util/dao/utils", version = "= 0.117.0-pre" } +ckb-dao-utils = { path = "../util/dao/utils", version = "= 0.118.0-pre" } diff --git a/rpc/README.md b/rpc/README.md index f2a27d8172..e85719ae50 100644 --- a/rpc/README.md +++ b/rpc/README.md @@ -85,6 +85,7 @@ The crate `ckb-rpc`'s minimum supported rustc version is 1.71.1. 
    * [Method `notify_transaction`](#integration_test-notify_transaction)
    * [Method `generate_block_with_template`](#integration_test-generate_block_with_template)
    * [Method `calculate_dao_field`](#integration_test-calculate_dao_field)
+    * [Method `send_test_transaction`](#integration_test-send_test_transaction)
 * [Module Miner](#module-miner) [👉 OpenRPC spec](http://playground.open-rpc.org/?uiSchema[appBar][ui:title]=CKB-Miner&uiSchema[appBar][ui:splitView]=false&uiSchema[appBar][ui:examplesDropdown]=false&uiSchema[appBar][ui:logoUrl]=https://raw.githubusercontent.com/nervosnetwork/ckb-rpc-resources/develop/ckb-logo.jpg&schemaUrl=https://raw.githubusercontent.com/nervosnetwork/ckb-rpc-resources/develop/json/miner_rpc_doc.json)
    * [Method `get_block_template`](#miner-get_block_template)
@@ -3549,6 +3550,95 @@ Response
 }
 ```

+
+#### Method `send_test_transaction`
+* `send_test_transaction(tx, outputs_validator)`
+    * `tx`: [`Transaction`](#type-transaction)
+    * `outputs_validator`: [`OutputsValidator`](#type-outputsvalidator) `|` `null`
+* result: [`H256`](#type-h256)
+
+Submits a new local test transaction into the transaction pool; for testing only.
+If the transaction is already in the pool, it is rebroadcast to peers.
+
+###### Params
+
+* `transaction` - The transaction.
+* `outputs_validator` - Validates the transaction outputs before entering the tx-pool. (**Optional**, default is "passthrough").
+
+###### Errors
+
+* [`PoolRejectedTransactionByOutputsValidator (-1102)`](../enum.RPCError.html#variant.PoolRejectedTransactionByOutputsValidator) - The transaction is rejected by the validator specified by `outputs_validator`. If you really want to send transactions with advanced scripts, please set `outputs_validator` to "passthrough".
+* [`PoolRejectedTransactionByMinFeeRate (-1104)`](../enum.RPCError.html#variant.PoolRejectedTransactionByMinFeeRate) - The transaction fee rate must be greater than or equal to the config option `tx_pool.min_fee_rate`.
+* [`PoolRejectedTransactionByMaxAncestorsCountLimit (-1105)`](../enum.RPCError.html#variant.PoolRejectedTransactionByMaxAncestorsCountLimit) - The ancestors count must be less than or equal to the config option `tx_pool.max_ancestors_count`.
+* [`PoolIsFull (-1106)`](../enum.RPCError.html#variant.PoolIsFull) - Pool is full.
+* [`PoolRejectedDuplicatedTransaction (-1107)`](../enum.RPCError.html#variant.PoolRejectedDuplicatedTransaction) - The transaction is already in the pool.
+* [`TransactionFailedToResolve (-301)`](../enum.RPCError.html#variant.TransactionFailedToResolve) - Failed to resolve the referenced cells and headers used in the transaction, as inputs or dependencies.
+* [`TransactionFailedToVerify (-302)`](../enum.RPCError.html#variant.TransactionFailedToVerify) - Failed to verify the transaction.
+
+###### Examples
+
+Request
+
+```json
+{
+  "id": 42,
+  "jsonrpc": "2.0",
+  "method": "send_test_transaction",
+  "params": [
+    {
+      "cell_deps": [
+        {
+          "dep_type": "code",
+          "out_point": {
+            "index": "0x0",
+            "tx_hash": "0xa4037a893eb48e18ed4ef61034ce26eba9c585f15c9cee102ae58505565eccc3"
+          }
+        }
+      ],
+      "header_deps": [
+        "0x7978ec7ce5b507cfb52e149e36b1a23f6062ed150503c85bbf825da3599095ed"
+      ],
+      "inputs": [
+        {
+          "previous_output": {
+            "index": "0x0",
+            "tx_hash": "0x365698b50ca0da75dca2c87f9e7b563811d3b5813736b8cc62cc3b106faceb17"
+          },
+          "since": "0x0"
+        }
+      ],
+      "outputs": [
+        {
+          "capacity": "0x2540be400",
+          "lock": {
+            "code_hash": "0x28e83a1277d48add8e72fadaa9248559e1b632bab2bd60b27955ebc4c03800a5",
+            "hash_type": "data",
+            "args": "0x"
+          },
+          "type": null
+        }
+      ],
+      "outputs_data": [
+        "0x"
+      ],
+      "version": "0x0",
+      "witnesses": []
+    },
+    "passthrough"
+  ]
+}
+```
+
+Response
+
+```json
+{
+  "id": 42,
+  "jsonrpc": "2.0",
+  "result": "0xa0ef4eb5f4ceeb08a4c8524d84c5da95dce2f608e0ca2ec8091191b0f330c6e3"
+}
+```
+
 ### Module `Miner`
 - [👉 OpenRPC spec](http://playground.open-rpc.org/?uiSchema[appBar][ui:title]=CKB-Miner&uiSchema[appBar][ui:splitView]=false&uiSchema[appBar][ui:examplesDropdown]=false&uiSchema[appBar][ui:logoUrl]=https://raw.githubusercontent.com/nervosnetwork/ckb-rpc-resources/develop/ckb-logo.jpg&schemaUrl=https://raw.githubusercontent.com/nervosnetwork/ckb-rpc-resources/develop/json/miner_rpc_doc.json)
@@ -4150,7 +4240,10 @@ Response
     "min_chain_work_reached": true,
     "normal_time": "0x4e2",
     "orphan_blocks_count": "0x0",
-    "orphan_blocks_size": "0x0"
+    "tip_hash": "0xa5f5c85987a15de25661e5a214f2c1449cd803f071acc7999820f25246471f40",
+    "tip_number": "0x400",
+    "unverified_tip_hash": "0xa5f5c85987a15de25661e5a214f2c1449cd803f071acc7999820f25246471f40",
+    "unverified_tip_number": "0x400"
   }
 }
 ```
@@ -4616,7 +4709,8 @@ Response
     "tip_number": "0x400",
     "total_tx_cycles": "0x219",
     "total_tx_size": "0x112",
-    "tx_size_limit": "0x7d000"
+    "tx_size_limit": "0x7d000",
+    "verify_queue_size": "0x0"
   }
 }
 ```
@@ -6873,7 +6967,13 @@ The overall chain synchronization state of this local node.

 If this number is too high, it indicates that block download has stuck at some block.

-* `orphan_blocks_size`: [`Uint64`](#type-uint64) - The size of all download orphan blocks
+* `tip_hash`: [`H256`](#type-h256) - The block hash of the current tip block
+
+* `tip_number`: [`Uint64`](#type-uint64) - The block number of the current tip block
+
+* `unverified_tip_hash`: [`H256`](#type-h256) - The block hash of the current unverified tip block
+
+* `unverified_tip_number`: [`Uint64`](#type-uint64) - The block number of the current unverified tip block

 ### Type `Timestamp`
@@ -7151,6 +7251,8 @@ Transaction pool information.

 Transactions with a large size close to the block size limit may not be packaged, because the block header and cellbase are occupied, so the tx-pool is limited to accepting transaction up to tx_size_limit.

+* `verify_queue_size`: [`Uint64`](#type-uint64) - The size of the tx-pool verify queue
+
 ### Type `TxStatus`

 Transaction status and the block hash if it is committed.
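As a usage sketch of the `rpc_batch_limit` option introduced in `resource/ckb.toml` above (the limit value of 2 and the three batched calls are illustrative, not part of the patch): against a node configured with `rpc_batch_limit = 2`, a JSON-RPC batch of three requests

```json
[
  { "id": 1, "jsonrpc": "2.0", "method": "get_tip_block_number", "params": [] },
  { "id": 2, "jsonrpc": "2.0", "method": "get_tip_block_number", "params": [] },
  { "id": 3, "jsonrpc": "2.0", "method": "get_tip_block_number", "params": [] }
]
```

would be answered with a single `invalid_params` failure rather than three results, mirroring the batch-size check added to `handle_jsonrpc` in `rpc/src/server.rs` below:

```json
{
  "jsonrpc": "2.0",
  "id": null,
  "error": {
    "code": -32602,
    "message": "batch size is too large, expected no more than: 2"
  }
}
```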
diff --git a/rpc/src/module/miner.rs b/rpc/src/module/miner.rs
index 814f12f91d..3338c6d5aa 100644
--- a/rpc/src/module/miner.rs
+++ b/rpc/src/module/miner.rs
@@ -1,10 +1,11 @@
 use crate::error::RPCError;
 use async_trait::async_trait;
-use ckb_chain::chain::ChainController;
+use ckb_chain::ChainController;
 use ckb_jsonrpc_types::{Block, BlockTemplate, Uint64, Version};
 use ckb_logger::{debug, error, info, warn};
 use ckb_network::{NetworkController, PeerIndex, SupportProtocols, TargetSession};
 use ckb_shared::{shared::Shared, Snapshot};
+use ckb_store::ChainStore;
 use ckb_systemtime::unix_time_as_millis;
 use ckb_types::{core, packed, prelude::*, H256};
 use ckb_verification::HeaderVerifier;
@@ -274,11 +275,26 @@ impl MinerRpc for MinerRpcImpl {
         HeaderVerifier::new(snapshot, consensus)
             .verify(&header)
             .map_err(|err| handle_submit_error(&work_id, &err))?;
+        if self
+            .shared
+            .snapshot()
+            .get_block_header(&block.parent_hash())
+            .is_none()
+        {
+            let err = format!(
+                "Block parent {} of {}-{} not found",
+                block.parent_hash(),
+                block.number(),
+                block.hash()
+            );
+
+            return Err(handle_submit_error(&work_id, &err));
+        }
         // Verify and insert block
         let is_new = self
             .chain
-            .process_block(Arc::clone(&block))
+            .blocking_process_block(Arc::clone(&block))
             .map_err(|err| handle_submit_error(&work_id, &err))?;
         info!(
             "end to submit block, work_id = {}, is_new = {}, block = #{}({})",
diff --git a/rpc/src/module/net.rs b/rpc/src/module/net.rs
index 0513b126ab..de75699e68 100644
--- a/rpc/src/module/net.rs
+++ b/rpc/src/module/net.rs
@@ -1,5 +1,6 @@
 use crate::error::RPCError;
 use async_trait::async_trait;
+use ckb_chain::ChainController;
 use ckb_jsonrpc_types::{
     BannedAddr, LocalNode, LocalNodeProtocol, NodeAddress, PeerSyncState, RemoteNode,
     RemoteNodeProtocol, SyncState, Timestamp,
@@ -7,7 +8,7 @@ use ckb_jsonrpc_types::{
 use ckb_network::{extract_peer_id, multiaddr::Multiaddr, NetworkController};
 use ckb_sync::SyncShared;
 use ckb_systemtime::unix_time_as_millis;
-use ckb_types::prelude::Pack;
+use ckb_types::prelude::{Pack, Unpack};
 use jsonrpc_core::Result;
 use jsonrpc_utils::rpc;
 use std::sync::Arc;
@@ -374,7 +375,10 @@ pub trait NetRpc {
 ///     "min_chain_work_reached": true,
 ///     "normal_time": "0x4e2",
 ///     "orphan_blocks_count": "0x0",
-///     "orphan_blocks_size": "0x0"
+///     "tip_hash": "0xa5f5c85987a15de25661e5a214f2c1449cd803f071acc7999820f25246471f40",
+///     "tip_number": "0x400",
+///     "unverified_tip_hash": "0xa5f5c85987a15de25661e5a214f2c1449cd803f071acc7999820f25246471f40",
+///     "unverified_tip_number": "0x400"
 ///   }
 /// }
 /// ```
@@ -543,6 +547,7 @@ pub trait NetRpc {
 pub(crate) struct NetRpcImpl {
     pub network_controller: NetworkController,
     pub sync_shared: Arc<SyncShared>,
+    pub chain_controller: Arc<ChainController>,
 }

 #[async_trait]
@@ -721,7 +726,8 @@ impl NetRpc for NetRpcImpl {
     fn sync_state(&self) -> Result<SyncState> {
         let chain = self.sync_shared.active_chain();
-        let state = chain.shared().state();
+        let shared = chain.shared();
+        let state = chain.state();
         let (fast_time, normal_time, low_time) = state.read_inflight_blocks().division_point();
         let best_known = state.shared_best_header();
         let min_chain_work = {
             let mut min_chain_work_500k_u128 = [0u8; 16];
             min_chain_work_500k_u128
                 .copy_from_slice(&self.sync_shared.state().min_chain_work().to_le_bytes()[..16]);
             u128::from_le_bytes(min_chain_work_500k_u128)
         };
+        let unverified_tip = shared.get_unverified_tip();
         let sync_state = SyncState {
             ibd: chain.is_initial_block_download(),
-            assume_valid_target_reached: state.assume_valid_target().is_none(),
-            assume_valid_target: state
+            assume_valid_target_reached: shared.assume_valid_target().is_none(),
+            assume_valid_target: shared
                 .assume_valid_target_specified()
+                .as_ref()
+                .clone()
                 .unwrap_or_default()
                 .pack()
                 .into(),
@@ -742,10 +751,13 @@ impl NetRpc for NetRpcImpl {
             min_chain_work_reached: state.min_chain_work_ready(),
             best_known_block_number: best_known.number().into(),
             best_known_block_timestamp: best_known.timestamp().into(),
-            orphan_blocks_count: (state.orphan_pool().len() as u64).into(),
-            orphan_blocks_size: (state.orphan_pool().total_size() as u64).into(),
+            orphan_blocks_count: (self.chain_controller.orphan_blocks_len() as u64).into(),
             inflight_blocks_count: (state.read_inflight_blocks().total_inflight_count() as u64)
                 .into(),
+            unverified_tip_number: unverified_tip.number().into(),
+            unverified_tip_hash: unverified_tip.hash().unpack(),
+            tip_number: chain.tip_number().into(),
+            tip_hash: chain.tip_hash().unpack(),
             fast_time: fast_time.into(),
             normal_time: normal_time.into(),
             low_time: low_time.into(),
diff --git a/rpc/src/module/pool.rs b/rpc/src/module/pool.rs
index aab1071e0d..9fe30b357e 100644
--- a/rpc/src/module/pool.rs
+++ b/rpc/src/module/pool.rs
@@ -287,7 +287,8 @@ pub trait PoolRpc {
 ///     "tip_number": "0x400",
 ///     "total_tx_cycles": "0x219",
 ///     "total_tx_size": "0x112",
-///     "tx_size_limit": "0x7d000"
+///     "tx_size_limit": "0x7d000",
+///     "verify_queue_size": "0x0"
 ///   }
 /// }
 /// ```
diff --git a/rpc/src/module/test.rs b/rpc/src/module/test.rs
index b5db29e0d7..1974b39404 100644
--- a/rpc/src/module/test.rs
+++ b/rpc/src/module/test.rs
@@ -1,8 +1,10 @@
 use crate::error::RPCError;
 use async_trait::async_trait;
-use ckb_chain::chain::ChainController;
+use ckb_chain::ChainController;
 use ckb_dao::DaoCalculator;
-use ckb_jsonrpc_types::{Block, BlockTemplate, Byte32, EpochNumberWithFraction, Transaction};
+use ckb_jsonrpc_types::{
+    Block, BlockTemplate, Byte32, EpochNumberWithFraction, OutputsValidator, Transaction,
+};
 use ckb_logger::error;
 use ckb_network::{NetworkController, SupportProtocols};
 use ckb_shared::{shared::Shared, Snapshot};
@@ -25,6 +27,8 @@ use jsonrpc_utils::rpc;
 use std::collections::HashSet;
 use std::sync::Arc;

+use super::pool::WellKnownScriptsOnlyValidator;
+
 /// RPC for Integration Test.
 #[rpc(openrpc)]
 #[async_trait]
@@ -498,6 +502,95 @@ pub trait IntegrationTestRpc {
     /// ```
     #[rpc(name = "calculate_dao_field")]
     fn calculate_dao_field(&self, block_template: BlockTemplate) -> Result<Byte32>;
+
+    /// Submits a new local test transaction into the transaction pool; for testing only.
+    /// If the transaction is already in the pool, it is rebroadcast to peers.
+    ///
+    /// ## Params
+    ///
+    /// * `transaction` - The transaction.
+    /// * `outputs_validator` - Validates the transaction outputs before entering the tx-pool. (**Optional**, default is "passthrough").
+    ///
+    /// ## Errors
+    ///
+    /// * [`PoolRejectedTransactionByOutputsValidator (-1102)`](../enum.RPCError.html#variant.PoolRejectedTransactionByOutputsValidator) - The transaction is rejected by the validator specified by `outputs_validator`. If you really want to send transactions with advanced scripts, please set `outputs_validator` to "passthrough".
+    /// * [`PoolRejectedTransactionByMinFeeRate (-1104)`](../enum.RPCError.html#variant.PoolRejectedTransactionByMinFeeRate) - The transaction fee rate must be greater than or equal to the config option `tx_pool.min_fee_rate`.
+    /// * [`PoolRejectedTransactionByMaxAncestorsCountLimit (-1105)`](../enum.RPCError.html#variant.PoolRejectedTransactionByMaxAncestorsCountLimit) - The ancestors count must be less than or equal to the config option `tx_pool.max_ancestors_count`.
+    /// * [`PoolIsFull (-1106)`](../enum.RPCError.html#variant.PoolIsFull) - Pool is full.
+    /// * [`PoolRejectedDuplicatedTransaction (-1107)`](../enum.RPCError.html#variant.PoolRejectedDuplicatedTransaction) - The transaction is already in the pool.
+    /// * [`TransactionFailedToResolve (-301)`](../enum.RPCError.html#variant.TransactionFailedToResolve) - Failed to resolve the referenced cells and headers used in the transaction, as inputs or dependencies.
+    /// * [`TransactionFailedToVerify (-302)`](../enum.RPCError.html#variant.TransactionFailedToVerify) - Failed to verify the transaction.
+    ///
+    /// ## Examples
+    ///
+    /// Request
+    ///
+    /// ```json
+    /// {
+    ///   "id": 42,
+    ///   "jsonrpc": "2.0",
+    ///   "method": "send_test_transaction",
+    ///   "params": [
+    ///     {
+    ///       "cell_deps": [
+    ///         {
+    ///           "dep_type": "code",
+    ///           "out_point": {
+    ///             "index": "0x0",
+    ///             "tx_hash": "0xa4037a893eb48e18ed4ef61034ce26eba9c585f15c9cee102ae58505565eccc3"
+    ///           }
+    ///         }
+    ///       ],
+    ///       "header_deps": [
+    ///         "0x7978ec7ce5b507cfb52e149e36b1a23f6062ed150503c85bbf825da3599095ed"
+    ///       ],
+    ///       "inputs": [
+    ///         {
+    ///           "previous_output": {
+    ///             "index": "0x0",
+    ///             "tx_hash": "0x365698b50ca0da75dca2c87f9e7b563811d3b5813736b8cc62cc3b106faceb17"
+    ///           },
+    ///           "since": "0x0"
+    ///         }
+    ///       ],
+    ///       "outputs": [
+    ///         {
+    ///           "capacity": "0x2540be400",
+    ///           "lock": {
+    ///             "code_hash": "0x28e83a1277d48add8e72fadaa9248559e1b632bab2bd60b27955ebc4c03800a5",
+    ///             "hash_type": "data",
+    ///             "args": "0x"
+    ///           },
+    ///           "type": null
+    ///         }
+    ///       ],
+    ///       "outputs_data": [
+    ///         "0x"
+    ///       ],
+    ///       "version": "0x0",
+    ///       "witnesses": []
+    ///     },
+    ///     "passthrough"
+    ///   ]
+    /// }
+    /// ```
+    ///
+    /// Response
+    ///
+    /// ```json
+    /// {
+    ///   "id": 42,
+    ///   "jsonrpc": "2.0",
+    ///   "result": "0xa0ef4eb5f4ceeb08a4c8524d84c5da95dce2f608e0ca2ec8091191b0f330c6e3"
+    /// }
+    /// ```
+    ///
+    #[rpc(name = "send_test_transaction")]
+    fn send_test_transaction(
+        &self,
+        tx: Transaction,
+        outputs_validator: Option<OutputsValidator>,
+    ) -> Result<H256>;
 }

 #[derive(Clone)]
@@ -505,6 +598,8 @@ pub(crate) struct IntegrationTestRpcImpl {
     pub network_controller: NetworkController,
     pub shared: Shared,
     pub chain: ChainController,
+    pub well_known_lock_scripts: Vec<packed::Script>,
+    pub well_known_type_scripts: Vec<packed::Script>,
 }

 #[async_trait]
@@ -514,8 +609,7 @@ impl IntegrationTestRpc for IntegrationTestRpcImpl {
         let block: Arc<core::BlockView> = Arc::new(block.into_view());
         let ret = self
             .chain
-            .internal_process_block(Arc::clone(&block), Switch::DISABLE_ALL);
-
+            .blocking_process_block_with_switch(Arc::clone(&block), Switch::DISABLE_ALL);
         if broadcast {
             let content = packed::CompactBlock::build_from_block(&block, &HashSet::new());
             let message = packed::RelayMessage::new_builder().set(content).build();
@@ -667,6 +761,49 @@ impl IntegrationTestRpc for IntegrationTestRpcImpl {
             .into(),
         )
     }
+
+    fn send_test_transaction(
+        &self,
+        tx: Transaction,
+        outputs_validator: Option<OutputsValidator>,
+    ) -> Result<H256> {
+        let tx: packed::Transaction = tx.into();
+        let tx: core::TransactionView = tx.into_view();
+
+        if let Err(e) = match outputs_validator {
+            None | Some(OutputsValidator::Passthrough) => Ok(()),
+            Some(OutputsValidator::WellKnownScriptsOnly) => WellKnownScriptsOnlyValidator::new(
+                self.shared.consensus(),
+                &self.well_known_lock_scripts,
+                &self.well_known_type_scripts,
+            )
+            .validate(&tx),
+        } {
+            return Err(RPCError::custom_with_data(
+                RPCError::PoolRejectedTransactionByOutputsValidator,
+                format!(
+                    "The transaction is rejected by OutputsValidator set in params[1]: {}. \
+                    Please check the related information in https://github.com/nervosnetwork/ckb/wiki/Transaction-%C2%BB-Default-Outputs-Validator",
+                    outputs_validator.unwrap_or(OutputsValidator::WellKnownScriptsOnly).json_display()
+                ),
+                e,
+            ));
+        }
+
+        let tx_pool = self.shared.tx_pool_controller();
+        let submit_tx = tx_pool.submit_local_test_tx(tx.clone());
+
+        if let Err(e) = submit_tx {
+            error!("Send submit_tx request error {}", e);
+            return Err(RPCError::ckb_internal_error(e));
+        }
+
+        let tx_hash = tx.hash();
+        match submit_tx.unwrap() {
+            Ok(_) => Ok(tx_hash.unpack()),
+            Err(reject) => Err(RPCError::from_submit_transaction_reject(&reject)),
+        }
+    }
 }

 impl IntegrationTestRpcImpl {
@@ -677,7 +814,7 @@ impl IntegrationTestRpcImpl {

         // insert block to chain
         self.chain
-            .process_block(Arc::clone(&block_view))
+            .blocking_process_block(Arc::clone(&block_view))
             .map_err(|err| RPCError::custom(RPCError::CKBInternalError, err.to_string()))?;

         // announce new block
diff --git a/rpc/src/server.rs b/rpc/src/server.rs
index 13fd55e37c..4835cd6765 100644
--- a/rpc/src/server.rs
+++ b/rpc/src/server.rs
@@ -7,21 +7,32 @@ use ckb_async_runtime::Handle;
 use ckb_error::AnyError;
 use ckb_logger::info;

-use axum::http::StatusCode;
+use axum::{body::Bytes, http::StatusCode, response::Response, Json};
+
+use jsonrpc_core::{MetaIoHandler, Metadata, Request};
+
 use ckb_stop_handler::{new_tokio_exit_rx, CancellationToken};
+use futures_util::future;
+use futures_util::future::Either::{Left, Right};
+use jsonrpc_core::types::error::ErrorCode;
+use jsonrpc_core::types::Response as RpcResponse;
+use jsonrpc_core::Error;
+
 use futures_util::{SinkExt, TryStreamExt};
-use jsonrpc_core::MetaIoHandler;
-use jsonrpc_utils::axum_utils::{handle_jsonrpc, handle_jsonrpc_ws};
+use jsonrpc_utils::axum_utils::handle_jsonrpc_ws;
 use jsonrpc_utils::pub_sub::Session;
 use jsonrpc_utils::stream::{serve_stream_sink, StreamMsg, StreamServerConfig};
 use std::net::{SocketAddr, ToSocketAddrs};
 use std::sync::Arc;
+use std::sync::OnceLock;
 use std::time::Duration;
 use tokio::net::TcpListener;
 use tokio_util::codec::{FramedRead, FramedWrite, LinesCodec, LinesCodecError};
 use tower_http::cors::CorsLayer;
 use tower_http::timeout::TimeoutLayer;

+static JSONRPC_BATCH_LIMIT: OnceLock<usize> = OnceLock::new();
+
 #[doc(hidden)]
 #[derive(Debug)]
 pub struct RpcServer {
@@ -39,6 +50,10 @@ impl RpcServer {
     /// * `io_handler` - RPC methods handler. See [ServiceBuilder](../service_builder/struct.ServiceBuilder.html).
     /// * `handler` - Tokio runtime handle.
     pub fn new(config: RpcConfig, io_handler: IoHandler, handler: Handle) -> Self {
+        if let Some(jsonrpc_batch_limit) = config.rpc_batch_limit {
+            let _ = JSONRPC_BATCH_LIMIT.get_or_init(|| jsonrpc_batch_limit);
+        }
+
         let rpc = Arc::new(io_handler);

         let http_address = Self::start_server(
@@ -195,3 +210,59 @@ async fn get_error_handler() -> impl IntoResponse {
         "Used HTTP Method is not allowed. POST or OPTIONS is required",
     )
 }
+
+async fn handle_jsonrpc<T: Metadata + Default>(
+    Extension(io): Extension<Arc<MetaIoHandler<T>>>,
+    req_body: Bytes,
+) -> Response {
+    let make_error_response = |error| {
+        Json(jsonrpc_core::Failure {
+            jsonrpc: Some(jsonrpc_core::Version::V2),
+            id: jsonrpc_core::Id::Null,
+            error,
+        })
+        .into_response()
+    };
+
+    let req = match std::str::from_utf8(req_body.as_ref()) {
+        Ok(req) => req,
+        Err(_) => {
+            return make_error_response(jsonrpc_core::Error::parse_error());
+        }
+    };
+
+    let req = serde_json::from_str::<Request>(req);
+    let result = match req {
+        Err(_error) => Left(future::ready(Some(RpcResponse::from(
+            Error::new(ErrorCode::ParseError),
+            Some(jsonrpc_core::Version::V2),
+        )))),
+        Ok(request) => {
+            if let Request::Batch(ref arr) = request {
+                if let Some(batch_size) = JSONRPC_BATCH_LIMIT.get() {
+                    if arr.len() > *batch_size {
+                        return make_error_response(jsonrpc_core::Error::invalid_params(format!(
+                            "batch size is too large, expected no more than: {}",
+                            batch_size
+                        )));
+                    }
+                }
+            }
+            Right(io.handle_rpc_request(request, T::default()))
+        }
+    };
+
+    if let Some(response) = result.await {
+        serde_json::to_string(&response)
+            .map(|json| {
+                (
+                    [(axum::http::header::CONTENT_TYPE, "application/json")],
+                    json,
+                )
+                    .into_response()
+            })
+            .unwrap_or_else(|_| StatusCode::INTERNAL_SERVER_ERROR.into_response())
+    } else {
+        StatusCode::NO_CONTENT.into_response()
+    }
+}
diff --git a/rpc/src/service_builder.rs b/rpc/src/service_builder.rs
index 182dbcc7e1..c71dea35fe 100644
--- a/rpc/src/service_builder.rs
+++ b/rpc/src/service_builder.rs
@@ -9,7 +9,7 @@ use crate::module::{
 };
 use crate::{IoHandler, RPCError};
 use ckb_app_config::{DBConfig, IndexerConfig, RpcConfig};
-use ckb_chain::chain::ChainController;
+use ckb_chain::ChainController;
 use ckb_indexer::IndexerService;
 use ckb_indexer_sync::{new_secondary_db, PoolService};
 use ckb_network::NetworkController;
@@ -103,10 +103,12 @@ impl<'a> ServiceBuilder<'a> {
         mut self,
         network_controller: NetworkController,
         sync_shared: Arc<SyncShared>,
+        chain_controller: Arc<ChainController>,
     ) -> Self {
         let methods = NetRpcImpl {
             network_controller,
             sync_shared,
+            chain_controller,
         };
         set_rpc_module_methods!(self, "Net", net_enable, add_net_rpc_methods, methods)
     }
@@ -142,6 +144,8 @@ impl<'a> ServiceBuilder<'a> {
         shared: Shared,
         network_controller: NetworkController,
         chain: ChainController,
+        well_known_lock_scripts: Vec<packed::Script>